diff --git a/.circleci/config.yml b/.circleci/config.yml index 01b2b10a1..dafcbea5f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -2,7 +2,7 @@ version: 2.1 orbs: - prometheus: prometheus/prometheus@0.11.0 + prometheus: prometheus/prometheus@0.14.0 go: circleci/go@1.7.0 win: circleci/windows@2.3.0 @@ -99,21 +99,13 @@ jobs: steps: - checkout - run: go install ./cmd/promtool/. - - run: - command: go install -mod=readonly github.com/google/go-jsonnet/cmd/jsonnet github.com/google/go-jsonnet/cmd/jsonnetfmt github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb - working_directory: ~/project/documentation/prometheus-mixin - - run: - command: make clean - working_directory: ~/project/documentation/prometheus-mixin - - run: - command: jb install - working_directory: ~/project/documentation/prometheus-mixin - - run: - command: make - working_directory: ~/project/documentation/prometheus-mixin - - run: - command: git diff --exit-code - working_directory: ~/project/documentation/prometheus-mixin + - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest + - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest + - run: go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest + - run: make -C documentation/prometheus-mixin clean + - run: make -C documentation/prometheus-mixin jb_install + - run: make -C documentation/prometheus-mixin + - run: git diff --exit-code repo_sync: executor: golang @@ -148,8 +140,19 @@ workflows: only: /.*/ - prometheus/build: name: build + parallelism: 3 + promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386" + filters: + tags: + ignore: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/ + branches: + ignore: /^(main|release-.*|.*build-all.*)$/ + - prometheus/build: + name: build_all parallelism: 12 filters: + branches: + only: /^(main|release-.*|.*build-all.*)$/ tags: only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/ - prometheus/publish_main: @@ -157,7 +160,7 @@ workflows: 
requires: - test_go - test_ui - - build + - build_all filters: branches: only: main @@ -167,7 +170,7 @@ workflows: requires: - test_go - test_ui - - build + - build_all filters: tags: only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/ diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..93868bcfc --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,18 @@ +version: 2 +updates: + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "weekly" + - package-ecosystem: "npm" + directory: "/web/ui" + schedule: + interval: "weekly" + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + - package-ecosystem: "docker" + directory: "/" + schedule: + interval: "weekly" diff --git a/.github/lock.yml b/.github/lock.yml deleted file mode 100644 index bed690b33..000000000 --- a/.github/lock.yml +++ /dev/null @@ -1,35 +0,0 @@ -# Configuration for Lock Threads - https://github.com/dessant/lock-threads - -# Number of days of inactivity before a closed issue or pull request is locked -daysUntilLock: 180 - -# Skip issues and pull requests created before a given timestamp. Timestamp must -# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable -skipCreatedBefore: false - -# Issues and pull requests with these labels will be ignored. Set to `[]` to disable -exemptLabels: [] - -# Label to add before locking, such as `outdated`. Set to `false` to disable -lockLabel: false - -# Comment to post before locking. Set to `false` to disable -lockComment: false - -# Assign `resolved` as the reason for locking. 
Set to `false` to disable -setLockReason: false - -# Limit to only `issues` or `pulls` -only: issues - -# Optionally, specify configuration settings just for `issues` or `pulls` -# issues: -# exemptLabels: -# - help-wanted -# lockLabel: outdated - -# pulls: -# daysUntilLock: 30 - -# Repository to extend settings from -# _extends: repo diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml index eb4a76d54..c99b6d833 100644 --- a/.github/workflows/fuzzing.yml +++ b/.github/workflows/fuzzing.yml @@ -22,7 +22,7 @@ jobs: fuzz-seconds: 600 dry-run: false - name: Upload Crash - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v2.2.4 if: failure() && steps.build.outcome == 'success' with: name: artifacts diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 431fef711..f96c76a65 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -7,6 +7,7 @@ on: - "**.go" - "scripts/errcheck_excludes.txt" - ".github/workflows/golangci-lint.yml" + - ".golangci.yml" pull_request: paths: - "go.sum" @@ -14,6 +15,7 @@ on: - "**.go" - "scripts/errcheck_excludes.txt" - ".github/workflows/golangci-lint.yml" + - ".golangci.yml" jobs: golangci: diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml new file mode 100644 index 000000000..3ca1f76e4 --- /dev/null +++ b/.github/workflows/lock.yml @@ -0,0 +1,22 @@ +name: 'Lock Threads' + +on: + schedule: + - cron: '13 23 * * *' + workflow_dispatch: + +permissions: + issues: write + +concurrency: + group: lock + +jobs: + action: + runs-on: ubuntu-latest + steps: + - uses: dessant/lock-threads@v3 + with: + process-only: 'issues' + issue-inactive-days: '180' + github-token: ${{ secrets.PROMBOT_LOCKTHREADS_TOKEN }} diff --git a/.gitignore b/.gitignore index b382f6c0e..35564c059 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,9 @@ /promtool benchmark.txt /data +/data-agent /cmd/prometheus/data +/cmd/prometheus/data-agent 
/cmd/prometheus/debug /benchout /cmd/promtool/data diff --git a/.golangci.yml b/.golangci.yml index 642cf45c8..53d890f51 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,10 +1,18 @@ run: deadline: 5m + skip-files: + # Skip autogenerated files. + - ^.*\.(pb|y)\.go$ + +output: + sort-results: true linters: enable: - depguard - - golint + - gofumpt + - goimports + - revive issues: exclude-rules: @@ -25,3 +33,7 @@ linters-settings: - github.com/go-kit/kit/log: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log" errcheck: exclude: scripts/errcheck_excludes.txt + goimports: + local-prefixes: github.com/prometheus/prometheus + gofumpt: + extra-rules: true diff --git a/CHANGELOG.md b/CHANGELOG.md index 940e6694d..34af214a8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,58 @@ +## 2.32.0-beta.0 / 2021-11-16 + +This beta release introduces the Prometheus Agent, a new mode of operation for +Prometheus optimized for remote-write only scenarios. In this mode, Prometheus +does not generate blocks on the local filesystem and is not queryable locally. +Enable with `--enable-feature=agent`. + +Learn more about the Prometheus Agent in our [blog post](https://prometheus.io/blog/2021/11/16/agent/). + +* [CHANGE] remote-write: Change default max retry time from 100ms to 5 seconds. #9634 +* [FEATURE] Agent: New mode of operation optimized for remote-write only scenarios, without local storage. Enable with `--enable-feature=agent`. #8785 +* [FEATURE] Promtool: Add `promtool check service-discovery` command. #8970 +* [FEATURE] UI: Add search in metrics dropdown. #9629 +* [FEATURE] Templates: Add parseDuration to template functions. #8817 +* [ENHANCEMENT] Promtool: Improve test output. #8064 +* [ENHANCEMENT] Promtool: Use kahan summation for better numerical stability. #9588 +* [ENHANCEMENT] Remote-write: Reuse memory for marshalling. #9412 +* [ENHANCEMENT] Scrape: Add `scrape_body_size_bytes` scrape metric behind the `--enable-feature=extra-scrape-metrics` flag. 
#9569 +* [ENHANCEMENT] TSDB: Add windows arm64 support. #9703 +* [ENHANCEMENT] TSDB: Optimize query by skipping unneeded sorting in TSDB. #9673 +* [ENHANCEMENT] Templates: Support int and uint as datatypes for template formatting. #9680 +* [ENHANCEMENT] UI: Prefer `rate` over `rad`, `delta` over `deg`, and `count` over `cos` in autocomplete. #9688 +* [BUGFIX] TSDB: Add more size checks when writing individual sections in the index. #9710 +* [BUGFIX] PromQL: Make `deriv()` return zero values for constant series. #9728 + +## 2.31.1 / 2021-11-05 + +* [BUGFIX] SD: Fix a panic when the experimental discovery manager receives + targets during a reload. #9656 + +## 2.31.0 / 2021-11-02 + +* [CHANGE] UI: Remove standard PromQL editor in favour of the codemirror-based editor. #9452 +* [FEATURE] PromQL: Add trigonometric functions and `atan2` binary operator. #9239 #9248 #9515 +* [FEATURE] Remote: Add support for exemplar in the remote write receiver endpoint. #9319 #9414 +* [FEATURE] SD: Add PuppetDB service discovery. #8883 +* [FEATURE] SD: Add Uyuni service discovery. #8190 +* [FEATURE] Web: Add support for security-related HTTP headers. #9546 +* [ENHANCEMENT] Azure SD: Add `proxy_url`, `follow_redirects`, `tls_config`. #9267 +* [ENHANCEMENT] Backfill: Add `--max-block-duration` in `promtool create-blocks-from rules`. #9511 +* [ENHANCEMENT] Config: Print human-readable sizes with unit instead of raw numbers. #9361 +* [ENHANCEMENT] HTTP: Re-enable HTTP/2. #9398 +* [ENHANCEMENT] Kubernetes SD: Warn user if number of endpoints exceeds limit. #9467 +* [ENHANCEMENT] OAuth2: Add TLS configuration to token requests. #9550 +* [ENHANCEMENT] PromQL: Several optimizations. #9365 #9360 #9362 #9552 +* [ENHANCEMENT] PromQL: Make aggregations deterministic in instant queries. #9459 +* [ENHANCEMENT] Rules: Add the ability to limit number of alerts or series. #9260 #9541 +* [ENHANCEMENT] SD: Experimental discovery manager to avoid restarts upon reload. 
Disabled by default, enable with flag `--enable-feature=new-service-discovery-manager`. #9349 #9537 +* [ENHANCEMENT] UI: Debounce timerange setting changes. #9359 +* [BUGFIX] Backfill: Apply rule labels after query labels. #9421 +* [BUGFIX] Scrape: Resolve conflicts between multiple exported label prefixes. #9479 #9518 +* [BUGFIX] Scrape: Restart scrape loops when `__scrape_interval__` is changed. #9551 +* [BUGFIX] TSDB: Fix memory leak in samples deletion. #9151 +* [BUGFIX] UI: Use consistent margin-bottom for all alert kinds. #9318 + ## 2.30.3 / 2021-10-05 * [BUGFIX] TSDB: Fix panic on failed snapshot replay. #9438 @@ -191,7 +246,7 @@ Alertmanager API v2 was released in Alertmanager v0.16.0 (released in January * [ENHANCEMENT] Mixins: Scope grafana configuration. #8332 * [ENHANCEMENT] Kubernetes SD: Add endpoint labels metadata. #8273 * [ENHANCEMENT] UI: Expose total number of label pairs in head in TSDB stats page. #8343 -* [ENHANCEMENT] TSDB: Reload blocks every minute, to detect new blocks and enforce retention more often. #8343 +* [ENHANCEMENT] TSDB: Reload blocks every minute, to detect new blocks and enforce retention more often. #8340 * [BUGFIX] API: Fix global URL when external address has no port. #8359 * [BUGFIX] Backfill: Fix error message handling. #8432 * [BUGFIX] Backfill: Fix "add sample: out of bounds" error when series span an entire block. 
#8476 diff --git a/MAINTAINERS.md b/MAINTAINERS.md index cb99bca96..9f24ddf8c 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -9,6 +9,7 @@ Julien Pivotto ( / @roidelapluie) and Levi Harrison * `storage` * `remote`: Chris Marchbanks ( / @csmarchbanks), Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie ( / @tomwilkie) * `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka) + * `agent`: Robert Fratto ( / @rfratto) * `web` * `ui`: Julius Volz ( / @juliusv) * `module`: Augustin Husson ( @nexucis) diff --git a/Makefile b/Makefile index b4961541c..1d4291770 100644 --- a/Makefile +++ b/Makefile @@ -55,8 +55,8 @@ assets: ui-install ui-build # Un-setting GOOS and GOARCH here because the generated Go code is always the same, # but the cached object code is incompatible between architectures and OSes (which # breaks cross-building for different combinations on CI in the same container). - cd web/ui && GO111MODULE=$(GO111MODULE) GOOS= GOARCH= $(GO) generate -x -v $(GOOPTS) - @$(GOFMT) -w ./web/ui + cd $(UI_PATH) && GO111MODULE=$(GO111MODULE) GOOS= GOARCH= $(GO) generate -x -v $(GOOPTS) + @$(GOFMT) -w ./$(UI_PATH) .PHONY: test # If we only want to only test go code we have to change the test target diff --git a/Makefile.common b/Makefile.common index 99e8f9f1b..bf7304e58 100644 --- a/Makefile.common +++ b/Makefile.common @@ -78,7 +78,7 @@ ifneq ($(shell which gotestsum),) endif endif -PROMU_VERSION ?= 0.12.0 +PROMU_VERSION ?= 0.13.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz GOLANGCI_LINT := @@ -160,7 +160,7 @@ endif update-go-deps: @echo ">> updating Go dependencies" @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get $$m; \ + $(GO) get -d $$m; \ done GO111MODULE=$(GO111MODULE) $(GO) mod tidy ifneq (,$(wildcard vendor)) diff --git a/README.md b/README.md index 
4a1aadce5..25f9fc97d 100644 --- a/README.md +++ b/README.md @@ -107,11 +107,11 @@ You can build a docker image locally with the following commands: ## React UI Development -For more information on building, running, and developing on the new React-based UI, see the React app's [README.md](web/ui/react-app/README.md). +For more information on building, running, and developing on the new React-based UI, see the React app's [README.md](web/ui/README.md). ## More information - * The source code is periodically indexed: [Prometheus Core](https://pkg.go.dev/github.com/prometheus/prometheus). + * The source code is periodically indexed, but due to an issue with versioning, the "latest" docs shown on Godoc are outdated. Instead, you can use [the docs for v2.31.1](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab). * You will find a CircleCI configuration in [`.circleci/config.yml`](.circleci/config.yml). * See the [Community page](https://prometheus.io/community) for how to reach the Prometheus developers and users on various communication channels. diff --git a/VERSION b/VERSION index e88ba89ba..e41242108 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.30.3 +2.32.0-beta.0 diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index b405ef670..e5ef5c521 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -58,24 +58,29 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" _ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations. 
+ "github.com/prometheus/prometheus/discovery/legacymanager" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/notifier" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/logging" - "github.com/prometheus/prometheus/pkg/relabel" - prom_runtime "github.com/prometheus/prometheus/pkg/runtime" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage/remote" "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/agent" + "github.com/prometheus/prometheus/util/logging" + prom_runtime "github.com/prometheus/prometheus/util/runtime" "github.com/prometheus/prometheus/util/strutil" "github.com/prometheus/prometheus/web" ) var ( + appName = "prometheus" + configSuccess = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "prometheus_config_last_reload_successful", Help: "Whether the last configuration reload attempt was successful.", @@ -87,10 +92,13 @@ var ( defaultRetentionString = "15d" defaultRetentionDuration model.Duration + + agentMode bool + agentOnlyFlags, serverOnlyFlags []string ) func init() { - prometheus.MustRegister(version.NewCollector("prometheus")) + prometheus.MustRegister(version.NewCollector(strings.ReplaceAll(appName, "-", "_"))) var err error defaultRetentionDuration, err = model.ParseDuration(defaultRetentionString) @@ -99,10 +107,31 @@ func init() { } } +// serverOnlyFlag creates server-only kingpin flag. 
+func serverOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause { + return app.Flag(name, fmt.Sprintf("%s Use with server mode only.", help)). + PreAction(func(parseContext *kingpin.ParseContext) error { + // This will be invoked only if flag is actually provided by user. + serverOnlyFlags = append(serverOnlyFlags, "--"+name) + return nil + }) +} + +// agentOnlyFlag creates agent-only kingpin flag. +func agentOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause { + return app.Flag(name, fmt.Sprintf("%s Use with agent mode only.", help)). + PreAction(func(parseContext *kingpin.ParseContext) error { + // This will be invoked only if flag is actually provided by user. + agentOnlyFlags = append(agentOnlyFlags, "--"+name) + return nil + }) +} + type flagConfig struct { configFile string - localStoragePath string + agentStoragePath string + serverStoragePath string notifier notifier.Options forGracePeriod model.Duration outageTolerance model.Duration @@ -110,6 +139,7 @@ type flagConfig struct { web web.Options scrape scrape.Options tsdb tsdbOptions + agent agentOptions lookbackDelta model.Duration webTimeout model.Duration queryTimeout model.Duration @@ -123,6 +153,7 @@ type flagConfig struct { enablePromQLAtModifier bool enablePromQLNegativeOffset bool enableExpandExternalLabels bool + enableNewSDManager bool prometheusURL string corsRegexString string @@ -157,6 +188,12 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { case "extra-scrape-metrics": c.scrape.ExtraMetrics = true level.Info(logger).Log("msg", "Experimental additional scrape metrics") + case "new-service-discovery-manager": + c.enableNewSDManager = true + level.Info(logger).Log("msg", "Experimental service discovery manager") + case "agent": + agentMode = true + level.Info(logger).Log("msg", "Experimental agent mode enabled.") case "": continue default: @@ -191,7 +228,7 @@ func main() { a := kingpin.New(filepath.Base(os.Args[0]), "The 
Prometheus monitoring server").UsageWriter(os.Stdout) - a.Version(version.Print("prometheus")) + a.Version(version.Print(appName)) a.HelpFlag.Short('h') @@ -239,61 +276,86 @@ func main() { a.Flag("web.cors.origin", `Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\.com'`). Default(".*").StringVar(&cfg.corsRegexString) - a.Flag("storage.tsdb.path", "Base path for metrics storage."). - Default("data/").StringVar(&cfg.localStoragePath) + serverOnlyFlag(a, "storage.tsdb.path", "Base path for metrics storage."). + Default("data/").StringVar(&cfg.serverStoragePath) - a.Flag("storage.tsdb.min-block-duration", "Minimum duration of a data block before being persisted. For use in testing."). + serverOnlyFlag(a, "storage.tsdb.min-block-duration", "Minimum duration of a data block before being persisted. For use in testing."). Hidden().Default("2h").SetValue(&cfg.tsdb.MinBlockDuration) - a.Flag("storage.tsdb.max-block-duration", + serverOnlyFlag(a, "storage.tsdb.max-block-duration", "Maximum duration compacted blocks may span. For use in testing. (Defaults to 10% of the retention period.)"). Hidden().PlaceHolder("").SetValue(&cfg.tsdb.MaxBlockDuration) - a.Flag("storage.tsdb.max-block-chunk-segment-size", + serverOnlyFlag(a, "storage.tsdb.max-block-chunk-segment-size", "The maximum size for a single chunk segment in a block. Example: 512MB"). Hidden().PlaceHolder("").BytesVar(&cfg.tsdb.MaxBlockChunkSegmentSize) - a.Flag("storage.tsdb.wal-segment-size", + serverOnlyFlag(a, "storage.tsdb.wal-segment-size", "Size at which to split the tsdb WAL segment files. Example: 100MB"). Hidden().PlaceHolder("").BytesVar(&cfg.tsdb.WALSegmentSize) - a.Flag("storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead."). + serverOnlyFlag(a, "storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. 
This flag has been deprecated, use \"storage.tsdb.retention.time\" instead."). SetValue(&oldFlagRetentionDuration) - a.Flag("storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms."). + serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms."). SetValue(&newFlagRetentionDuration) - a.Flag("storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\"."). + serverOnlyFlag(a, "storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\"."). BytesVar(&cfg.tsdb.MaxBytes) - a.Flag("storage.tsdb.no-lockfile", "Do not create lockfile in data directory."). + serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory."). Default("false").BoolVar(&cfg.tsdb.NoLockfile) - a.Flag("storage.tsdb.allow-overlapping-blocks", "Allow overlapping blocks, which in turn enables vertical compaction and vertical query merge."). + serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "Allow overlapping blocks, which in turn enables vertical compaction and vertical query merge."). Default("false").BoolVar(&cfg.tsdb.AllowOverlappingBlocks) - a.Flag("storage.tsdb.wal-compression", "Compress the tsdb WAL."). + serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL."). 
Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression) + agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage."). + Default("data-agent/").StringVar(&cfg.agentStoragePath) + + agentOnlyFlag(a, "storage.agent.wal-segment-size", + "Size at which to split WAL segment files. Example: 100MB"). + Hidden().PlaceHolder("").BytesVar(&cfg.agent.WALSegmentSize) + + agentOnlyFlag(a, "storage.agent.wal-compression", "Compress the agent WAL."). + Default("true").BoolVar(&cfg.agent.WALCompression) + + agentOnlyFlag(a, "storage.agent.wal-truncate-frequency", + "The frequency at which to truncate the WAL and remove old data."). + Hidden().PlaceHolder("").SetValue(&cfg.agent.TruncateFrequency) + + agentOnlyFlag(a, "storage.agent.retention.min-time", + "Minimum age samples may be before being considered for deletion when the WAL is truncated"). + SetValue(&cfg.agent.MinWALTime) + + agentOnlyFlag(a, "storage.agent.retention.max-time", + "Maximum age samples may be before being forcibly deleted when the WAL is truncated"). + SetValue(&cfg.agent.MaxWALTime) + + agentOnlyFlag(a, "storage.agent.no-lockfile", "Do not create lockfile in data directory."). + Default("false").BoolVar(&cfg.agent.NoLockfile) + a.Flag("storage.remote.flush-deadline", "How long to wait flushing sample on shutdown or config reload."). Default("1m").PlaceHolder("").SetValue(&cfg.RemoteFlushDeadline) - a.Flag("storage.remote.read-sample-limit", "Maximum overall number of samples to return via the remote read interface, in a single query. 0 means no limit. This limit is ignored for streamed response types."). + serverOnlyFlag(a, "storage.remote.read-sample-limit", "Maximum overall number of samples to return via the remote read interface, in a single query. 0 means no limit. This limit is ignored for streamed response types."). Default("5e7").IntVar(&cfg.web.RemoteReadSampleLimit) - a.Flag("storage.remote.read-concurrent-limit", "Maximum number of concurrent remote read calls. 
0 means no limit."). + serverOnlyFlag(a, "storage.remote.read-concurrent-limit", "Maximum number of concurrent remote read calls. 0 means no limit."). Default("10").IntVar(&cfg.web.RemoteReadConcurrencyLimit) - a.Flag("storage.remote.read-max-bytes-in-frame", "Maximum number of bytes in a single frame for streaming remote read response types before marshalling. Note that client might have limit on frame size as well. 1MB as recommended by protobuf by default."). + serverOnlyFlag(a, "storage.remote.read-max-bytes-in-frame", "Maximum number of bytes in a single frame for streaming remote read response types before marshalling. Note that client might have limit on frame size as well. 1MB as recommended by protobuf by default."). Default("1048576").IntVar(&cfg.web.RemoteReadBytesInFrame) - a.Flag("rules.alert.for-outage-tolerance", "Max time to tolerate prometheus outage for restoring \"for\" state of alert."). + serverOnlyFlag(a, "rules.alert.for-outage-tolerance", "Max time to tolerate prometheus outage for restoring \"for\" state of alert."). Default("1h").SetValue(&cfg.outageTolerance) - a.Flag("rules.alert.for-grace-period", "Minimum duration between alert and restored \"for\" state. This is maintained only for alerts with configured \"for\" time greater than grace period."). + serverOnlyFlag(a, "rules.alert.for-grace-period", "Minimum duration between alert and restored \"for\" state. This is maintained only for alerts with configured \"for\" time greater than grace period."). Default("10m").SetValue(&cfg.forGracePeriod) - a.Flag("rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager."). + serverOnlyFlag(a, "rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager."). Default("1m").SetValue(&cfg.resendDelay) a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. 
See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release."). @@ -302,25 +364,25 @@ func main() { a.Flag("scrape.timestamp-tolerance", "Timestamp tolerance. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release."). Hidden().Default("2ms").DurationVar(&scrape.ScrapeTimestampTolerance) - a.Flag("alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications."). + serverOnlyFlag(a, "alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications."). Default("10000").IntVar(&cfg.notifier.QueueCapacity) // TODO: Remove in Prometheus 3.0. alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String() - a.Flag("query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations and federation."). + serverOnlyFlag(a, "query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations and federation."). Default("5m").SetValue(&cfg.lookbackDelta) - a.Flag("query.timeout", "Maximum time a query may take before being aborted."). + serverOnlyFlag(a, "query.timeout", "Maximum time a query may take before being aborted."). Default("2m").SetValue(&cfg.queryTimeout) - a.Flag("query.max-concurrency", "Maximum number of queries executed concurrently."). + serverOnlyFlag(a, "query.max-concurrency", "Maximum number of queries executed concurrently."). Default("20").IntVar(&cfg.queryConcurrency) - a.Flag("query.max-samples", "Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return."). 
+ serverOnlyFlag(a, "query.max-samples", "Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return."). Default("50000000").IntVar(&cfg.queryMaxSamples) - a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver, extra-scrape-metrics. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver, extra-scrape-metrics, new-service-discovery-manager. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). Default("").StringsVar(&cfg.featureList) promlogflag.AddFlags(a, &cfg.promlogConfig) @@ -339,6 +401,21 @@ func main() { os.Exit(1) } + if agentMode && len(serverOnlyFlags) > 0 { + fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags) + os.Exit(3) + } + + if !agentMode && len(agentOnlyFlags) > 0 { + fmt.Fprintf(os.Stderr, "The following flag(s) can only be used in agent mode: %q", agentOnlyFlags) + os.Exit(3) + } + + localStoragePath := cfg.serverStoragePath + if agentMode { + localStoragePath = cfg.agentStoragePath + } + cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddress) if err != nil { fmt.Fprintln(os.Stderr, errors.Wrapf(err, "parse external URL %q", cfg.prometheusURL)) @@ -357,7 +434,7 @@ func main() { // Throw error for invalid config before starting other components. 
var cfgFile *config.Config - if cfgFile, err = config.LoadFile(cfg.configFile, false, log.NewNopLogger()); err != nil { + if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, log.NewNopLogger()); err != nil { level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "err", err) os.Exit(2) } @@ -385,7 +462,8 @@ func main() { // RoutePrefix must always be at least '/'. cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/") - { // Time retention settings. + if !agentMode { + // Time retention settings. if oldFlagRetentionDuration != 0 { level.Warn(logger).Log("deprecation_notice", "'storage.tsdb.retention' flag is deprecated use 'storage.tsdb.retention.time' instead.") cfg.tsdb.RetentionDuration = oldFlagRetentionDuration @@ -410,9 +488,8 @@ func main() { cfg.tsdb.RetentionDuration = y level.Warn(logger).Log("msg", "Time retention value is too high. Limiting to: "+y.String()) } - } - { // Max block size settings. + // Max block size settings. 
if cfg.tsdb.MaxBlockDuration == 0 { maxBlockDuration, err := model.ParseDuration("31d") if err != nil { @@ -449,7 +526,7 @@ func main() { var ( localStorage = &readyStorage{stats: tsdb.NewDBStats()} scraper = &readyScrapeManager{} - remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, cfg.localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper) + remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper) fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage) ) @@ -460,19 +537,35 @@ func main() { notifierManager = notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier")) ctxScrape, cancelScrape = context.WithCancel(context.Background()) - discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), discovery.Name("scrape")) - ctxNotify, cancelNotify = context.WithCancel(context.Background()) - discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), discovery.Name("notify")) + discoveryManagerScrape discoveryManager + discoveryManagerNotify discoveryManager + ) + if cfg.enableNewSDManager { + discovery.RegisterMetrics() + discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), discovery.Name("scrape")) + discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), discovery.Name("notify")) + } else { + legacymanager.RegisterMetrics() + discoveryManagerScrape = legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), legacymanager.Name("scrape")) + discoveryManagerNotify = legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery 
manager notify"), legacymanager.Name("notify")) + } + + var ( scrapeManager = scrape.NewManager(&cfg.scrape, log.With(logger, "component", "scrape manager"), fanoutStorage) - opts = promql.EngineOpts{ + queryEngine *promql.Engine + ruleManager *rules.Manager + ) + + if !agentMode { + opts := promql.EngineOpts{ Logger: log.With(logger, "component", "query engine"), Reg: prometheus.DefaultRegisterer, MaxSamples: cfg.queryMaxSamples, Timeout: time.Duration(cfg.queryTimeout), - ActiveQueryTracker: promql.NewActiveQueryTracker(cfg.localStoragePath, cfg.queryConcurrency, log.With(logger, "component", "activeQueryTracker")), + ActiveQueryTracker: promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, log.With(logger, "component", "activeQueryTracker")), LookbackDelta: time.Duration(cfg.lookbackDelta), NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get, EnableAtModifier: cfg.enablePromQLAtModifier, @@ -494,14 +587,14 @@ func main() { ForGracePeriod: time.Duration(cfg.forGracePeriod), ResendDelay: time.Duration(cfg.resendDelay), }) - ) + } scraper.Set(scrapeManager) cfg.web.Context = ctxWeb cfg.web.TSDBRetentionDuration = cfg.tsdb.RetentionDuration cfg.web.TSDBMaxBytes = cfg.tsdb.MaxBytes - cfg.web.TSDBDir = cfg.localStoragePath + cfg.web.TSDBDir = localStoragePath cfg.web.LocalStorage = localStorage cfg.web.Storage = fanoutStorage cfg.web.ExemplarStorage = localStorage @@ -510,6 +603,7 @@ func main() { cfg.web.RuleManager = ruleManager cfg.web.Notifier = notifierManager cfg.web.LookbackDelta = time.Duration(cfg.lookbackDelta) + cfg.web.IsAgent = agentMode cfg.web.Version = &web.PrometheusVersion{ Version: version.Version, @@ -541,7 +635,7 @@ func main() { ) // This is passed to ruleManager.Update(). 
- var externalURL = cfg.web.ExternalURL.String() + externalURL := cfg.web.ExternalURL.String() reloaders := []reloader{ { @@ -556,6 +650,11 @@ func main() { }, { name: "query_engine", reloader: func(cfg *config.Config) error { + if agentMode { + // No-op in Agent mode. + return nil + } + if cfg.GlobalConfig.QueryLogFile == "" { queryEngine.SetQueryLogger(nil) return nil @@ -597,6 +696,11 @@ func main() { }, { name: "rules", reloader: func(cfg *config.Config) error { + if agentMode { + // No-op in Agent mode + return nil + } + // Get all rule files matching the configuration paths. var files []string for _, pat := range cfg.RuleFiles { @@ -763,7 +867,6 @@ func main() { return nil } } - }, func(err error) { // Wait for any in-progress reloads to complete to avoid @@ -801,7 +904,7 @@ func main() { }, ) } - { + if !agentMode { // Rule manager. g.Add( func() error { @@ -813,8 +916,7 @@ func main() { ruleManager.Stop() }, ) - } - { + // TSDB. opts := cfg.tsdb.ToTSDBOptions() cancel := make(chan struct{}) @@ -832,18 +934,12 @@ func main() { } } - db, err := openDBWithMetrics( - cfg.localStoragePath, - logger, - prometheus.DefaultRegisterer, - &opts, - localStorage.getStats(), - ) + db, err := openDBWithMetrics(localStoragePath, logger, prometheus.DefaultRegisterer, &opts, localStorage.getStats()) if err != nil { return errors.Wrapf(err, "opening storage failed") } - switch fsType := prom_runtime.Statfs(cfg.localStoragePath); fsType { + switch fsType := prom_runtime.Statfs(localStoragePath); fsType { case "NFS_SUPER_MAGIC": level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.") default: @@ -876,6 +972,59 @@ func main() { }, ) } + if agentMode { + // WAL storage. 
+ opts := cfg.agent.ToAgentOptions() + cancel := make(chan struct{}) + g.Add( + func() error { + level.Info(logger).Log("msg", "Starting WAL storage ...") + if cfg.agent.WALSegmentSize != 0 { + if cfg.agent.WALSegmentSize < 10*1024*1024 || cfg.agent.WALSegmentSize > 256*1024*1024 { + return errors.New("flag 'storage.agent.wal-segment-size' must be set between 10MB and 256MB") + } + } + db, err := agent.Open( + logger, + prometheus.DefaultRegisterer, + remoteStorage, + localStoragePath, + &opts, + ) + if err != nil { + return errors.Wrap(err, "opening storage failed") + } + + switch fsType := prom_runtime.Statfs(localStoragePath); fsType { + case "NFS_SUPER_MAGIC": + level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.") + default: + level.Info(logger).Log("fs_type", fsType) + } + + level.Info(logger).Log("msg", "Agent WAL storage started") + level.Debug(logger).Log("msg", "Agent WAL storage options", + "WALSegmentSize", cfg.agent.WALSegmentSize, + "WALCompression", cfg.agent.WALCompression, + "StripeSize", cfg.agent.StripeSize, + "TruncateFrequency", cfg.agent.TruncateFrequency, + "MinWALTime", cfg.agent.MinWALTime, + "MaxWALTime", cfg.agent.MaxWALTime, + ) + + localStorage.Set(db, 0) + close(dbOpen) + <-cancel + return nil + }, + func(e error) { + if err := fanoutStorage.Close(); err != nil { + level.Error(logger).Log("msg", "Error stopping storage", "err", err) + } + close(cancel) + }, + ) + } { // Web handler. 
g.Add( @@ -961,6 +1110,7 @@ type safePromQLNoStepSubqueryInterval struct { func durationToInt64Millis(d time.Duration) int64 { return int64(d / time.Millisecond) } + func (i *safePromQLNoStepSubqueryInterval) Set(ev model.Duration) { i.value.Store(durationToInt64Millis(time.Duration(ev))) } @@ -974,7 +1124,7 @@ type reloader struct { reloader func(*config.Config) error } -func reloadConfig(filename string, expandExternalLabels bool, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) { +func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) { start := time.Now() timings := []interface{}{} level.Info(logger).Log("msg", "Loading configuration file", "filename", filename) @@ -988,7 +1138,7 @@ func reloadConfig(filename string, expandExternalLabels bool, enableExemplarStor } }() - conf, err := config.LoadFile(filename, expandExternalLabels, logger) + conf, err := config.LoadFile(filename, agentMode, expandExternalLabels, logger) if err != nil { return errors.Wrapf(err, "couldn't load configuration (--config.file=%q)", filename) } @@ -1099,18 +1249,21 @@ func sendAlerts(s sender, externalURL string) rules.NotifyFunc { // storage at a later point in time. type readyStorage struct { mtx sync.RWMutex - db *tsdb.DB + db storage.Storage startTimeMargin int64 stats *tsdb.DBStats } func (s *readyStorage) ApplyConfig(conf *config.Config) error { db := s.get() - return db.ApplyConfig(conf) + if db, ok := db.(*tsdb.DB); ok { + return db.ApplyConfig(conf) + } + return nil } // Set the storage. 
-func (s *readyStorage) Set(db *tsdb.DB, startTimeMargin int64) { +func (s *readyStorage) Set(db storage.Storage, startTimeMargin int64) { s.mtx.Lock() defer s.mtx.Unlock() @@ -1118,7 +1271,7 @@ func (s *readyStorage) Set(db *tsdb.DB, startTimeMargin int64) { s.startTimeMargin = startTimeMargin } -func (s *readyStorage) get() *tsdb.DB { +func (s *readyStorage) get() storage.Storage { s.mtx.RLock() x := s.db s.mtx.RUnlock() @@ -1135,15 +1288,21 @@ func (s *readyStorage) getStats() *tsdb.DBStats { // StartTime implements the Storage interface. func (s *readyStorage) StartTime() (int64, error) { if x := s.get(); x != nil { - var startTime int64 - - if len(x.Blocks()) > 0 { - startTime = x.Blocks()[0].Meta().MinTime - } else { - startTime = time.Now().Unix() * 1000 + switch db := x.(type) { + case *tsdb.DB: + var startTime int64 + if len(db.Blocks()) > 0 { + startTime = db.Blocks()[0].Meta().MinTime + } else { + startTime = time.Now().Unix() * 1000 + } + // Add a safety margin as it may take a few minutes for everything to spin up. + return startTime + s.startTimeMargin, nil + case *agent.DB: + return db.StartTime() + default: + panic(fmt.Sprintf("unknown storage type %T", db)) } - // Add a safety margin as it may take a few minutes for everything to spin up. 
- return startTime + s.startTimeMargin, nil } return math.MaxInt64, tsdb.ErrNotReady @@ -1167,7 +1326,14 @@ func (s *readyStorage) ChunkQuerier(ctx context.Context, mint, maxt int64) (stor func (s *readyStorage) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) { if x := s.get(); x != nil { - return x.ExemplarQuerier(ctx) + switch db := x.(type) { + case *tsdb.DB: + return db.ExemplarQuerier(ctx) + case *agent.DB: + return nil, agent.ErrUnsupported + default: + panic(fmt.Sprintf("unknown storage type %T", db)) + } } return nil, tsdb.ErrNotReady } @@ -1182,15 +1348,15 @@ func (s *readyStorage) Appender(ctx context.Context) storage.Appender { type notReadyAppender struct{} -func (n notReadyAppender) Append(ref uint64, l labels.Labels, t int64, v float64) (uint64, error) { +func (n notReadyAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } -func (n notReadyAppender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) { +func (n notReadyAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } -func (n notReadyAppender) AppendHistogram(ref uint64, l labels.Labels, t int64, h *histogram.Histogram) (uint64, error) { +func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } @@ -1209,7 +1375,14 @@ func (s *readyStorage) Close() error { // CleanTombstones implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces. 
func (s *readyStorage) CleanTombstones() error { if x := s.get(); x != nil { - return x.CleanTombstones() + switch db := x.(type) { + case *tsdb.DB: + return db.CleanTombstones() + case *agent.DB: + return agent.ErrUnsupported + default: + panic(fmt.Sprintf("unknown storage type %T", db)) + } } return tsdb.ErrNotReady } @@ -1217,7 +1390,14 @@ func (s *readyStorage) CleanTombstones() error { // Delete implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces. func (s *readyStorage) Delete(mint, maxt int64, ms ...*labels.Matcher) error { if x := s.get(); x != nil { - return x.Delete(mint, maxt, ms...) + switch db := x.(type) { + case *tsdb.DB: + return db.Delete(mint, maxt, ms...) + case *agent.DB: + return agent.ErrUnsupported + default: + panic(fmt.Sprintf("unknown storage type %T", db)) + } } return tsdb.ErrNotReady } @@ -1225,7 +1405,14 @@ func (s *readyStorage) Delete(mint, maxt int64, ms ...*labels.Matcher) error { // Snapshot implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces. func (s *readyStorage) Snapshot(dir string, withHead bool) error { if x := s.get(); x != nil { - return x.Snapshot(dir, withHead) + switch db := x.(type) { + case *tsdb.DB: + return db.Snapshot(dir, withHead) + case *agent.DB: + return agent.ErrUnsupported + default: + panic(fmt.Sprintf("unknown storage type %T", db)) + } } return tsdb.ErrNotReady } @@ -1233,7 +1420,14 @@ func (s *readyStorage) Snapshot(dir string, withHead bool) error { // Stats implements the api_v1.TSDBAdminStats interface. 
func (s *readyStorage) Stats(statsByLabelName string) (*tsdb.Stats, error) { if x := s.get(); x != nil { - return x.Head().Stats(statsByLabelName), nil + switch db := x.(type) { + case *tsdb.DB: + return db.Head().Stats(statsByLabelName), nil + case *agent.DB: + return nil, agent.ErrUnsupported + default: + panic(fmt.Sprintf("unknown storage type %T", db)) + } } return nil, tsdb.ErrNotReady } @@ -1311,6 +1505,29 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { } } +// agentOptions is a version of agent.Options with defined units. This is required +// as agent.Option fields are unit agnostic (time). +type agentOptions struct { + WALSegmentSize units.Base2Bytes + WALCompression bool + StripeSize int + TruncateFrequency model.Duration + MinWALTime, MaxWALTime model.Duration + NoLockfile bool +} + +func (opts agentOptions) ToAgentOptions() agent.Options { + return agent.Options{ + WALSegmentSize: int(opts.WALSegmentSize), + WALCompression: opts.WALCompression, + StripeSize: opts.StripeSize, + TruncateFrequency: time.Duration(opts.TruncateFrequency), + MinWALTime: durationToInt64Millis(time.Duration(opts.MinWALTime)), + MaxWALTime: durationToInt64Millis(time.Duration(opts.MaxWALTime)), + NoLockfile: opts.NoLockfile, + } +} + func initTracing(logger log.Logger) (io.Closer, error) { // Set tracing configuration defaults. cfg := &jcfg.Configuration{ @@ -1351,3 +1568,12 @@ func (l jaegerLogger) Infof(msg string, args ...interface{}) { keyvals := []interface{}{"msg", fmt.Sprintf(msg, args...)} level.Info(l.logger).Log(keyvals...) } + +// discoveryManager interfaces the discovery manager. This is used to keep using +// the manager that restarts SD's on reload for a few releases until we feel +// the new manager can be enabled for all users. 
+type discoveryManager interface { + ApplyConfig(cfg map[string]discovery.Configs) error + Run() error + SyncCh() <-chan map[string][]*targetgroup.Group +} diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index 182b6d228..c3915c7ca 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -14,6 +14,7 @@ package main import ( + "bytes" "context" "fmt" "io/ioutil" @@ -21,6 +22,7 @@ import ( "os" "os/exec" "path/filepath" + "strings" "syscall" "testing" "time" @@ -30,14 +32,16 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/notifier" - "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/rules" ) -var promPath = os.Args[0] -var promConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml") -var promData = filepath.Join(os.TempDir(), "data") +var ( + promPath = os.Args[0] + promConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml") + agentConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus-agent.yml") +) func TestMain(m *testing.M) { for i, arg := range os.Args { @@ -52,7 +56,6 @@ func TestMain(m *testing.M) { os.Setenv("no_proxy", "localhost,127.0.0.1,0.0.0.0,:") exitCode := m.Run() - os.RemoveAll(promData) os.Exit(exitCode) } @@ -202,7 +205,7 @@ func TestWALSegmentSizeBounds(t *testing.T) { } for size, expectedExitStatus := range map[string]int{"9MB": 1, "257MB": 1, "10": 2, "1GB": 1, "12MB": 0} { - prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--config.file="+promConfig) + prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data")) // Log stderr in case of failure. 
stderr, err := prom.StderrPipe() @@ -223,6 +226,7 @@ func TestWALSegmentSizeBounds(t *testing.T) { t.Errorf("prometheus should be still running: %v", err) case <-time.After(5 * time.Second): prom.Process.Kill() + <-done } continue } @@ -239,12 +243,14 @@ func TestWALSegmentSizeBounds(t *testing.T) { } func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) { + t.Parallel() + if testing.Short() { t.Skip("skipping test in short mode.") } for size, expectedExitStatus := range map[string]int{"512KB": 1, "1MB": 0} { - prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--config.file="+promConfig) + prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data")) // Log stderr in case of failure. stderr, err := prom.StderrPipe() @@ -265,6 +271,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) { t.Errorf("prometheus should be still running: %v", err) case <-time.After(5 * time.Second): prom.Process.Kill() + <-done } continue } @@ -347,3 +354,130 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames } return res } + +func TestAgentSuccessfulStartup(t *testing.T) { + prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+agentConfig) + require.NoError(t, prom.Start()) + + actualExitStatus := 0 + done := make(chan error, 1) + + go func() { done <- prom.Wait() }() + select { + case err := <-done: + t.Logf("prometheus agent should be still running: %v", err) + actualExitStatus = prom.ProcessState.ExitCode() + case <-time.After(5 * time.Second): + prom.Process.Kill() + } + require.Equal(t, 0, actualExitStatus) +} + +func TestAgentFailedStartupWithServerFlag(t *testing.T) { + prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--config.file="+promConfig) + + output := 
bytes.Buffer{} + prom.Stderr = &output + require.NoError(t, prom.Start()) + + actualExitStatus := 0 + done := make(chan error, 1) + + go func() { done <- prom.Wait() }() + select { + case err := <-done: + t.Logf("prometheus agent should not be running: %v", err) + actualExitStatus = prom.ProcessState.ExitCode() + case <-time.After(5 * time.Second): + prom.Process.Kill() + } + + require.Equal(t, 3, actualExitStatus) + + // Assert on last line. + lines := strings.Split(output.String(), "\n") + last := lines[len(lines)-1] + require.Equal(t, "The following flag(s) can not be used in agent mode: [\"--storage.tsdb.path\"]", last) +} + +func TestAgentFailedStartupWithInvalidConfig(t *testing.T) { + prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+promConfig) + require.NoError(t, prom.Start()) + + actualExitStatus := 0 + done := make(chan error, 1) + + go func() { done <- prom.Wait() }() + select { + case err := <-done: + t.Logf("prometheus agent should not be running: %v", err) + actualExitStatus = prom.ProcessState.ExitCode() + case <-time.After(5 * time.Second): + prom.Process.Kill() + } + require.Equal(t, 2, actualExitStatus) +} + +func TestModeSpecificFlags(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + testcases := []struct { + mode string + arg string + exitStatus int + }{ + {"agent", "--storage.agent.path", 0}, + {"server", "--storage.tsdb.path", 0}, + {"server", "--storage.agent.path", 3}, + {"agent", "--storage.tsdb.path", 3}, + } + + for _, tc := range testcases { + t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) { + args := []string{"-test.main", tc.arg, t.TempDir()} + + if tc.mode == "agent" { + args = append(args, "--enable-feature=agent", "--config.file="+agentConfig) + } else { + args = append(args, "--config.file="+promConfig) + } + + prom := exec.Command(promPath, args...) + + // Log stderr in case of failure. 
+ stderr, err := prom.StderrPipe() + require.NoError(t, err) + go func() { + slurp, _ := ioutil.ReadAll(stderr) + t.Log(string(slurp)) + }() + + err = prom.Start() + require.NoError(t, err) + + if tc.exitStatus == 0 { + done := make(chan error, 1) + go func() { done <- prom.Wait() }() + select { + case err := <-done: + t.Errorf("prometheus should be still running: %v", err) + case <-time.After(5 * time.Second): + prom.Process.Kill() + <-done + } + return + } + + err = prom.Wait() + require.Error(t, err) + if exitError, ok := err.(*exec.ExitError); ok { + status := exitError.Sys().(syscall.WaitStatus) + require.Equal(t, tc.exitStatus, status.ExitStatus()) + } else { + t.Errorf("unable to retrieve the exit status for prometheus: %v", err) + } + }) + } +} diff --git a/cmd/prometheus/main_unix_test.go b/cmd/prometheus/main_unix_test.go index 7724f0dd1..d7eed636d 100644 --- a/cmd/prometheus/main_unix_test.go +++ b/cmd/prometheus/main_unix_test.go @@ -17,11 +17,14 @@ package main import ( + "fmt" "net/http" "os" "os/exec" "testing" "time" + + "github.com/prometheus/prometheus/util/testutil" ) // As soon as prometheus starts responding to http request it should be able to @@ -31,11 +34,12 @@ func TestStartupInterrupt(t *testing.T) { t.Skip("skipping test in short mode.") } - prom := exec.Command(promPath, "-test.main", "--config.file="+promConfig, "--storage.tsdb.path="+promData) + port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t)) + + prom := exec.Command(promPath, "-test.main", "--config.file="+promConfig, "--storage.tsdb.path="+t.TempDir(), "--web.listen-address=0.0.0.0"+port) err := prom.Start() if err != nil { - t.Errorf("execution error: %v", err) - return + t.Fatalf("execution error: %v", err) } done := make(chan error, 1) @@ -46,11 +50,13 @@ func TestStartupInterrupt(t *testing.T) { var startedOk bool var stoppedErr error + url := "http://localhost" + port + "/graph" + Loop: for x := 0; x < 10; x++ { // error=nil means prometheus has started so we can 
send the interrupt // signal and wait for the graceful shutdown. - if _, err := http.Get("http://localhost:9090/graph"); err == nil { + if _, err := http.Get(url); err == nil { startedOk = true prom.Process.Signal(os.Interrupt) select { @@ -64,12 +70,11 @@ Loop: } if !startedOk { - t.Errorf("prometheus didn't start in the specified timeout") - return + t.Fatal("prometheus didn't start in the specified timeout") } if err := prom.Process.Kill(); err == nil { t.Errorf("prometheus didn't shutdown gracefully after sending the Interrupt signal") } else if stoppedErr != nil && stoppedErr.Error() != "signal: interrupt" { // TODO - find a better way to detect when the process didn't exit as expected! - t.Errorf("prometheus exited with an unexpected error:%v", stoppedErr) + t.Errorf("prometheus exited with an unexpected error: %v", stoppedErr) } } diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go index 4e75ecae9..1b2bd4f97 100644 --- a/cmd/prometheus/query_log_test.go +++ b/cmd/prometheus/query_log_test.go @@ -31,6 +31,8 @@ import ( "time" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/util/testutil" ) type origin int @@ -412,7 +414,6 @@ func TestQueryLog(t *testing.T) { cwd, err := os.Getwd() require.NoError(t, err) - port := 15000 for _, host := range []string{"127.0.0.1", "[::1]"} { for _, prefix := range []string{"", "/foobar"} { for _, enabledAtStart := range []bool{true, false} { @@ -422,7 +423,7 @@ func TestQueryLog(t *testing.T) { host: host, enabledAtStart: enabledAtStart, prefix: prefix, - port: port, + port: testutil.RandomUnprivilegedPort(t), cwd: cwd, } diff --git a/cmd/promtool/archive.go b/cmd/promtool/archive.go index 520c26b63..cca148cb4 100644 --- a/cmd/promtool/archive.go +++ b/cmd/promtool/archive.go @@ -21,7 +21,7 @@ import ( "github.com/pkg/errors" ) -const filePerm = 0666 +const filePerm = 0o666 type tarGzFileWriter struct { tarWriter *tar.Writer diff --git a/cmd/promtool/backfill.go 
b/cmd/promtool/backfill.go index 3d05260e3..d1c8f9633 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -21,8 +21,9 @@ import ( "github.com/go-kit/log" "github.com/pkg/errors" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/textparse" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/tsdb" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" ) @@ -66,7 +67,7 @@ func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) { return maxt, mint, nil } -func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool) (returnErr error) { +func getCompatibleBlockDuration(maxBlockDuration int64) int64 { blockDuration := tsdb.DefaultBlockDuration if maxBlockDuration > tsdb.DefaultBlockDuration { ranges := tsdb.ExponentialBlockRanges(tsdb.DefaultBlockDuration, 10, 3) @@ -79,6 +80,11 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn } blockDuration = ranges[idx] } + return blockDuration +} + +func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool) (returnErr error) { + blockDuration := getCompatibleBlockDuration(maxBlockDuration) mint = blockDuration * (mint / blockDuration) db, err := tsdb.OpenDBReadOnly(outputDir, nil) @@ -100,7 +106,6 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn // The next sample is not in this timerange, we can avoid parsing // the file for this timerange. 
continue - } nextSampleTs = math.MaxInt64 @@ -202,13 +207,11 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn return nil }() - if err != nil { return errors.Wrap(err, "process blocks") } } return nil - } func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) { diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go index 4bbb5602c..c9493f134 100644 --- a/cmd/promtool/backfill_test.go +++ b/cmd/promtool/backfill_test.go @@ -22,10 +22,11 @@ import ( "testing" "time" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" - "github.com/stretchr/testify/require" ) type backfillSample struct { diff --git a/cmd/promtool/debug.go b/cmd/promtool/debug.go index 280a0d44b..23d613bb0 100644 --- a/cmd/promtool/debug.go +++ b/cmd/promtool/debug.go @@ -57,7 +57,6 @@ func debugWrite(cfg debugWriterConfig) error { return errors.Wrap(err, "error writing into the archive") } } - } if err := archiver.close(); err != nil { diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index ae46d84e5..7ae656c68 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -44,13 +44,16 @@ import ( yaml "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/file" _ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations. 
"github.com/prometheus/prometheus/discovery/kubernetes" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/rulefmt" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/rulefmt" + "github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/scrape" ) func main() { @@ -60,6 +63,11 @@ func main() { checkCmd := app.Command("check", "Check the resources for validity.") + sdCheckCmd := checkCmd.Command("service-discovery", "Perform service discovery for the given job name and report the results, including relabeling.") + sdConfigFile := sdCheckCmd.Arg("config-file", "The prometheus config file.").Required().ExistingFile() + sdJobName := sdCheckCmd.Arg("job", "The job to run service discovery for.").Required().String() + sdTimeout := sdCheckCmd.Flag("timeout", "The time to wait for discovery results.").Default("30s").Duration() + checkConfigCmd := checkCmd.Command("config", "Check if the config files are valid or not.") configFiles := checkConfigCmd.Arg( "config-files", @@ -79,6 +87,7 @@ func main() { ).Required().ExistingFiles() checkMetricsCmd := checkCmd.Command("metrics", checkMetricsUsage) + agentMode := checkConfigCmd.Flag("agent", "Check config file for Prometheus in Agent mode.").Bool() queryCmd := app.Command("query", "Run query against a Prometheus server.") queryCmdFmt := queryCmd.Flag("format", "Output format of the query.").Short('o').Default("promql").Enum("promql", "json") @@ -198,8 +207,11 @@ func main() { } switch parsedCmd { + case sdCheckCmd.FullCommand(): + os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout)) + case checkConfigCmd.FullCommand(): - os.Exit(CheckConfig(*configFiles...)) + os.Exit(CheckConfig(*agentMode, *configFiles...)) case checkWebConfigCmd.FullCommand(): os.Exit(CheckWebConfig(*webConfigFiles...)) @@ -245,21 +257,21 @@ func main() { case 
tsdbDumpCmd.FullCommand(): os.Exit(checkErr(dumpSamples(*dumpPath, *dumpMinTime, *dumpMaxTime))) - //TODO(aSquare14): Work on adding support for custom block size. + // TODO(aSquare14): Work on adding support for custom block size. case openMetricsImportCmd.FullCommand(): os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration)) case importRulesCmd.FullCommand(): - os.Exit(checkErr(importRules(*importRulesURL, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *importRulesFiles...))) + os.Exit(checkErr(importRules(*importRulesURL, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...))) } } // CheckConfig validates configuration files. -func CheckConfig(files ...string) int { +func CheckConfig(agentMode bool, files ...string) int { failed := false for _, f := range files { - ruleFiles, err := checkConfig(f) + ruleFiles, err := checkConfig(agentMode, f) if err != nil { fmt.Fprintln(os.Stderr, " FAILED:", err) failed = true @@ -314,10 +326,10 @@ func checkFileExists(fn string) error { return err } -func checkConfig(filename string) ([]string, error) { +func checkConfig(agentMode bool, filename string) ([]string, error) { fmt.Println("Checking", filename) - cfg, err := config.LoadFile(filename, false, log.NewNopLogger()) + cfg, err := config.LoadFile(filename, agentMode, false, log.NewNopLogger()) if err != nil { return nil, err } @@ -363,19 +375,60 @@ func checkConfig(filename string) ([]string, error) { } if len(files) != 0 { for _, f := range files { - err = checkSDFile(f) + var targetGroups []*targetgroup.Group + targetGroups, err = checkSDFile(f) if err != nil { return nil, errors.Errorf("checking SD file %q: %v", file, err) } + if err := checkTargetGroupsForScrapeConfig(targetGroups, scfg); err != nil { + return nil, err + } } continue } fmt.Printf(" WARNING: file %q for file_sd in scrape job %q does 
not exist\n", file, scfg.JobName) } + case discovery.StaticConfig: + if err := checkTargetGroupsForScrapeConfig(c, scfg); err != nil { + return nil, err + } } } } + alertConfig := cfg.AlertingConfig + for _, amcfg := range alertConfig.AlertmanagerConfigs { + for _, c := range amcfg.ServiceDiscoveryConfigs { + switch c := c.(type) { + case *file.SDConfig: + for _, file := range c.Files { + files, err := filepath.Glob(file) + if err != nil { + return nil, err + } + if len(files) != 0 { + for _, f := range files { + var targetGroups []*targetgroup.Group + targetGroups, err = checkSDFile(f) + if err != nil { + return nil, errors.Errorf("checking SD file %q: %v", file, err) + } + + if err := checkTargetGroupsForAlertmanager(targetGroups, amcfg); err != nil { + return nil, err + } + } + continue + } + fmt.Printf(" WARNING: file %q for file_sd in alertmanager config does not exist\n", file) + } + case discovery.StaticConfig: + if err := checkTargetGroupsForAlertmanager(c, amcfg); err != nil { + return nil, err + } + } + } + } return ruleFiles, nil } @@ -397,16 +450,16 @@ func checkTLSConfig(tlsConfig config_util.TLSConfig) error { return nil } -func checkSDFile(filename string) error { +func checkSDFile(filename string) ([]*targetgroup.Group, error) { fd, err := os.Open(filename) if err != nil { - return err + return nil, err } defer fd.Close() content, err := ioutil.ReadAll(fd) if err != nil { - return err + return nil, err } var targetGroups []*targetgroup.Group @@ -414,23 +467,23 @@ func checkSDFile(filename string) error { switch ext := filepath.Ext(filename); strings.ToLower(ext) { case ".json": if err := json.Unmarshal(content, &targetGroups); err != nil { - return err + return nil, err } case ".yml", ".yaml": if err := yaml.UnmarshalStrict(content, &targetGroups); err != nil { - return err + return nil, err } default: - return errors.Errorf("invalid file extension: %q", ext) + return nil, errors.Errorf("invalid file extension: %q", ext) } for i, tg := range 
targetGroups { if tg == nil { - return errors.Errorf("nil target group item found (index %d)", i) + return nil, errors.Errorf("nil target group item found (index %d)", i) } } - return nil + return targetGroups, nil } // CheckRules validates rule files. @@ -507,7 +560,6 @@ func checkDuplicates(groups []rulefmt.RuleGroup) []compareRuleType { var rules compareRuleTypes for _, group := range groups { - for _, rule := range group.Rules { rules = append(rules, compareRuleType{ metric: ruleMetric(rule), @@ -721,7 +773,7 @@ func QuerySeries(url *url.URL, matchers []string, start, end string, p printer) } // QueryLabels queries for label values against a Prometheus server. -func QueryLabels(url *url.URL, name string, start, end string, p printer) int { +func QueryLabels(url *url.URL, name, start, end string, p printer) int { if url.Scheme == "" { url.Scheme = "http" } @@ -899,11 +951,13 @@ type promqlPrinter struct{} func (p *promqlPrinter) printValue(v model.Value) { fmt.Println(v) } + func (p *promqlPrinter) printSeries(val []model.LabelSet) { for _, v := range val { fmt.Println(v) } } + func (p *promqlPrinter) printLabelValues(val model.LabelValues) { for _, v := range val { fmt.Println(v) @@ -916,10 +970,12 @@ func (j *jsonPrinter) printValue(v model.Value) { //nolint:errcheck json.NewEncoder(os.Stdout).Encode(v) } + func (j *jsonPrinter) printSeries(v []model.LabelSet) { //nolint:errcheck json.NewEncoder(os.Stdout).Encode(v) } + func (j *jsonPrinter) printLabelValues(v model.LabelValues) { //nolint:errcheck json.NewEncoder(os.Stdout).Encode(v) @@ -927,7 +983,7 @@ func (j *jsonPrinter) printLabelValues(v model.LabelValues) { // importRules backfills recording rules from the files provided. The output are blocks of data // at the outputDir location. 
-func importRules(url *url.URL, start, end, outputDir string, evalInterval time.Duration, files ...string) error { +func importRules(url *url.URL, start, end, outputDir string, evalInterval, maxBlockDuration time.Duration, files ...string) error { ctx := context.Background() var stime, etime time.Time var err error @@ -950,10 +1006,11 @@ func importRules(url *url.URL, start, end, outputDir string, evalInterval time.D } cfg := ruleImporterConfig{ - outputDir: outputDir, - start: stime, - end: etime, - evalInterval: evalInterval, + outputDir: outputDir, + start: stime, + end: etime, + evalInterval: evalInterval, + maxBlockDuration: maxBlockDuration, } client, err := api.NewClient(api.Config{ Address: url.String(), @@ -980,3 +1037,25 @@ func importRules(url *url.URL, start, end, outputDir string, evalInterval time.D return nil } + +func checkTargetGroupsForAlertmanager(targetGroups []*targetgroup.Group, amcfg *config.AlertmanagerConfig) error { + for _, tg := range targetGroups { + if _, _, err := notifier.AlertmanagerFromGroup(tg, amcfg); err != nil { + return err + } + } + + return nil +} + +func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *config.ScrapeConfig) error { + for _, tg := range targetGroups { + _, failures := scrape.TargetsFromGroup(tg, scfg) + if len(failures) > 0 { + first := failures[0] + return first + } + } + + return nil +} diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 87ac13e76..1a8a47060 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -21,9 +21,10 @@ import ( "testing" "time" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/rulefmt" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/rulefmt" ) func TestQueryRange(t *testing.T) { @@ -111,7 +112,7 @@ func TestCheckSDFile(t *testing.T) { } for _, test := range cases { t.Run(test.name, func(t 
*testing.T) { - err := checkSDFile(test.file) + _, err := checkSDFile(test.file) if test.err != "" { require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error()) return @@ -163,3 +164,42 @@ func BenchmarkCheckDuplicates(b *testing.B) { checkDuplicates(rgs.Groups) } } + +func TestCheckTargetConfig(t *testing.T) { + cases := []struct { + name string + file string + err string + }{ + { + name: "url_in_scrape_targetgroup_with_relabel_config.good", + file: "url_in_scrape_targetgroup_with_relabel_config.good.yml", + err: "", + }, + { + name: "url_in_alert_targetgroup_with_relabel_config.good", + file: "url_in_alert_targetgroup_with_relabel_config.good.yml", + err: "", + }, + { + name: "url_in_scrape_targetgroup_with_relabel_config.bad", + file: "url_in_scrape_targetgroup_with_relabel_config.bad.yml", + err: "instance 0 in group 0: \"http://bad\" is not a valid hostname", + }, + { + name: "url_in_alert_targetgroup_with_relabel_config.bad", + file: "url_in_alert_targetgroup_with_relabel_config.bad.yml", + err: "\"http://bad\" is not a valid hostname", + }, + } + for _, test := range cases { + t.Run(test.name, func(t *testing.T) { + _, err := checkConfig(false, "testdata/"+test.file) + if test.err != "" { + require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error()) + return + } + require.NoError(t, err) + }) + } +} diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go index 642cc1ed0..7afca02f1 100644 --- a/cmd/promtool/rules.go +++ b/cmd/promtool/rules.go @@ -23,8 +23,9 @@ import ( "github.com/pkg/errors" v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/timestamp" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/storage" 
"github.com/prometheus/prometheus/tsdb" @@ -48,10 +49,11 @@ type ruleImporter struct { } type ruleImporterConfig struct { - outputDir string - start time.Time - end time.Time - evalInterval time.Duration + outputDir string + start time.Time + end time.Time + evalInterval time.Duration + maxBlockDuration time.Duration } // newRuleImporter creates a new rule importer that can be used to parse and evaluate recording rule files and create new series @@ -83,7 +85,7 @@ func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) { for i, r := range group.Rules() { level.Info(importer.logger).Log("backfiller", "processing rule", "id", i, "name", r.Name()) - if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, group); err != nil { + if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, int64(importer.config.maxBlockDuration/time.Millisecond), group); err != nil { errs = append(errs, err) } } @@ -92,8 +94,9 @@ func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) { } // importRule queries a prometheus API to evaluate rules at times in the past. -func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName string, ruleLabels labels.Labels, start, end time.Time, grp *rules.Group) (err error) { - blockDuration := tsdb.DefaultBlockDuration +func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName string, ruleLabels labels.Labels, start, end time.Time, + maxBlockDuration int64, grp *rules.Group) (err error) { + blockDuration := getCompatibleBlockDuration(maxBlockDuration) startInMs := start.Unix() * int64(time.Second/time.Millisecond) endInMs := end.Unix() * int64(time.Second/time.Millisecond) @@ -130,7 +133,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName // also need to append samples throughout the whole block range. 
To allow that, we // pretend that the block is twice as large here, but only really add sample in the // original interval later. - w, err := tsdb.NewBlockWriter(log.NewNopLogger(), importer.config.outputDir, 2*tsdb.DefaultBlockDuration) + w, err := tsdb.NewBlockWriter(log.NewNopLogger(), importer.config.outputDir, 2*blockDuration) if err != nil { return errors.Wrap(err, "new block writer") } @@ -168,7 +171,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName } } default: - return errors.New(fmt.Sprintf("rule result is wrong type %s", val.Type().String())) + return fmt.Errorf("rule result is wrong type %s", val.Type().String()) } if err := app.flushAndCommit(ctx); err != nil { diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index c81caaa16..10d59d5cc 100644 --- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -25,9 +25,10 @@ import ( "github.com/go-kit/log" v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/tsdb" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/tsdb" ) type mockQueryRangeAPI struct { @@ -38,6 +39,8 @@ func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r return mockAPI.samples, v1.Warnings{}, nil } +const defaultBlockDuration = time.Duration(tsdb.DefaultBlockDuration) * time.Millisecond + // TestBackfillRuleIntegration is an integration test that runs all the rule importer code to confirm the parts work together. 
func TestBackfillRuleIntegration(t *testing.T) { const ( @@ -46,23 +49,26 @@ func TestBackfillRuleIntegration(t *testing.T) { testValue2 = 98 ) var ( - start = time.Date(2009, time.November, 10, 6, 34, 0, 0, time.UTC) - testTime = model.Time(start.Add(-9 * time.Hour).Unix()) - testTime2 = model.Time(start.Add(-8 * time.Hour).Unix()) + start = time.Date(2009, time.November, 10, 6, 34, 0, 0, time.UTC) + testTime = model.Time(start.Add(-9 * time.Hour).Unix()) + testTime2 = model.Time(start.Add(-8 * time.Hour).Unix()) + twentyFourHourDuration, _ = time.ParseDuration("24h") ) - var testCases = []struct { + testCases := []struct { name string runcount int + maxBlockDuration time.Duration expectedBlockCount int expectedSeriesCount int expectedSampleCount int samples []*model.SampleStream }{ - {"no samples", 1, 0, 0, 0, []*model.SampleStream{}}, - {"run importer once", 1, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}}, - {"run importer with dup name label", 1, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"__name__": "val1", "name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}}, - {"one importer twice", 2, 8, 4, 8, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}, {Timestamp: testTime2, Value: testValue2}}}}}, + {"no samples", 1, defaultBlockDuration, 0, 0, 0, []*model.SampleStream{}}, + {"run importer once", 1, defaultBlockDuration, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}}, + {"run importer with dup name label", 1, defaultBlockDuration, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"__name__": "val1", "name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}}, + {"one importer twice", 2, defaultBlockDuration, 8, 4, 8, 
[]*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}, {Timestamp: testTime2, Value: testValue2}}}}}, + {"run importer once with larger blocks", 1, twentyFourHourDuration, 4, 4, 4, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}}, } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { @@ -76,7 +82,8 @@ func TestBackfillRuleIntegration(t *testing.T) { // Execute the test more than once to simulate running the rule importer twice with the same data. // We expect duplicate blocks with the same series are created when run more than once. for i := 0; i < tt.runcount; i++ { - ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, tt.samples) + + ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, tt.samples, tt.maxBlockDuration) require.NoError(t, err) path1 := filepath.Join(tmpDir, "test.file") require.NoError(t, createSingleRuleTestFiles(path1)) @@ -162,13 +169,14 @@ func TestBackfillRuleIntegration(t *testing.T) { } } -func newTestRuleImporter(ctx context.Context, start time.Time, tmpDir string, testSamples model.Matrix) (*ruleImporter, error) { +func newTestRuleImporter(ctx context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) { logger := log.NewNopLogger() cfg := ruleImporterConfig{ - outputDir: tmpDir, - start: start.Add(-10 * time.Hour), - end: start.Add(-7 * time.Hour), - evalInterval: 60 * time.Second, + outputDir: tmpDir, + start: start.Add(-10 * time.Hour), + end: start.Add(-7 * time.Hour), + evalInterval: 60 * time.Second, + maxBlockDuration: maxBlockDuration, } return newRuleImporter(logger, cfg, mockQueryRangeAPI{ @@ -185,7 +193,7 @@ func createSingleRuleTestFiles(path string) error { labels: testlabel11: testlabelvalue11 ` - return ioutil.WriteFile(path, []byte(recordingRules), 0777) + return 
ioutil.WriteFile(path, []byte(recordingRules), 0o777) } func createMultiRuleTestFiles(path string) error { @@ -205,7 +213,7 @@ func createMultiRuleTestFiles(path string) error { labels: testlabel11: testlabelvalue13 ` - return ioutil.WriteFile(path, []byte(recordingRules), 0777) + return ioutil.WriteFile(path, []byte(recordingRules), 0o777) } // TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics @@ -225,7 +233,7 @@ func TestBackfillLabels(t *testing.T) { Values: []model.SamplePair{{Timestamp: model.TimeFromUnixNano(start.UnixNano()), Value: 123}}, }, } - ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, mockAPISamples) + ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, mockAPISamples, defaultBlockDuration) require.NoError(t, err) path := filepath.Join(tmpDir, "test.file") @@ -237,7 +245,7 @@ func TestBackfillLabels(t *testing.T) { labels: name1: value-from-rule ` - require.NoError(t, ioutil.WriteFile(path, []byte(recordingRules), 0777)) + require.NoError(t, ioutil.WriteFile(path, []byte(recordingRules), 0o777)) errs := ruleImporter.loadGroups(ctx, []string{path}) for _, err := range errs { require.NoError(t, err) diff --git a/cmd/promtool/sd.go b/cmd/promtool/sd.go new file mode 100644 index 000000000..f3b2c13b4 --- /dev/null +++ b/cmd/promtool/sd.go @@ -0,0 +1,148 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + "reflect" + "time" + + "github.com/go-kit/log" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/scrape" +) + +type sdCheckResult struct { + DiscoveredLabels labels.Labels `json:"discoveredLabels"` + Labels labels.Labels `json:"labels"` + Error error `json:"error,omitempty"` +} + +// CheckSD performs service discovery for the given job name and reports the results. +func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration) int { + logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + + cfg, err := config.LoadFile(sdConfigFiles, false, false, logger) + if err != nil { + fmt.Fprintln(os.Stderr, "Cannot load config", err) + return 2 + } + + var scrapeConfig *config.ScrapeConfig + jobs := []string{} + jobMatched := false + for _, v := range cfg.ScrapeConfigs { + jobs = append(jobs, v.JobName) + if v.JobName == sdJobName { + jobMatched = true + scrapeConfig = v + break + } + } + + if !jobMatched { + fmt.Fprintf(os.Stderr, "Job %s not found. 
Select one of:\n", sdJobName) + for _, job := range jobs { + fmt.Fprintf(os.Stderr, "\t%s\n", job) + } + return 1 + } + + targetGroupChan := make(chan []*targetgroup.Group) + ctx, cancel := context.WithTimeout(context.Background(), sdTimeout) + defer cancel() + + for _, cfg := range scrapeConfig.ServiceDiscoveryConfigs { + d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger}) + if err != nil { + fmt.Fprintln(os.Stderr, "Could not create new discoverer", err) + return 2 + } + go d.Run(ctx, targetGroupChan) + } + + var targetGroups []*targetgroup.Group + sdCheckResults := make(map[string][]*targetgroup.Group) +outerLoop: + for { + select { + case targetGroups = <-targetGroupChan: + for _, tg := range targetGroups { + sdCheckResults[tg.Source] = append(sdCheckResults[tg.Source], tg) + } + case <-ctx.Done(): + break outerLoop + } + } + results := []sdCheckResult{} + for _, tgs := range sdCheckResults { + results = append(results, getSDCheckResult(tgs, scrapeConfig)...) + } + + res, err := json.MarshalIndent(results, "", " ") + if err != nil { + fmt.Fprintf(os.Stderr, "Could not marshal result json: %s", err) + return 2 + } + + fmt.Printf("%s", res) + return 0 +} + +func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult { + sdCheckResults := []sdCheckResult{} + for _, targetGroup := range targetGroups { + for _, target := range targetGroup.Targets { + labelSlice := make([]labels.Label, 0, len(target)+len(targetGroup.Labels)) + + for name, value := range target { + labelSlice = append(labelSlice, labels.Label{Name: string(name), Value: string(value)}) + } + + for name, value := range targetGroup.Labels { + if _, ok := target[name]; !ok { + labelSlice = append(labelSlice, labels.Label{Name: string(name), Value: string(value)}) + } + } + + targetLabels := labels.New(labelSlice...) 
+ res, orig, err := scrape.PopulateLabels(targetLabels, scrapeConfig) + result := sdCheckResult{ + DiscoveredLabels: orig, + Labels: res, + Error: err, + } + + duplicateRes := false + for _, sdCheckRes := range sdCheckResults { + if reflect.DeepEqual(sdCheckRes, result) { + duplicateRes = true + break + } + } + + if !duplicateRes { + sdCheckResults = append(sdCheckResults, result) + } + } + } + return sdCheckResults +} diff --git a/cmd/promtool/sd_test.go b/cmd/promtool/sd_test.go new file mode 100644 index 000000000..7f80437cf --- /dev/null +++ b/cmd/promtool/sd_test.go @@ -0,0 +1,70 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "testing" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" + + "github.com/stretchr/testify/require" +) + +func TestSDCheckResult(t *testing.T) { + targetGroups := []*targetgroup.Group{{ + Targets: []model.LabelSet{ + map[model.LabelName]model.LabelValue{"__address__": "localhost:8080", "foo": "bar"}, + }, + }} + + reg, err := relabel.NewRegexp("(.*)") + require.Nil(t, err) + + scrapeConfig := &config.ScrapeConfig{ + RelabelConfigs: []*relabel.Config{{ + SourceLabels: model.LabelNames{"foo"}, + Action: relabel.Replace, + TargetLabel: "newfoo", + Regex: reg, + Replacement: "$1", + }}, + } + + expectedSDCheckResult := []sdCheckResult{ + { + DiscoveredLabels: labels.Labels{ + labels.Label{Name: "__address__", Value: "localhost:8080"}, + labels.Label{Name: "__scrape_interval__", Value: "0s"}, + labels.Label{Name: "__scrape_timeout__", Value: "0s"}, + labels.Label{Name: "foo", Value: "bar"}, + }, + Labels: labels.Labels{ + labels.Label{Name: "__address__", Value: "localhost:8080"}, + labels.Label{Name: "__scrape_interval__", Value: "0s"}, + labels.Label{Name: "__scrape_timeout__", Value: "0s"}, + labels.Label{Name: "foo", Value: "bar"}, + labels.Label{Name: "instance", Value: "localhost:8080"}, + labels.Label{Name: "newfoo", Value: "bar"}, + }, + Error: nil, + }, + } + + require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig)) +} diff --git a/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.bad.yml b/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.bad.yml new file mode 100644 index 000000000..a23628e18 --- /dev/null +++ b/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.bad.yml @@ -0,0 +1,8 @@ +alerting: + alertmanagers: + - relabel_configs: + - source_labels: 
[__address__] + target_label: __param_target + static_configs: + - targets: + - http://bad diff --git a/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.good.yml b/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.good.yml new file mode 100644 index 000000000..575fe5f11 --- /dev/null +++ b/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.good.yml @@ -0,0 +1,10 @@ +alerting: + alertmanagers: + - relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: good + static_configs: + - targets: + - http://bad diff --git a/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.bad.yml b/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.bad.yml new file mode 100644 index 000000000..d5daf6e9b --- /dev/null +++ b/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.bad.yml @@ -0,0 +1,8 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + static_configs: + - targets: + - http://bad diff --git a/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.good.yml b/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.good.yml new file mode 100644 index 000000000..ab26f00ed --- /dev/null +++ b/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.good.yml @@ -0,0 +1,10 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: good + static_configs: + - targets: + - http://good diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 45cde87cf..2e5d854de 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -17,7 +17,6 @@ import ( "bufio" "context" "fmt" - "github.com/prometheus/prometheus/tsdb/index" "io" "io/ioutil" "math" @@ -32,11 +31,14 @@ import ( "text/tabwriter" "time" 
+ "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/index" + "github.com/alecthomas/units" "github.com/go-kit/log" "github.com/pkg/errors" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -78,7 +80,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err if err := os.RemoveAll(b.outPath); err != nil { return err } - if err := os.MkdirAll(b.outPath, 0777); err != nil { + if err := os.MkdirAll(b.outPath, 0o777); err != nil { return err } @@ -187,7 +189,7 @@ func (b *writeBenchmark) ingestScrapesShard(lbls []labels.Labels, scrapeCount in type sample struct { labels labels.Labels value int64 - ref *uint64 + ref *storage.SeriesRef } scrape := make([]*sample, 0, len(lbls)) @@ -207,7 +209,7 @@ func (b *writeBenchmark) ingestScrapesShard(lbls []labels.Labels, scrapeCount in for _, s := range scrape { s.value += 1000 - var ref uint64 + var ref storage.SeriesRef if s.ref != nil { ref = *s.ref } @@ -589,7 +591,7 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err histogram := make([]int, nBuckets) totalChunks := 0 for postingsr.Next() { - var lbsl = labels.Labels{} + lbsl := labels.Labels{} var chks []chunks.Meta if err := indexr.Series(postingsr.At(), &lbsl, &chks); err != nil { return err @@ -671,14 +673,14 @@ func checkErr(err error) int { return 0 } -func backfillOpenMetrics(path string, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int { +func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int { inputFile, err := fileutil.OpenMmapFile(path) if err != nil { return checkErr(err) } defer inputFile.Close() - if err := os.MkdirAll(outputDir, 0777); err != nil { + if err := os.MkdirAll(outputDir, 0o777); 
err != nil { return checkErr(errors.Wrap(err, "create output dir")) } diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 4c197d074..13cc79888 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -30,7 +30,7 @@ import ( "github.com/prometheus/common/model" yaml "gopkg.in/yaml.v2" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/rules" @@ -47,6 +47,7 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, files ...string) int { fmt.Fprintln(os.Stderr, " FAILED:") for _, e := range errs { fmt.Fprintln(os.Stderr, e.Error()) + fmt.Println() } failed = true } else { @@ -313,30 +314,18 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i }) } - var sb strings.Builder - if gotAlerts.Len() != expAlerts.Len() { + sort.Sort(gotAlerts) + sort.Sort(expAlerts) + + if !reflect.DeepEqual(expAlerts, gotAlerts) { + var testName string if tg.TestGroupName != "" { - fmt.Fprintf(&sb, " name: %s,\n", tg.TestGroupName) - } - fmt.Fprintf(&sb, " alertname:%s, time:%s, \n", testcase.Alertname, testcase.EvalTime.String()) - fmt.Fprintf(&sb, " exp:%#v, \n", expAlerts.String()) - fmt.Fprintf(&sb, " got:%#v", gotAlerts.String()) - - errs = append(errs, errors.New(sb.String())) - } else { - sort.Sort(gotAlerts) - sort.Sort(expAlerts) - - if !reflect.DeepEqual(expAlerts, gotAlerts) { - if tg.TestGroupName != "" { - fmt.Fprintf(&sb, " name: %s,\n", tg.TestGroupName) - } - fmt.Fprintf(&sb, " alertname:%s, time:%s, \n", testcase.Alertname, testcase.EvalTime.String()) - fmt.Fprintf(&sb, " exp:%#v, \n", expAlerts.String()) - fmt.Fprintf(&sb, " got:%#v", gotAlerts.String()) - - errs = append(errs, errors.New(sb.String())) + testName = fmt.Sprintf(" name: %s,\n", tg.TestGroupName) } + expString := indentLines(expAlerts.String(), " ") + gotString := 
indentLines(gotAlerts.String(), " ") + errs = append(errs, errors.Errorf("%s alertname: %s, time: %s, \n exp:%v, \n got:%v", + testName, testcase.Alertname, testcase.EvalTime.String(), expString, gotString)) } } @@ -385,7 +374,7 @@ Outer: return labels.Compare(gotSamples[i].Labels, gotSamples[j].Labels) <= 0 }) if !reflect.DeepEqual(expSamples, gotSamples) { - errs = append(errs, errors.Errorf(" expr: %q, time: %s,\n exp:%#v\n got:%#v", testCase.Expr, + errs = append(errs, errors.Errorf(" expr: %q, time: %s,\n exp: %v\n got: %v", testCase.Expr, testCase.EvalTime.String(), parsedSamplesString(expSamples), parsedSamplesString(gotSamples))) } } @@ -398,7 +387,6 @@ Outer: // seriesLoadingString returns the input series in PromQL notation. func (tg *testGroup) seriesLoadingString() string { - result := fmt.Sprintf("load %v\n", shortDuration(tg.Interval)) for _, is := range tg.InputSeries { result += fmt.Sprintf(" %v %v\n", is.Series, is.Values) @@ -468,6 +456,23 @@ func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, q } } +// indentLines prefixes each line in the supplied string with the given "indent" +// string. 
+func indentLines(lines, indent string) string { + sb := strings.Builder{} + n := strings.Split(lines, "\n") + for i, l := range n { + if i > 0 { + sb.WriteString(indent) + } + sb.WriteString(l) + if i != len(n)-1 { + sb.WriteRune('\n') + } + } + return sb.String() +} + type labelsAndAnnotations []labelAndAnnotation func (la labelsAndAnnotations) Len() int { return len(la) } @@ -484,11 +489,11 @@ func (la labelsAndAnnotations) String() string { if len(la) == 0 { return "[]" } - s := "[" + la[0].String() - for _, l := range la[1:] { - s += ", " + l.String() + s := "[\n0:" + indentLines("\n"+la[0].String(), " ") + for i, l := range la[1:] { + s += ",\n" + fmt.Sprintf("%d", i+1) + ":" + indentLines("\n"+l.String(), " ") } - s += "]" + s += "\n]" return s } @@ -499,7 +504,7 @@ type labelAndAnnotation struct { } func (la *labelAndAnnotation) String() string { - return "Labels:" + la.Labels.String() + " Annotations:" + la.Annotations.String() + return "Labels:" + la.Labels.String() + "\nAnnotations:" + la.Annotations.String() } type series struct { diff --git a/config/config.go b/config/config.go index dc2ed19a2..24441d2bc 100644 --- a/config/config.go +++ b/config/config.go @@ -33,8 +33,8 @@ import ( yaml "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/relabel" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" ) var ( @@ -99,7 +99,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro } // LoadFile parses the given YAML file into a Config. 
-func LoadFile(filename string, expandExternalLabels bool, logger log.Logger) (*Config, error) { +func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) { content, err := ioutil.ReadFile(filename) if err != nil { return nil, err @@ -108,6 +108,25 @@ func LoadFile(filename string, expandExternalLabels bool, logger log.Logger) (*C if err != nil { return nil, errors.Wrapf(err, "parsing YAML file %s", filename) } + + if agentMode { + if len(cfg.RemoteWriteConfigs) == 0 { + return nil, errors.New("at least one remote_write target must be specified in agent mode") + } + + if len(cfg.AlertingConfig.AlertmanagerConfigs) > 0 || len(cfg.AlertingConfig.AlertRelabelConfigs) > 0 { + return nil, errors.New("field alerting is not allowed in agent mode") + } + + if len(cfg.RuleFiles) > 0 { + return nil, errors.New("field rule_files is not allowed in agent mode") + } + + if len(cfg.RemoteReadConfigs) > 0 { + return nil, errors.New("field remote_read is not allowed in agent mode") + } + } + cfg.SetDirectory(filepath.Dir(filename)) return cfg, nil } @@ -169,7 +188,7 @@ var ( // Backoff times for retrying a batch of samples on recoverable errors. MinBackoff: model.Duration(30 * time.Millisecond), - MaxBackoff: model.Duration(100 * time.Millisecond), + MaxBackoff: model.Duration(5 * time.Second), } // DefaultMetadataConfig is the default metadata configuration for a remote write endpoint. 
diff --git a/config/config_test.go b/config/config_test.go index 3055de74d..e8d52d4aa 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -52,8 +52,8 @@ import ( "github.com/prometheus/prometheus/discovery/uyuni" "github.com/prometheus/prometheus/discovery/xds" "github.com/prometheus/prometheus/discovery/zookeeper" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/relabel" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" ) func mustParseURL(u string) *config.URL { @@ -103,6 +103,10 @@ var expectedConf = &Config{ ClientID: "123", ClientSecret: "456", TokenURL: "http://remote1/auth", + TLSConfig: config.TLSConfig{ + CertFile: filepath.FromSlash("testdata/valid_cert_file"), + KeyFile: filepath.FromSlash("testdata/valid_key_file"), + }, }, FollowRedirects: true, }, @@ -565,6 +569,7 @@ var expectedConf = &Config{ AuthenticationMethod: "OAuth", RefreshInterval: model.Duration(5 * time.Minute), Port: 9100, + HTTPClientConfig: config.DefaultHTTPClientConfig, }, }, }, @@ -779,17 +784,19 @@ var expectedConf = &Config{ Scheme: DefaultScrapeConfig.Scheme, HTTPClientConfig: config.DefaultHTTPClientConfig, - ServiceDiscoveryConfigs: discovery.Configs{&openstack.SDConfig{ - Role: "instance", - Region: "RegionOne", - Port: 80, - Availability: "public", - RefreshInterval: model.Duration(60 * time.Second), - TLSConfig: config.TLSConfig{ - CAFile: "testdata/valid_ca_file", - CertFile: "testdata/valid_cert_file", - KeyFile: "testdata/valid_key_file", - }}, + ServiceDiscoveryConfigs: discovery.Configs{ + &openstack.SDConfig{ + Role: "instance", + Region: "RegionOne", + Port: 80, + Availability: "public", + RefreshInterval: model.Duration(60 * time.Second), + TLSConfig: config.TLSConfig{ + CAFile: "testdata/valid_ca_file", + CertFile: "testdata/valid_cert_file", + KeyFile: "testdata/valid_key_file", + }, + }, }, }, { @@ -803,22 +810,23 @@ var expectedConf = &Config{ Scheme: 
DefaultScrapeConfig.Scheme, HTTPClientConfig: config.DefaultHTTPClientConfig, - ServiceDiscoveryConfigs: discovery.Configs{&puppetdb.SDConfig{ - URL: "https://puppetserver/", - Query: "resources { type = \"Package\" and title = \"httpd\" }", - IncludeParameters: true, - Port: 80, - RefreshInterval: model.Duration(60 * time.Second), - HTTPClientConfig: config.HTTPClientConfig{ - FollowRedirects: true, - TLSConfig: config.TLSConfig{ - CAFile: "testdata/valid_ca_file", - CertFile: "testdata/valid_cert_file", - KeyFile: "testdata/valid_key_file", + ServiceDiscoveryConfigs: discovery.Configs{ + &puppetdb.SDConfig{ + URL: "https://puppetserver/", + Query: "resources { type = \"Package\" and title = \"httpd\" }", + IncludeParameters: true, + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), + HTTPClientConfig: config.HTTPClientConfig{ + FollowRedirects: true, + TLSConfig: config.TLSConfig{ + CAFile: "testdata/valid_ca_file", + CertFile: "testdata/valid_cert_file", + KeyFile: "testdata/valid_key_file", + }, }, }, }, - }, }, { JobName: "hetzner", @@ -981,7 +989,7 @@ var expectedConf = &Config{ } func TestYAMLRoundtrip(t *testing.T) { - want, err := LoadFile("testdata/roundtrip.good.yml", false, log.NewNopLogger()) + want, err := LoadFile("testdata/roundtrip.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) @@ -994,7 +1002,7 @@ func TestYAMLRoundtrip(t *testing.T) { } func TestRemoteWriteRetryOnRateLimit(t *testing.T) { - want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, log.NewNopLogger()) + want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) @@ -1010,16 +1018,16 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) { func TestLoadConfig(t *testing.T) { // Parse a valid file that sets a global scrape timeout. 
This tests whether parsing // an overwritten default field in the global config permanently changes the default. - _, err := LoadFile("testdata/global_timeout.good.yml", false, log.NewNopLogger()) + _, err := LoadFile("testdata/global_timeout.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) - c, err := LoadFile("testdata/conf.good.yml", false, log.NewNopLogger()) + c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) require.Equal(t, expectedConf, c) } func TestScrapeIntervalLarger(t *testing.T) { - c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, log.NewNopLogger()) + c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) require.Equal(t, 1, len(c.ScrapeConfigs)) for _, sc := range c.ScrapeConfigs { @@ -1029,7 +1037,7 @@ func TestScrapeIntervalLarger(t *testing.T) { // YAML marshaling must not reveal authentication credentials. func TestElideSecrets(t *testing.T) { - c, err := LoadFile("testdata/conf.good.yml", false, log.NewNopLogger()) + c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) secretRe := regexp.MustCompile(`\\u003csecret\\u003e|`) @@ -1046,31 +1054,31 @@ func TestElideSecrets(t *testing.T) { func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) { // Parse a valid file that sets a rule files with an absolute path - c, err := LoadFile(ruleFilesConfigFile, false, log.NewNopLogger()) + c, err := LoadFile(ruleFilesConfigFile, false, false, log.NewNopLogger()) require.NoError(t, err) require.Equal(t, ruleFilesExpectedConf, c) } func TestKubernetesEmptyAPIServer(t *testing.T) { - _, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, log.NewNopLogger()) + _, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) } func TestKubernetesWithKubeConfig(t *testing.T) { - 
_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, log.NewNopLogger()) + _, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) } func TestKubernetesSelectors(t *testing.T) { - _, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, log.NewNopLogger()) + _, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) - _, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, log.NewNopLogger()) + _, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) } @@ -1081,170 +1089,224 @@ var expectedErrors = []struct { { filename: "jobname.bad.yml", errMsg: `job_name is empty`, - }, { + }, + { filename: "jobname_dup.bad.yml", errMsg: `found multiple scrape configs with job name "prometheus"`, - }, { + }, + { filename: "scrape_interval.bad.yml", errMsg: `scrape timeout greater than scrape interval`, - }, { + }, + { filename: "labelname.bad.yml", errMsg: `"not$allowed" is not a valid label name`, - }, { + }, + { filename: "labelname2.bad.yml", errMsg: `"not:allowed" is not a valid label name`, - }, { + }, + { filename: 
"labelvalue.bad.yml", errMsg: `"\xff" is not a valid label value`, - }, { + }, + { filename: "regex.bad.yml", errMsg: "error parsing regexp", - }, { + }, + { filename: "modulus_missing.bad.yml", errMsg: "relabel configuration for hashmod requires non-zero modulus", - }, { + }, + { filename: "labelkeep.bad.yml", errMsg: "labelkeep action requires only 'regex', and no other fields", - }, { + }, + { filename: "labelkeep2.bad.yml", errMsg: "labelkeep action requires only 'regex', and no other fields", - }, { + }, + { filename: "labelkeep3.bad.yml", errMsg: "labelkeep action requires only 'regex', and no other fields", - }, { + }, + { filename: "labelkeep4.bad.yml", errMsg: "labelkeep action requires only 'regex', and no other fields", - }, { + }, + { filename: "labelkeep5.bad.yml", errMsg: "labelkeep action requires only 'regex', and no other fields", - }, { + }, + { filename: "labeldrop.bad.yml", errMsg: "labeldrop action requires only 'regex', and no other fields", - }, { + }, + { filename: "labeldrop2.bad.yml", errMsg: "labeldrop action requires only 'regex', and no other fields", - }, { + }, + { filename: "labeldrop3.bad.yml", errMsg: "labeldrop action requires only 'regex', and no other fields", - }, { + }, + { filename: "labeldrop4.bad.yml", errMsg: "labeldrop action requires only 'regex', and no other fields", - }, { + }, + { filename: "labeldrop5.bad.yml", errMsg: "labeldrop action requires only 'regex', and no other fields", - }, { + }, + { filename: "labelmap.bad.yml", errMsg: "\"l-$1\" is invalid 'replacement' for labelmap action", - }, { + }, + { filename: "rules.bad.yml", errMsg: "invalid rule file path", - }, { + }, + { filename: "unknown_attr.bad.yml", errMsg: "field consult_sd_configs not found in type", - }, { + }, + { filename: "bearertoken.bad.yml", errMsg: "at most one of bearer_token & bearer_token_file must be configured", - }, { + }, + { filename: "bearertoken_basicauth.bad.yml", errMsg: "at most one of basic_auth, oauth2, bearer_token & 
bearer_token_file must be configured", - }, { + }, + { filename: "kubernetes_http_config_without_api_server.bad.yml", errMsg: "to use custom HTTP client configuration please provide the 'api_server' URL explicitly", - }, { + }, + { filename: "kubernetes_kubeconfig_with_apiserver.bad.yml", errMsg: "cannot use 'kubeconfig_file' and 'api_server' simultaneously", - }, { + }, + { filename: "kubernetes_kubeconfig_with_http_config.bad.yml", errMsg: "cannot use a custom HTTP client configuration together with 'kubeconfig_file'", }, { filename: "kubernetes_bearertoken.bad.yml", errMsg: "at most one of bearer_token & bearer_token_file must be configured", - }, { + }, + { filename: "kubernetes_role.bad.yml", errMsg: "role", - }, { + }, + { filename: "kubernetes_selectors_endpoints.bad.yml", errMsg: "endpoints role supports only pod, service, endpoints selectors", - }, { + }, + { filename: "kubernetes_selectors_ingress.bad.yml", errMsg: "ingress role supports only ingress selectors", - }, { + }, + { filename: "kubernetes_selectors_node.bad.yml", errMsg: "node role supports only node selectors", - }, { + }, + { filename: "kubernetes_selectors_pod.bad.yml", errMsg: "pod role supports only pod selectors", - }, { + }, + { filename: "kubernetes_selectors_service.bad.yml", errMsg: "service role supports only service selectors", - }, { + }, + { filename: "kubernetes_namespace_discovery.bad.yml", errMsg: "field foo not found in type kubernetes.plain", - }, { + }, + { filename: "kubernetes_selectors_duplicated_role.bad.yml", errMsg: "duplicated selector role: pod", - }, { + }, + { filename: "kubernetes_selectors_incorrect_selector.bad.yml", errMsg: "invalid selector: 'metadata.status-Running'; can't understand 'metadata.status-Running'", - }, { + }, + { filename: "kubernetes_bearertoken_basicauth.bad.yml", errMsg: "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured", - }, { + }, + { filename: "kubernetes_authorization_basicauth.bad.yml", errMsg: "at 
most one of basic_auth, oauth2 & authorization must be configured", - }, { + }, + { filename: "marathon_no_servers.bad.yml", errMsg: "marathon_sd: must contain at least one Marathon server", - }, { + }, + { filename: "marathon_authtoken_authtokenfile.bad.yml", errMsg: "marathon_sd: at most one of auth_token & auth_token_file must be configured", - }, { + }, + { filename: "marathon_authtoken_basicauth.bad.yml", errMsg: "marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured", - }, { + }, + { filename: "marathon_authtoken_bearertoken.bad.yml", errMsg: "marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured", - }, { + }, + { filename: "marathon_authtoken_authorization.bad.yml", errMsg: "marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured", - }, { + }, + { filename: "openstack_role.bad.yml", errMsg: "unknown OpenStack SD role", - }, { + }, + { filename: "openstack_availability.bad.yml", errMsg: "unknown availability invalid, must be one of admin, internal or public", - }, { + }, + { filename: "url_in_targetgroup.bad.yml", errMsg: "\"http://bad\" is not a valid hostname", - }, { + }, + { filename: "target_label_missing.bad.yml", errMsg: "relabel configuration for replace action requires 'target_label' value", - }, { + }, + { filename: "target_label_hashmod_missing.bad.yml", errMsg: "relabel configuration for hashmod action requires 'target_label' value", - }, { + }, + { filename: "unknown_global_attr.bad.yml", errMsg: "field nonexistent_field not found in type config.plain", - }, { + }, + { filename: "remote_read_url_missing.bad.yml", errMsg: `url for remote_read is empty`, - }, { + }, + { filename: "remote_write_header.bad.yml", errMsg: `x-prometheus-remote-write-version is a reserved header. It must not be changed`, - }, { + }, + { filename: "remote_read_header.bad.yml", errMsg: `x-prometheus-remote-write-version is a reserved header. 
It must not be changed`, - }, { + }, + { filename: "remote_write_authorization_header.bad.yml", errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`, - }, { + }, + { filename: "remote_write_url_missing.bad.yml", errMsg: `url for remote_write is empty`, - }, { + }, + { filename: "remote_write_dup.bad.yml", errMsg: `found multiple remote write configs with job name "queue1"`, - }, { + }, + { filename: "remote_read_dup.bad.yml", errMsg: `found multiple remote read configs with job name "queue1"`, }, @@ -1376,7 +1438,7 @@ var expectedErrors = []struct { func TestBadConfigs(t *testing.T) { for _, ee := range expectedErrors { - _, err := LoadFile("testdata/"+ee.filename, false, log.NewNopLogger()) + _, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger()) require.Error(t, err, "%s", ee.filename) require.Contains(t, err.Error(), ee.errMsg, "Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err) @@ -1410,20 +1472,20 @@ func TestExpandExternalLabels(t *testing.T) { // Cleanup ant TEST env variable that could exist on the system. 
os.Setenv("TEST", "") - c, err := LoadFile("testdata/external_labels.good.yml", false, log.NewNopLogger()) + c, err := LoadFile("testdata/external_labels.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0]) require.Equal(t, labels.Label{Name: "baz", Value: "foo${TEST}bar"}, c.GlobalConfig.ExternalLabels[1]) require.Equal(t, labels.Label{Name: "foo", Value: "${TEST}"}, c.GlobalConfig.ExternalLabels[2]) - c, err = LoadFile("testdata/external_labels.good.yml", true, log.NewNopLogger()) + c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger()) require.NoError(t, err) require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0]) require.Equal(t, labels.Label{Name: "baz", Value: "foobar"}, c.GlobalConfig.ExternalLabels[1]) require.Equal(t, labels.Label{Name: "foo", Value: ""}, c.GlobalConfig.ExternalLabels[2]) os.Setenv("TEST", "TestValue") - c, err = LoadFile("testdata/external_labels.good.yml", true, log.NewNopLogger()) + c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger()) require.NoError(t, err) require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0]) require.Equal(t, labels.Label{Name: "baz", Value: "fooTestValuebar"}, c.GlobalConfig.ExternalLabels[1]) diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index cdd0c0b30..bfe0228be 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -23,6 +23,9 @@ remote_write: client_id: "123" client_secret: "456" token_url: "http://remote1/auth" + tls_config: + cert_file: valid_cert_file + key_file: valid_key_file - url: http://remote2/push name: rw_tls diff --git a/discovery/README.md b/discovery/README.md index 19b579b39..8854981a0 100644 --- a/discovery/README.md +++ b/discovery/README.md @@ -131,7 +131,7 @@ the Prometheus server will be able 
to see them. ### The SD interface -A Service Discovery (SD) mechanism has to discover targets and provide them to Prometheus. We expect similar targets to be grouped together, in the form of a [target group](https://pkg.go.dev/github.com/prometheus/prometheus/discovery/targetgroup#Group). The SD mechanism sends the targets down to prometheus as list of target groups. +A Service Discovery (SD) mechanism has to discover targets and provide them to Prometheus. We expect similar targets to be grouped together, in the form of a [target group](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/discovery/targetgroup#Group). The SD mechanism sends the targets down to prometheus as list of target groups. An SD mechanism has to implement the `Discoverer` Interface: ```go diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index 0bcfd0547..8984035e2 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -63,13 +63,11 @@ const ( ec2LabelSeparator = "," ) -var ( - // DefaultEC2SDConfig is the default EC2 SD configuration. - DefaultEC2SDConfig = EC2SDConfig{ - Port: 80, - RefreshInterval: model.Duration(60 * time.Second), - } -) +// DefaultEC2SDConfig is the default EC2 SD configuration. +var DefaultEC2SDConfig = EC2SDConfig{ + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), +} func init() { discovery.RegisterConfig(&EC2SDConfig{}) diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go index e3dc65b5d..e5165776f 100644 --- a/discovery/aws/lightsail.go +++ b/discovery/aws/lightsail.go @@ -53,13 +53,11 @@ const ( lightsailLabelSeparator = "," ) -var ( - // DefaultLightsailSDConfig is the default Lightsail SD configuration. - DefaultLightsailSDConfig = LightsailSDConfig{ - Port: 80, - RefreshInterval: model.Duration(60 * time.Second), - } -) +// DefaultLightsailSDConfig is the default Lightsail SD configuration. 
+var DefaultLightsailSDConfig = LightsailSDConfig{ + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), +} func init() { discovery.RegisterConfig(&LightsailSDConfig{}) diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index b0de0abd8..3e7cd4e12 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -64,6 +64,7 @@ var DefaultSDConfig = SDConfig{ RefreshInterval: model.Duration(5 * time.Minute), Environment: azure.PublicCloud.Name, AuthenticationMethod: authMethodOAuth, + HTTPClientConfig: config_util.DefaultHTTPClientConfig, } func init() { @@ -80,6 +81,8 @@ type SDConfig struct { ClientSecret config_util.Secret `yaml:"client_secret,omitempty"` RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` AuthenticationMethod string `yaml:"authentication_method,omitempty"` + + HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"` } // Name returns the name of the Config. @@ -200,19 +203,29 @@ func createAzureClient(cfg SDConfig) (azureClient, error) { } } + client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "azure_sd") + if err != nil { + return azureClient{}, err + } + sender := autorest.DecorateSender(client) + bearerAuthorizer := autorest.NewBearerAuthorizer(spt) c.vm = compute.NewVirtualMachinesClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID) c.vm.Authorizer = bearerAuthorizer + c.vm.Sender = sender c.nic = network.NewInterfacesClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID) c.nic.Authorizer = bearerAuthorizer + c.nic.Sender = sender c.vmss = compute.NewVirtualMachineScaleSetsClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID) c.vmss.Authorizer = bearerAuthorizer + c.vm.Sender = sender c.vmssvm = compute.NewVirtualMachineScaleSetVMsClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID) c.vmssvm.Authorizer = bearerAuthorizer + c.vmssvm.Sender = sender return c, nil } @@ -326,7 +339,6 @@ func (d *Discovery) refresh(ctx context.Context) 
([]*targetgroup.Group, error) { // Get the IP address information via separate call to the network provider. for _, nicID := range vm.NetworkInterfaces { networkInterface, err := client.getNetworkInterfaceByID(ctx, nicID) - if err != nil { level.Error(d.logger).Log("msg", "Unable to get network interface", "name", nicID, "err", err) ch <- target{labelSet: nil, err: err} @@ -424,9 +436,8 @@ func (client *azureClient) getScaleSets(ctx context.Context) ([]compute.VirtualM func (client *azureClient) getScaleSetVMs(ctx context.Context, scaleSet compute.VirtualMachineScaleSet) ([]virtualMachine, error) { var vms []virtualMachine - //TODO do we really need to fetch the resourcegroup this way? + // TODO do we really need to fetch the resourcegroup this way? r, err := newAzureResourceFromID(*scaleSet.ID, nil) - if err != nil { return nil, errors.Wrap(err, "could not parse scale set ID") } diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index 16d0d2628..d07db7867 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -54,7 +54,7 @@ const ( healthLabel = model.MetaLabelPrefix + "consul_health" // serviceAddressLabel is the name of the label containing the (optional) service address. serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address" - //servicePortLabel is the name of the label containing the service port. + // servicePortLabel is the name of the label containing the service port. servicePortLabel = model.MetaLabelPrefix + "consul_service_port" // datacenterLabel is the name of the label containing the datacenter ID. 
datacenterLabel = model.MetaLabelPrefix + "consul_dc" @@ -297,6 +297,7 @@ func (d *Discovery) getDatacenter() error { } d.clientDatacenter = dc + d.logger = log.With(d.logger, "datacenter", dc) return nil } @@ -530,7 +531,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr for _, serviceNode := range serviceNodes { // We surround the separated list with the separator as well. This way regular expressions // in relabeling rules don't have to consider tag positions. - var tags = srv.tagSeparator + strings.Join(serviceNode.Service.Tags, srv.tagSeparator) + srv.tagSeparator + tags := srv.tagSeparator + strings.Join(serviceNode.Service.Tags, srv.tagSeparator) + srv.tagSeparator // If the service address is not empty it should be used instead of the node address // since the service may be registered remotely through a different node. diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index 15092c304..49b69af7d 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -37,9 +37,9 @@ func TestMain(m *testing.M) { func TestConfiguredService(t *testing.T) { conf := &SDConfig{ - Services: []string{"configuredServiceName"}} + Services: []string{"configuredServiceName"}, + } consulDiscovery, err := NewDiscovery(conf, nil) - if err != nil { t.Errorf("Unexpected error when initializing discovery %v", err) } @@ -57,7 +57,6 @@ func TestConfiguredServiceWithTag(t *testing.T) { ServiceTags: []string{"http"}, } consulDiscovery, err := NewDiscovery(conf, nil) - if err != nil { t.Errorf("Unexpected error when initializing discovery %v", err) } @@ -153,7 +152,6 @@ func TestConfiguredServiceWithTags(t *testing.T) { for _, tc := range cases { consulDiscovery, err := NewDiscovery(tc.conf, nil) - if err != nil { t.Errorf("Unexpected error when initializing discovery %v", err) } @@ -168,7 +166,6 @@ func TestConfiguredServiceWithTags(t *testing.T) { func TestNonConfiguredService(t *testing.T) { conf := 
&SDConfig{} consulDiscovery, err := NewDiscovery(conf, nil) - if err != nil { t.Errorf("Unexpected error when initializing discovery %v", err) } @@ -310,11 +307,15 @@ func TestNoTargets(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) ch := make(chan []*targetgroup.Group) - go d.Run(ctx, ch) + go func() { + d.Run(ctx, ch) + close(ch) + }() targets := (<-ch)[0].Targets require.Equal(t, 0, len(targets)) cancel() + <-ch } // Watch only the test service. diff --git a/discovery/digitalocean/mock_test.go b/discovery/digitalocean/mock_test.go index edbdf92ba..2f19b5e1a 100644 --- a/discovery/digitalocean/mock_test.go +++ b/discovery/digitalocean/mock_test.go @@ -75,7 +75,8 @@ func (m *SDMock) HandleDropletsList() { panic(err) } } - fmt.Fprint(w, []string{` + fmt.Fprint(w, []string{ + ` { "droplets": [ { diff --git a/discovery/file/file.go b/discovery/file/file.go index 088fa12d4..2371ba194 100644 --- a/discovery/file/file.go +++ b/discovery/file/file.go @@ -25,13 +25,13 @@ import ( "sync" "time" + "github.com/fsnotify/fsnotify" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" - fsnotify "gopkg.in/fsnotify/fsnotify.v1" yaml "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery" diff --git a/discovery/file/file_test.go b/discovery/file/file_test.go index bf50fc257..4a9f8c26f 100644 --- a/discovery/file/file_test.go +++ b/discovery/file/file_test.go @@ -73,7 +73,7 @@ func (t *testRunner) copyFile(src string) string { } // copyFileTo atomically copies a file with a different name to the runner's directory. 
-func (t *testRunner) copyFileTo(src string, name string) string { +func (t *testRunner) copyFileTo(src, name string) string { t.Helper() newf, err := ioutil.TempFile(t.dir, "") @@ -95,7 +95,7 @@ func (t *testRunner) copyFileTo(src string, name string) string { } // writeString writes atomically a string to a file. -func (t *testRunner) writeString(file string, data string) { +func (t *testRunner) writeString(file, data string) { t.Helper() newf, err := ioutil.TempFile(t.dir, "") @@ -477,6 +477,7 @@ func TestRemoveFile(t *testing.T) { }, { Source: fileSource(sdFile, 1), - }}, + }, + }, ) } diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go index 35492e6a0..aa406a1a7 100644 --- a/discovery/hetzner/hcloud.go +++ b/discovery/hetzner/hcloud.go @@ -78,6 +78,7 @@ func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, er ) return d, nil } + func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { servers, err := d.client.Server.All(ctx) if err != nil { diff --git a/discovery/hetzner/mock_test.go b/discovery/hetzner/mock_test.go index 5936d5257..ecf313274 100644 --- a/discovery/hetzner/mock_test.go +++ b/discovery/hetzner/mock_test.go @@ -489,8 +489,10 @@ func (m *SDMock) HandleHcloudNetworks() { }) } -const robotTestUsername = "my-hetzner" -const robotTestPassword = "my-password" +const ( + robotTestUsername = "my-hetzner" + robotTestPassword = "my-password" +) // HandleRobotServers mocks the robot servers list endpoint. 
func (m *SDMock) HandleRobotServers() { diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go index 25f04502f..d0f3e4d94 100644 --- a/discovery/hetzner/robot.go +++ b/discovery/hetzner/robot.go @@ -70,6 +70,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro return d, nil } + func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { req, err := http.NewRequest("GET", d.endpoint+"/server", nil) if err != nil { diff --git a/discovery/http/http_test.go b/discovery/http/http_test.go index e9dc98b8f..896eec1be 100644 --- a/discovery/http/http_test.go +++ b/discovery/http/http_test.go @@ -24,8 +24,9 @@ import ( "github.com/go-kit/log" "github.com/prometheus/common/config" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/discovery/targetgroup" ) func TestHTTPValidRefresh(t *testing.T) { @@ -60,7 +61,6 @@ func TestHTTPValidRefresh(t *testing.T) { }, } require.Equal(t, tgs, expectedTargets) - } func TestHTTPInvalidCode(t *testing.T) { @@ -398,5 +398,4 @@ func TestSourceDisappeared(t *testing.T) { require.Equal(t, test.expectedTargets[i], tgs) } } - } diff --git a/discovery/kubernetes/client_metrics.go b/discovery/kubernetes/client_metrics.go index 45e249be2..3a33e3e8d 100644 --- a/discovery/kubernetes/client_metrics.go +++ b/discovery/kubernetes/client_metrics.go @@ -121,9 +121,11 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer clientGoRequestLatencyMetricVec, ) } -func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code string, method string, host string) { + +func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) { clientGoRequestResultMetricVec.WithLabelValues(code).Inc() } + func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) 
{ clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds()) } @@ -146,21 +148,27 @@ func (f *clientGoWorkqueueMetricsProvider) Register(registerer prometheus.Regist func (f *clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { return clientGoWorkqueueDepthMetricVec.WithLabelValues(name) } + func (f *clientGoWorkqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { return clientGoWorkqueueAddsMetricVec.WithLabelValues(name) } + func (f *clientGoWorkqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { return clientGoWorkqueueLatencyMetricVec.WithLabelValues(name) } + func (f *clientGoWorkqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { return clientGoWorkqueueWorkDurationMetricVec.WithLabelValues(name) } + func (f *clientGoWorkqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { return clientGoWorkqueueUnfinishedWorkSecondsMetricVec.WithLabelValues(name) } + func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name) } + func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { // Retries are not used so the metric is omitted. 
return noopMetric{} diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 49e515a14..510b33f44 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -18,8 +18,6 @@ import ( "net" "strconv" - "github.com/prometheus/prometheus/util/strutil" - "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/pkg/errors" @@ -29,6 +27,7 @@ import ( "k8s.io/client-go/util/workqueue" "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" ) var ( diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index e73d8ba06..335242c3c 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -27,7 +27,7 @@ import ( ) func makeEndpoints() *v1.Endpoints { - var nodeName = "foobar" + nodeName := "foobar" return &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go index e1ca23402..416e41add 100644 --- a/discovery/kubernetes/kubernetes_test.go +++ b/discovery/kubernetes/kubernetes_test.go @@ -86,15 +86,18 @@ func (d k8sDiscoveryTest) Run(t *testing.T) { // Ensure that discovery has a discoverer set. This prevents a race // condition where the above go routine may or may not have set a // discoverer yet. 
+ lastDiscoverersCount := 0 + dis := d.discovery.(*Discovery) for { - dis := d.discovery.(*Discovery) dis.RLock() l := len(dis.discoverers) dis.RUnlock() - if l > 0 { + if l > 0 && l == lastDiscoverersCount { break } - time.Sleep(10 * time.Millisecond) + time.Sleep(100 * time.Millisecond) + + lastDiscoverersCount = l } resChan := make(chan map[string]*targetgroup.Group) @@ -171,13 +174,15 @@ type hasSynced interface { hasSynced() bool } -var _ hasSynced = &Discovery{} -var _ hasSynced = &Node{} -var _ hasSynced = &Endpoints{} -var _ hasSynced = &EndpointSlice{} -var _ hasSynced = &Ingress{} -var _ hasSynced = &Pod{} -var _ hasSynced = &Service{} +var ( + _ hasSynced = &Discovery{} + _ hasSynced = &Node{} + _ hasSynced = &Endpoints{} + _ hasSynced = &EndpointSlice{} + _ hasSynced = &Ingress{} + _ hasSynced = &Pod{} + _ hasSynced = &Service{} +) func (d *Discovery) hasSynced() bool { d.RLock() diff --git a/discovery/kubernetes/node_test.go b/discovery/kubernetes/node_test.go index 4a08eefc8..afdaaf6b2 100644 --- a/discovery/kubernetes/node_test.go +++ b/discovery/kubernetes/node_test.go @@ -25,7 +25,7 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" ) -func makeNode(name, address string, labels map[string]string, annotations map[string]string) *v1.Node { +func makeNode(name, address string, labels, annotations map[string]string) *v1.Node { return &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: name, diff --git a/discovery/legacymanager/manager.go b/discovery/legacymanager/manager.go new file mode 100644 index 000000000..7a3d6b3b8 --- /dev/null +++ b/discovery/legacymanager/manager.go @@ -0,0 +1,357 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package legacymanager + +import ( + "context" + "fmt" + "reflect" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +var ( + failedConfigs = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "prometheus_sd_failed_configs", + Help: "Current number of service discovery configurations that failed to load.", + }, + []string{"name"}, + ) + discoveredTargets = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "prometheus_sd_discovered_targets", + Help: "Current number of discovered targets.", + }, + []string{"name", "config"}, + ) + receivedUpdates = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "prometheus_sd_received_updates_total", + Help: "Total number of update events received from the SD providers.", + }, + []string{"name"}, + ) + delayedUpdates = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "prometheus_sd_updates_delayed_total", + Help: "Total number of update events that couldn't be sent immediately.", + }, + []string{"name"}, + ) + sentUpdates = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "prometheus_sd_updates_total", + Help: "Total number of update events sent to the SD consumers.", + }, + []string{"name"}, + ) +) + +func RegisterMetrics() { + prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates) +} + +type poolKey struct { + setName string + 
provider string +} + +// provider holds a Discoverer instance, its configuration and its subscribers. +type provider struct { + name string + d discovery.Discoverer + subs []string + config interface{} +} + +// NewManager is the Discovery Manager constructor. +func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager)) *Manager { + if logger == nil { + logger = log.NewNopLogger() + } + mgr := &Manager{ + logger: logger, + syncCh: make(chan map[string][]*targetgroup.Group), + targets: make(map[poolKey]map[string]*targetgroup.Group), + discoverCancel: []context.CancelFunc{}, + ctx: ctx, + updatert: 5 * time.Second, + triggerSend: make(chan struct{}, 1), + } + for _, option := range options { + option(mgr) + } + return mgr +} + +// Name sets the name of the manager. +func Name(n string) func(*Manager) { + return func(m *Manager) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.name = n + } +} + +// Manager maintains a set of discovery providers and sends each update to a map channel. +// Targets are grouped by the target set name. +type Manager struct { + logger log.Logger + name string + mtx sync.RWMutex + ctx context.Context + discoverCancel []context.CancelFunc + + // Some Discoverers(eg. k8s) send only the updates for a given target group + // so we use map[tg.Source]*targetgroup.Group to know which group to update. + targets map[poolKey]map[string]*targetgroup.Group + // providers keeps track of SD providers. + providers []*provider + // The sync channel sends the updates as a map where the key is the job value from the scrape config. + syncCh chan map[string][]*targetgroup.Group + + // How long to wait before sending updates to the channel. The variable + // should only be modified in unit tests. + updatert time.Duration + + // The triggerSend channel signals to the manager that new updates have been received from providers. 
+ triggerSend chan struct{} +} + +// Run starts the background processing +func (m *Manager) Run() error { + go m.sender() + for range m.ctx.Done() { + m.cancelDiscoverers() + return m.ctx.Err() + } + return nil +} + +// SyncCh returns a read only channel used by all the clients to receive target updates. +func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group { + return m.syncCh +} + +// ApplyConfig removes all running discovery providers and starts new ones using the provided config. +func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error { + m.mtx.Lock() + defer m.mtx.Unlock() + + for pk := range m.targets { + if _, ok := cfg[pk.setName]; !ok { + discoveredTargets.DeleteLabelValues(m.name, pk.setName) + } + } + m.cancelDiscoverers() + m.targets = make(map[poolKey]map[string]*targetgroup.Group) + m.providers = nil + m.discoverCancel = nil + + failedCount := 0 + for name, scfg := range cfg { + failedCount += m.registerProviders(scfg, name) + discoveredTargets.WithLabelValues(m.name, name).Set(0) + } + failedConfigs.WithLabelValues(m.name).Set(float64(failedCount)) + + for _, prov := range m.providers { + m.startProvider(m.ctx, prov) + } + + return nil +} + +// StartCustomProvider is used for sdtool. Only use this if you know what you're doing. 
+func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker discovery.Discoverer) { + p := &provider{ + name: name, + d: worker, + subs: []string{name}, + } + m.providers = append(m.providers, p) + m.startProvider(ctx, p) +} + +func (m *Manager) startProvider(ctx context.Context, p *provider) { + level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) + ctx, cancel := context.WithCancel(ctx) + updates := make(chan []*targetgroup.Group) + + m.discoverCancel = append(m.discoverCancel, cancel) + + go p.d.Run(ctx, updates) + go m.updater(ctx, p, updates) +} + +func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) { + for { + select { + case <-ctx.Done(): + return + case tgs, ok := <-updates: + receivedUpdates.WithLabelValues(m.name).Inc() + if !ok { + level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) + return + } + + for _, s := range p.subs { + m.updateGroup(poolKey{setName: s, provider: p.name}, tgs) + } + + select { + case m.triggerSend <- struct{}{}: + default: + } + } + } +} + +func (m *Manager) sender() { + ticker := time.NewTicker(m.updatert) + defer ticker.Stop() + + for { + select { + case <-m.ctx.Done(): + return + case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker. 
+ select { + case <-m.triggerSend: + sentUpdates.WithLabelValues(m.name).Inc() + select { + case m.syncCh <- m.allGroups(): + default: + delayedUpdates.WithLabelValues(m.name).Inc() + level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") + select { + case m.triggerSend <- struct{}{}: + default: + } + } + default: + } + } + } +} + +func (m *Manager) cancelDiscoverers() { + for _, c := range m.discoverCancel { + c() + } +} + +func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if _, ok := m.targets[poolKey]; !ok { + m.targets[poolKey] = make(map[string]*targetgroup.Group) + } + for _, tg := range tgs { + if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. + m.targets[poolKey][tg.Source] = tg + } + } +} + +func (m *Manager) allGroups() map[string][]*targetgroup.Group { + m.mtx.RLock() + defer m.mtx.RUnlock() + + tSets := map[string][]*targetgroup.Group{} + n := map[string]int{} + for pkey, tsets := range m.targets { + for _, tg := range tsets { + // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager' + // to signal that it needs to stop all scrape loops for this target set. + tSets[pkey.setName] = append(tSets[pkey.setName], tg) + n[pkey.setName] += len(tg.Targets) + } + } + for setName, v := range n { + discoveredTargets.WithLabelValues(m.name, setName).Set(float64(v)) + } + return tSets +} + +// registerProviders returns a number of failed SD config. 
+func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int { + var ( + failed int + added bool + ) + add := func(cfg discovery.Config) { + for _, p := range m.providers { + if reflect.DeepEqual(cfg, p.config) { + p.subs = append(p.subs, setName) + added = true + return + } + } + typ := cfg.Name() + d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{ + Logger: log.With(m.logger, "discovery", typ), + }) + if err != nil { + level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ) + failed++ + return + } + m.providers = append(m.providers, &provider{ + name: fmt.Sprintf("%s/%d", typ, len(m.providers)), + d: d, + config: cfg, + subs: []string{setName}, + }) + added = true + } + for _, cfg := range cfgs { + add(cfg) + } + if !added { + // Add an empty target group to force the refresh of the corresponding + // scrape pool and to notify the receiver that this target set has no + // current targets. + // It can happen because the combined set of SD configurations is empty + // or because we fail to instantiate all the SD configurations. + add(discovery.StaticConfig{{}}) + } + return failed +} + +// StaticProvider holds a list of target groups that never change. +type StaticProvider struct { + TargetGroups []*targetgroup.Group +} + +// Run implements the Worker interface. +func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + // We still have to consider that the consumer exits right away in which case + // the context will be canceled. 
+ select { + case ch <- sd.TargetGroups: + case <-ctx.Done(): + } + close(ch) +} diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go new file mode 100644 index 000000000..ce2278d23 --- /dev/null +++ b/discovery/legacymanager/manager_test.go @@ -0,0 +1,1140 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package legacymanager + +import ( + "context" + "fmt" + "sort" + "strconv" + "testing" + "time" + + "github.com/go-kit/log" + client_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/testutil" +) + +func TestMain(m *testing.M) { + testutil.TolerantVerifyLeak(m) +} + +// TestTargetUpdatesOrder checks that the target updates are received in the expected order. +func TestTargetUpdatesOrder(t *testing.T) { + // The order by which the updates are send is determined by the interval passed to the mock discovery adapter + // Final targets array is ordered alphabetically by the name of the discoverer. + // For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge. 
+ testCases := []struct { + title string + updates map[string][]update + expectedTargets [][]*targetgroup.Group + }{ + { + title: "Single TP no updates", + updates: map[string][]update{ + "tp1": {}, + }, + expectedTargets: nil, + }, + { + title: "Multiple TPs no updates", + updates: map[string][]update{ + "tp1": {}, + "tp2": {}, + "tp3": {}, + }, + expectedTargets: nil, + }, + { + title: "Single TP empty initials", + updates: map[string][]update{ + "tp1": { + { + targetGroups: []targetgroup.Group{}, + interval: 5 * time.Millisecond, + }, + }, + }, + expectedTargets: [][]*targetgroup.Group{ + {}, + }, + }, + { + title: "Multiple TPs empty initials", + updates: map[string][]update{ + "tp1": { + { + targetGroups: []targetgroup.Group{}, + interval: 5 * time.Millisecond, + }, + }, + "tp2": { + { + targetGroups: []targetgroup.Group{}, + interval: 200 * time.Millisecond, + }, + }, + "tp3": { + { + targetGroups: []targetgroup.Group{}, + interval: 100 * time.Millisecond, + }, + }, + }, + expectedTargets: [][]*targetgroup.Group{ + {}, + {}, + {}, + }, + }, + { + title: "Single TP initials only", + updates: map[string][]update{ + "tp1": { + { + targetGroups: []targetgroup.Group{ + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + }, + }, + }, + expectedTargets: [][]*targetgroup.Group{ + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + }, + }, + { + title: "Multiple TPs initials only", + updates: map[string][]update{ + "tp1": { + { + targetGroups: []targetgroup.Group{ + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + }, + }, + "tp2": { + { + targetGroups: []targetgroup.Group{ + { + Source: 
"tp2_group1", + Targets: []model.LabelSet{{"__instance__": "3"}}, + }, + }, + interval: 10 * time.Millisecond, + }, + }, + }, + expectedTargets: [][]*targetgroup.Group{ + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, { + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + { + Source: "tp2_group1", + Targets: []model.LabelSet{{"__instance__": "3"}}, + }, + }, + }, + }, + { + title: "Single TP initials followed by empty updates", + updates: map[string][]update{ + "tp1": { + { + targetGroups: []targetgroup.Group{ + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + interval: 0, + }, + { + targetGroups: []targetgroup.Group{ + { + Source: "tp1_group1", + Targets: []model.LabelSet{}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{}, + }, + }, + interval: 10 * time.Millisecond, + }, + }, + }, + expectedTargets: [][]*targetgroup.Group{ + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{}, + }, + }, + }, + }, + { + title: "Single TP initials and new groups", + updates: map[string][]update{ + "tp1": { + { + targetGroups: []targetgroup.Group{ + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + interval: 0, + }, + { + targetGroups: []targetgroup.Group{ + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": 
"3"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "4"}}, + }, + { + Source: "tp1_group3", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + }, + interval: 10 * time.Millisecond, + }, + }, + }, + expectedTargets: [][]*targetgroup.Group{ + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "3"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "4"}}, + }, + { + Source: "tp1_group3", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + }, + }, + }, + { + title: "Multiple TPs initials and new groups", + updates: map[string][]update{ + "tp1": { + { + targetGroups: []targetgroup.Group{ + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + interval: 10 * time.Millisecond, + }, + { + targetGroups: []targetgroup.Group{ + { + Source: "tp1_group3", + Targets: []model.LabelSet{{"__instance__": "3"}}, + }, + { + Source: "tp1_group4", + Targets: []model.LabelSet{{"__instance__": "4"}}, + }, + }, + interval: 500 * time.Millisecond, + }, + }, + "tp2": { + { + targetGroups: []targetgroup.Group{ + { + Source: "tp2_group1", + Targets: []model.LabelSet{{"__instance__": "5"}}, + }, + { + Source: "tp2_group2", + Targets: []model.LabelSet{{"__instance__": "6"}}, + }, + }, + interval: 100 * time.Millisecond, + }, + { + targetGroups: []targetgroup.Group{ + { + Source: "tp2_group3", + Targets: []model.LabelSet{{"__instance__": "7"}}, + }, + { + Source: "tp2_group4", + Targets: []model.LabelSet{{"__instance__": "8"}}, + }, + }, + interval: 10 * time.Millisecond, + }, + }, + }, + expectedTargets: [][]*targetgroup.Group{ + { + { + Source: "tp1_group1", + Targets: 
[]model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + { + Source: "tp2_group1", + Targets: []model.LabelSet{{"__instance__": "5"}}, + }, + { + Source: "tp2_group2", + Targets: []model.LabelSet{{"__instance__": "6"}}, + }, + }, + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + { + Source: "tp2_group1", + Targets: []model.LabelSet{{"__instance__": "5"}}, + }, + { + Source: "tp2_group2", + Targets: []model.LabelSet{{"__instance__": "6"}}, + }, + { + Source: "tp2_group3", + Targets: []model.LabelSet{{"__instance__": "7"}}, + }, + { + Source: "tp2_group4", + Targets: []model.LabelSet{{"__instance__": "8"}}, + }, + }, + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + { + Source: "tp1_group3", + Targets: []model.LabelSet{{"__instance__": "3"}}, + }, + { + Source: "tp1_group4", + Targets: []model.LabelSet{{"__instance__": "4"}}, + }, + { + Source: "tp2_group1", + Targets: []model.LabelSet{{"__instance__": "5"}}, + }, + { + Source: "tp2_group2", + Targets: []model.LabelSet{{"__instance__": "6"}}, + }, + { + Source: "tp2_group3", + Targets: []model.LabelSet{{"__instance__": "7"}}, + }, + { + Source: "tp2_group4", + Targets: []model.LabelSet{{"__instance__": "8"}}, + }, + }, + }, + }, + { + title: "One TP initials arrive after other TP updates.", + updates: map[string][]update{ + "tp1": { + { + targetGroups: []targetgroup.Group{ + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: 
[]model.LabelSet{{"__instance__": "2"}}, + }, + }, + interval: 10 * time.Millisecond, + }, + { + targetGroups: []targetgroup.Group{ + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "3"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "4"}}, + }, + }, + interval: 150 * time.Millisecond, + }, + }, + "tp2": { + { + targetGroups: []targetgroup.Group{ + { + Source: "tp2_group1", + Targets: []model.LabelSet{{"__instance__": "5"}}, + }, + { + Source: "tp2_group2", + Targets: []model.LabelSet{{"__instance__": "6"}}, + }, + }, + interval: 200 * time.Millisecond, + }, + { + targetGroups: []targetgroup.Group{ + { + Source: "tp2_group1", + Targets: []model.LabelSet{{"__instance__": "7"}}, + }, + { + Source: "tp2_group2", + Targets: []model.LabelSet{{"__instance__": "8"}}, + }, + }, + interval: 100 * time.Millisecond, + }, + }, + }, + expectedTargets: [][]*targetgroup.Group{ + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "3"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "4"}}, + }, + }, + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "3"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "4"}}, + }, + { + Source: "tp2_group1", + Targets: []model.LabelSet{{"__instance__": "5"}}, + }, + { + Source: "tp2_group2", + Targets: []model.LabelSet{{"__instance__": "6"}}, + }, + }, + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "3"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "4"}}, + }, + { + Source: "tp2_group1", + Targets: []model.LabelSet{{"__instance__": "7"}}, + }, + { + Source: "tp2_group2", + Targets: []model.LabelSet{{"__instance__": "8"}}, + }, + }, + }, + }, + 
+ { + title: "Single TP empty update in between", + updates: map[string][]update{ + "tp1": { + { + targetGroups: []targetgroup.Group{ + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + interval: 30 * time.Millisecond, + }, + { + targetGroups: []targetgroup.Group{ + { + Source: "tp1_group1", + Targets: []model.LabelSet{}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{}, + }, + }, + interval: 10 * time.Millisecond, + }, + { + targetGroups: []targetgroup.Group{ + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "3"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "4"}}, + }, + }, + interval: 300 * time.Millisecond, + }, + }, + }, + expectedTargets: [][]*targetgroup.Group{ + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{}, + }, + }, + { + { + Source: "tp1_group1", + Targets: []model.LabelSet{{"__instance__": "3"}}, + }, + { + Source: "tp1_group2", + Targets: []model.LabelSet{{"__instance__": "4"}}, + }, + }, + }, + }, + } + + for i, tc := range testCases { + tc := tc + t.Run(tc.title, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager.updatert = 100 * time.Millisecond + + var totalUpdatesCount int + provUpdates := make(chan []*targetgroup.Group) + for _, up := range tc.updates { + go newMockDiscoveryProvider(up...).Run(ctx, provUpdates) + if len(up) > 0 { + totalUpdatesCount += len(up) + } + } + + for x := 0; x < totalUpdatesCount; x++ { + select { + case <-ctx.Done(): + t.Fatalf("%d: no 
update arrived within the timeout limit", x) + case tgs := <-provUpdates: + discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs) + for _, got := range discoveryManager.allGroups() { + assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string { + return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v", + x, + got, + expected) + }) + } + } + } + }) + } +} + +func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) { + t.Helper() + + // Need to sort by the groups's source as the received order is not guaranteed. + sort.Sort(byGroupSource(got)) + sort.Sort(byGroupSource(expected)) + + require.Equal(t, expected, got) +} + +func staticConfig(addrs ...string) discovery.StaticConfig { + var cfg discovery.StaticConfig + for i, addr := range addrs { + cfg = append(cfg, &targetgroup.Group{ + Source: fmt.Sprint(i), + Targets: []model.LabelSet{ + {model.AddressLabel: model.LabelValue(addr)}, + }, + }) + } + return cfg +} + +func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Group, poolKey poolKey, label string, present bool) { + t.Helper() + if _, ok := tSets[poolKey]; !ok { + t.Fatalf("'%s' should be present in Pool keys: %v", poolKey, tSets) + return + } + + match := false + var mergedTargets string + for _, targetGroup := range tSets[poolKey] { + for _, l := range targetGroup.Targets { + mergedTargets = mergedTargets + " " + l.String() + if l.String() == label { + match = true + } + } + } + if match != present { + msg := "" + if !present { + msg = "not" + } + t.Fatalf("%q should %s be present in Targets labels: %q", label, msg, mergedTargets) + } +} + +func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager.updatert = 100 * time.Millisecond + go 
discoveryManager.Run() + + c := map[string]discovery.Configs{ + "prometheus": { + staticConfig("foo:9090", "bar:9090"), + }, + } + discoveryManager.ApplyConfig(c) + + <-discoveryManager.SyncCh() + verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) + verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true) + + c["prometheus"] = discovery.Configs{ + staticConfig("foo:9090"), + } + discoveryManager.ApplyConfig(c) + + <-discoveryManager.SyncCh() + verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) + verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false) +} + +func TestDiscovererConfigs(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager.updatert = 100 * time.Millisecond + go discoveryManager.Run() + + c := map[string]discovery.Configs{ + "prometheus": { + staticConfig("foo:9090", "bar:9090"), + staticConfig("baz:9090"), + }, + } + discoveryManager.ApplyConfig(c) + + <-discoveryManager.SyncCh() + verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) + verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true) + verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/1"}, "{__address__=\"baz:9090\"}", true) +} + +// TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after +// removing all targets from the static_configs sends an update with empty targetGroups. 
+// This is required to signal the receiver that this target set has no current targets. +func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager.updatert = 100 * time.Millisecond + go discoveryManager.Run() + + c := map[string]discovery.Configs{ + "prometheus": { + staticConfig("foo:9090"), + }, + } + discoveryManager.ApplyConfig(c) + + <-discoveryManager.SyncCh() + verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) + + c["prometheus"] = discovery.Configs{ + discovery.StaticConfig{{}}, + } + discoveryManager.ApplyConfig(c) + + <-discoveryManager.SyncCh() + + pkey := poolKey{setName: "prometheus", provider: "static/0"} + targetGroups, ok := discoveryManager.targets[pkey] + if !ok { + t.Fatalf("'%v' should be present in target groups", pkey) + } + group, ok := targetGroups[""] + if !ok { + t.Fatalf("missing '' key in target groups %v", targetGroups) + } + + if len(group.Targets) != 0 { + t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) + } +} + +func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + discoveryManager := NewManager(ctx, nil) + discoveryManager.updatert = 100 * time.Millisecond + go discoveryManager.Run() + + c := map[string]discovery.Configs{ + "prometheus": { + staticConfig("foo:9090"), + }, + "prometheus2": { + staticConfig("foo:9090"), + }, + } + discoveryManager.ApplyConfig(c) + + <-discoveryManager.SyncCh() + verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) + verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus2", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) + if 
len(discoveryManager.providers) != 1 { + t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers)) + } +} + +func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) { + originalConfig := discovery.Configs{ + staticConfig("foo:9090", "bar:9090", "baz:9090"), + } + processedConfig := discovery.Configs{ + staticConfig("foo:9090", "bar:9090", "baz:9090"), + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager.updatert = 100 * time.Millisecond + go discoveryManager.Run() + + cfgs := map[string]discovery.Configs{ + "prometheus": processedConfig, + } + discoveryManager.ApplyConfig(cfgs) + <-discoveryManager.SyncCh() + + for _, cfg := range cfgs { + require.Equal(t, originalConfig, cfg) + } +} + +type errorConfig struct{ err error } + +func (e errorConfig) Name() string { return "error" } +func (e errorConfig) NewDiscoverer(discovery.DiscovererOptions) (discovery.Discoverer, error) { + return nil, e.err +} + +func TestGaugeFailedConfigs(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager.updatert = 100 * time.Millisecond + go discoveryManager.Run() + + c := map[string]discovery.Configs{ + "prometheus": { + errorConfig{fmt.Errorf("tests error 0")}, + errorConfig{fmt.Errorf("tests error 1")}, + errorConfig{fmt.Errorf("tests error 2")}, + }, + } + discoveryManager.ApplyConfig(c) + <-discoveryManager.SyncCh() + + failedCount := client_testutil.ToFloat64(failedConfigs) + if failedCount != 3 { + t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount) + } + + c["prometheus"] = discovery.Configs{ + staticConfig("foo:9090"), + } + discoveryManager.ApplyConfig(c) + <-discoveryManager.SyncCh() + + failedCount = client_testutil.ToFloat64(failedConfigs) + if failedCount != 0 { + t.Fatalf("Expected to get no failed config, 
got: %v", failedCount) + } +} + +func TestCoordinationWithReceiver(t *testing.T) { + updateDelay := 100 * time.Millisecond + + type expect struct { + delay time.Duration + tgs map[string][]*targetgroup.Group + } + + testCases := []struct { + title string + providers map[string]discovery.Discoverer + expected []expect + }{ + { + title: "Receiver should get all updates even when one provider closes its channel", + providers: map[string]discovery.Discoverer{ + "once1": &onceProvider{ + tgs: []*targetgroup.Group{ + { + Source: "tg1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + }, + }, + "mock1": newMockDiscoveryProvider( + update{ + interval: 2 * updateDelay, + targetGroups: []targetgroup.Group{ + { + Source: "tg2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + }, + ), + }, + expected: []expect{ + { + tgs: map[string][]*targetgroup.Group{ + "once1": { + { + Source: "tg1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + }, + }, + }, + { + tgs: map[string][]*targetgroup.Group{ + "once1": { + { + Source: "tg1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + }, + "mock1": { + { + Source: "tg2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + }, + }, + }, + }, + { + title: "Receiver should get all updates even when the channel is blocked", + providers: map[string]discovery.Discoverer{ + "mock1": newMockDiscoveryProvider( + update{ + targetGroups: []targetgroup.Group{ + { + Source: "tg1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + }, + }, + update{ + interval: 4 * updateDelay, + targetGroups: []targetgroup.Group{ + { + Source: "tg2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + }, + ), + }, + expected: []expect{ + { + delay: 2 * updateDelay, + tgs: map[string][]*targetgroup.Group{ + "mock1": { + { + Source: "tg1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + }, + }, + }, + { + delay: 4 * updateDelay, + tgs: map[string][]*targetgroup.Group{ + "mock1": { + { 
+ Source: "tg1", + Targets: []model.LabelSet{{"__instance__": "1"}}, + }, + { + Source: "tg2", + Targets: []model.LabelSet{{"__instance__": "2"}}, + }, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.title, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + mgr := NewManager(ctx, nil) + mgr.updatert = updateDelay + go mgr.Run() + + for name, p := range tc.providers { + mgr.StartCustomProvider(ctx, name, p) + } + + for i, expected := range tc.expected { + time.Sleep(expected.delay) + select { + case <-ctx.Done(): + t.Fatalf("step %d: no update received in the expected timeframe", i) + case tgs, ok := <-mgr.SyncCh(): + if !ok { + t.Fatalf("step %d: discovery manager channel is closed", i) + } + if len(tgs) != len(expected.tgs) { + t.Fatalf("step %d: target groups mismatch, got: %d, expected: %d\ngot: %#v\nexpected: %#v", + i, len(tgs), len(expected.tgs), tgs, expected.tgs) + } + for k := range expected.tgs { + if _, ok := tgs[k]; !ok { + t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs) + } + assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string { + return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected) + }) + } + } + } + }) + } +} + +type update struct { + targetGroups []targetgroup.Group + interval time.Duration +} + +type mockdiscoveryProvider struct { + updates []update +} + +func newMockDiscoveryProvider(updates ...update) mockdiscoveryProvider { + tp := mockdiscoveryProvider{ + updates: updates, + } + return tp +} + +func (tp mockdiscoveryProvider) Run(ctx context.Context, upCh chan<- []*targetgroup.Group) { + for _, u := range tp.updates { + if u.interval > 0 { + select { + case <-ctx.Done(): + return + case <-time.After(u.interval): + } + } + tgs := make([]*targetgroup.Group, len(u.targetGroups)) + for i := range u.targetGroups { + tgs[i] = &u.targetGroups[i] + } + upCh <- tgs + 
} + <-ctx.Done() +} + +// byGroupSource implements sort.Interface so we can sort by the Source field. +type byGroupSource []*targetgroup.Group + +func (a byGroupSource) Len() int { return len(a) } +func (a byGroupSource) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byGroupSource) Less(i, j int) bool { return a[i].Source < a[j].Source } + +// onceProvider sends updates once (if any) and closes the update channel. +type onceProvider struct { + tgs []*targetgroup.Group +} + +func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) { + if len(o.tgs) > 0 { + ch <- o.tgs + } + close(ch) +} diff --git a/discovery/legacymanager/registry.go b/discovery/legacymanager/registry.go new file mode 100644 index 000000000..fb01e1648 --- /dev/null +++ b/discovery/legacymanager/registry.go @@ -0,0 +1,259 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package legacymanager + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "sync" + + "gopkg.in/yaml.v2" + + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +const ( + configFieldPrefix = "AUTO_DISCOVERY_" + staticConfigsKey = "static_configs" + staticConfigsFieldName = configFieldPrefix + staticConfigsKey +) + +var ( + configNames = make(map[string]discovery.Config) + configFieldNames = make(map[reflect.Type]string) + configFields []reflect.StructField + + configTypesMu sync.Mutex + configTypes = make(map[reflect.Type]reflect.Type) + + emptyStructType = reflect.TypeOf(struct{}{}) + configsType = reflect.TypeOf(discovery.Configs{}) +) + +// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling. +func RegisterConfig(config discovery.Config) { + registerConfig(config.Name()+"_sd_configs", reflect.TypeOf(config), config) +} + +func init() { + // N.B.: static_configs is the only Config type implemented by default. + // All other types are registered at init by their implementing packages. + elemTyp := reflect.TypeOf(&targetgroup.Group{}) + registerConfig(staticConfigsKey, elemTyp, discovery.StaticConfig{}) +} + +func registerConfig(yamlKey string, elemType reflect.Type, config discovery.Config) { + name := config.Name() + if _, ok := configNames[name]; ok { + panic(fmt.Sprintf("discovery: Config named %q is already registered", name)) + } + configNames[name] = config + + fieldName := configFieldPrefix + yamlKey // Field must be exported. + configFieldNames[elemType] = fieldName + + // Insert fields in sorted order. + i := sort.Search(len(configFields), func(k int) bool { + return fieldName < configFields[k].Name + }) + configFields = append(configFields, reflect.StructField{}) // Add empty field at end. + copy(configFields[i+1:], configFields[i:]) // Shift fields to the right. + configFields[i] = reflect.StructField{ // Write new field in place. 
+ Name: fieldName, + Type: reflect.SliceOf(elemType), + Tag: reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`), + } +} + +func getConfigType(out reflect.Type) reflect.Type { + configTypesMu.Lock() + defer configTypesMu.Unlock() + if typ, ok := configTypes[out]; ok { + return typ + } + // Initial exported fields map one-to-one. + var fields []reflect.StructField + for i, n := 0, out.NumField(); i < n; i++ { + switch field := out.Field(i); { + case field.PkgPath == "" && field.Type != configsType: + fields = append(fields, field) + default: + fields = append(fields, reflect.StructField{ + Name: "_" + field.Name, // Field must be unexported. + PkgPath: out.PkgPath(), + Type: emptyStructType, + }) + } + } + // Append extra config fields on the end. + fields = append(fields, configFields...) + typ := reflect.StructOf(fields) + configTypes[out] = typ + return typ +} + +// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs +// that have a Configs field that should be inlined. +func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error { + outVal := reflect.ValueOf(out) + if outVal.Kind() != reflect.Ptr { + return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out) + } + outVal = outVal.Elem() + if outVal.Kind() != reflect.Struct { + return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out) + } + outTyp := outVal.Type() + + cfgTyp := getConfigType(outTyp) + cfgPtr := reflect.New(cfgTyp) + cfgVal := cfgPtr.Elem() + + // Copy shared fields (defaults) to dynamic value. + var configs *discovery.Configs + for i, n := 0, outVal.NumField(); i < n; i++ { + if outTyp.Field(i).Type == configsType { + configs = outVal.Field(i).Addr().Interface().(*discovery.Configs) + continue + } + if cfgTyp.Field(i).PkgPath != "" { + continue // Field is unexported: ignore. 
+ } + cfgVal.Field(i).Set(outVal.Field(i)) + } + if configs == nil { + return fmt.Errorf("discovery: Configs field not found in type: %T", out) + } + + // Unmarshal into dynamic value. + if err := unmarshal(cfgPtr.Interface()); err != nil { + return replaceYAMLTypeError(err, cfgTyp, outTyp) + } + + // Copy shared fields from dynamic value. + for i, n := 0, outVal.NumField(); i < n; i++ { + if cfgTyp.Field(i).PkgPath != "" { + continue // Field is unexported: ignore. + } + outVal.Field(i).Set(cfgVal.Field(i)) + } + + var err error + *configs, err = readConfigs(cfgVal, outVal.NumField()) + return err +} + +func readConfigs(structVal reflect.Value, startField int) (discovery.Configs, error) { + var ( + configs discovery.Configs + targets []*targetgroup.Group + ) + for i, n := startField, structVal.NumField(); i < n; i++ { + field := structVal.Field(i) + if field.Kind() != reflect.Slice { + panic("discovery: internal error: field is not a slice") + } + for k := 0; k < field.Len(); k++ { + val := field.Index(k) + if val.IsZero() || (val.Kind() == reflect.Ptr && val.Elem().IsZero()) { + key := configFieldNames[field.Type().Elem()] + key = strings.TrimPrefix(key, configFieldPrefix) + return nil, fmt.Errorf("empty or null section in %s", key) + } + switch c := val.Interface().(type) { + case *targetgroup.Group: + // Add index to the static config target groups for unique identification + // within scrape pool. + c.Source = strconv.Itoa(len(targets)) + // Coalesce multiple static configs into a single static config. + targets = append(targets, c) + case discovery.Config: + configs = append(configs, c) + default: + panic("discovery: internal error: slice element is not a Config") + } + } + } + if len(targets) > 0 { + configs = append(configs, discovery.StaticConfig(targets)) + } + return configs, nil +} + +// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs +// that have a Configs field that should be inlined. 
+func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) { + inVal := reflect.ValueOf(in) + for inVal.Kind() == reflect.Ptr { + inVal = inVal.Elem() + } + inTyp := inVal.Type() + + cfgTyp := getConfigType(inTyp) + cfgPtr := reflect.New(cfgTyp) + cfgVal := cfgPtr.Elem() + + // Copy shared fields to dynamic value. + var configs *discovery.Configs + for i, n := 0, inTyp.NumField(); i < n; i++ { + if inTyp.Field(i).Type == configsType { + configs = inVal.Field(i).Addr().Interface().(*discovery.Configs) + } + if cfgTyp.Field(i).PkgPath != "" { + continue // Field is unexported: ignore. + } + cfgVal.Field(i).Set(inVal.Field(i)) + } + if configs == nil { + return nil, fmt.Errorf("discovery: Configs field not found in type: %T", in) + } + + if err := writeConfigs(cfgVal, *configs); err != nil { + return nil, err + } + + return cfgPtr.Interface(), nil +} + +func writeConfigs(structVal reflect.Value, configs discovery.Configs) error { + targets := structVal.FieldByName(staticConfigsFieldName).Addr().Interface().(*[]*targetgroup.Group) + for _, c := range configs { + if sc, ok := c.(discovery.StaticConfig); ok { + *targets = append(*targets, sc...) 
+ continue + } + fieldName, ok := configFieldNames[reflect.TypeOf(c)] + if !ok { + return fmt.Errorf("discovery: cannot marshal unregistered Config type: %T", c) + } + field := structVal.FieldByName(fieldName) + field.Set(reflect.Append(field, reflect.ValueOf(c))) + } + return nil +} + +func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { + if e, ok := err.(*yaml.TypeError); ok { + oldStr := oldTyp.String() + newStr := newTyp.String() + for i, s := range e.Errors { + e.Errors[i] = strings.Replace(s, oldStr, newStr, -1) + } + } + return err +} diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index fad2a74f1..a673b44c6 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -161,8 +161,12 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { if d.lastResults != nil && d.eventPollingEnabled { // Check to see if there have been any events. If so, refresh our data. - opts := linodego.NewListOptions(1, fmt.Sprintf(filterTemplate, d.lastRefreshTimestamp.Format("2006-01-02T15:04:05"))) - events, err := d.client.ListEvents(ctx, opts) + opts := linodego.ListOptions{ + PageOptions: &linodego.PageOptions{Page: 1}, + PageSize: 25, + Filter: fmt.Sprintf(filterTemplate, d.lastRefreshTimestamp.Format("2006-01-02T15:04:05")), + } + events, err := d.client.ListEvents(ctx, &opts) if err != nil { var e *linodego.Error if errors.As(err, &e) && e.Code == http.StatusUnauthorized { @@ -205,13 +209,13 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro } // Gather all linode instances. - instances, err := d.client.ListInstances(ctx, &linodego.ListOptions{}) + instances, err := d.client.ListInstances(ctx, &linodego.ListOptions{PageSize: 500}) if err != nil { return nil, err } // Gather detailed IP address info for all IPs on all linode instances. 
- detailedIPs, err := d.client.ListIPAddresses(ctx, &linodego.ListOptions{}) + detailedIPs, err := d.client.ListIPAddresses(ctx, &linodego.ListOptions{PageSize: 500}) if err != nil { return nil, err } diff --git a/discovery/linode/linode_test.go b/discovery/linode/linode_test.go index a201458a7..67eb8198e 100644 --- a/discovery/linode/linode_test.go +++ b/discovery/linode/linode_test.go @@ -56,7 +56,7 @@ func TestLinodeSDRefresh(t *testing.T) { require.NoError(t, err) endpoint, err := url.Parse(sdmock.Mock.Endpoint()) require.NoError(t, err) - d.client.SetBaseURL(fmt.Sprintf("%s/v4", endpoint.String())) + d.client.SetBaseURL(endpoint.String()) tgs, err := d.refresh(context.Background()) require.NoError(t, err) diff --git a/discovery/manager.go b/discovery/manager.go index b3dae5c59..e10cfc7bd 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -65,7 +65,7 @@ var ( ) ) -func init() { +func RegisterMetrics() { prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates) } @@ -74,12 +74,26 @@ type poolKey struct { provider string } -// provider holds a Discoverer instance, its configuration and its subscribers. +// provider holds a Discoverer instance, its configuration, cancel func and its subscribers. type provider struct { name string d Discoverer - subs []string config interface{} + + cancel context.CancelFunc + // done should be called after cleaning up resources associated with cancelled provider. + done func() + + mu sync.RWMutex + subs map[string]struct{} + + // newSubs is used to temporary store subs to be used upon config reload completion. + newSubs map[string]struct{} +} + +// IsStarted return true if Discoverer is started. +func (p *provider) IsStarted() bool { + return p.cancel != nil } // NewManager is the Discovery Manager constructor. 
@@ -88,13 +102,12 @@ func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager logger = log.NewNopLogger() } mgr := &Manager{ - logger: logger, - syncCh: make(chan map[string][]*targetgroup.Group), - targets: make(map[poolKey]map[string]*targetgroup.Group), - discoverCancel: []context.CancelFunc{}, - ctx: ctx, - updatert: 5 * time.Second, - triggerSend: make(chan struct{}, 1), + logger: logger, + syncCh: make(chan map[string][]*targetgroup.Group), + targets: make(map[poolKey]map[string]*targetgroup.Group), + ctx: ctx, + updatert: 5 * time.Second, + triggerSend: make(chan struct{}, 1), } for _, option := range options { option(mgr) @@ -114,15 +127,16 @@ func Name(n string) func(*Manager) { // Manager maintains a set of discovery providers and sends each update to a map channel. // Targets are grouped by the target set name. type Manager struct { - logger log.Logger - name string - mtx sync.RWMutex - ctx context.Context - discoverCancel []context.CancelFunc + logger log.Logger + name string + mtx sync.RWMutex + ctx context.Context - // Some Discoverers(eg. k8s) send only the updates for a given target group + // Some Discoverers(e.g. k8s) send only the updates for a given target group, // so we use map[tg.Source]*targetgroup.Group to know which group to update. - targets map[poolKey]map[string]*targetgroup.Group + targets map[poolKey]map[string]*targetgroup.Group + targetsMtx sync.Mutex + // providers keeps track of SD providers. providers []*provider // The sync channel sends the updates as a map where the key is the job value from the scrape config. @@ -132,11 +146,14 @@ type Manager struct { // should only be modified in unit tests. updatert time.Duration - // The triggerSend channel signals to the manager that new updates have been received from providers. + // The triggerSend channel signals to the Manager that new updates have been received from providers. 
triggerSend chan struct{} + + // lastProvider counts providers registered during Manager's lifetime. + lastProvider uint } -// Run starts the background processing +// Run starts the background processing. func (m *Manager) Run() error { go m.sender() for range m.ctx.Done() { @@ -151,31 +168,82 @@ func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group { return m.syncCh } -// ApplyConfig removes all running discovery providers and starts new ones using the provided config. +// ApplyConfig checks if discovery provider with supplied config is already running and keeps them as is. +// Remaining providers are then stopped and new required providers are started using the provided config. func (m *Manager) ApplyConfig(cfg map[string]Configs) error { m.mtx.Lock() defer m.mtx.Unlock() - for pk := range m.targets { - if _, ok := cfg[pk.setName]; !ok { - discoveredTargets.DeleteLabelValues(m.name, pk.setName) - } - } - m.cancelDiscoverers() - m.targets = make(map[poolKey]map[string]*targetgroup.Group) - m.providers = nil - m.discoverCancel = nil - - failedCount := 0 + var failedCount int for name, scfg := range cfg { failedCount += m.registerProviders(scfg, name) - discoveredTargets.WithLabelValues(m.name, name).Set(0) } failedConfigs.WithLabelValues(m.name).Set(float64(failedCount)) + var ( + wg sync.WaitGroup + // keep shows if we keep any providers after reload. + keep bool + newProviders []*provider + ) for _, prov := range m.providers { - m.startProvider(m.ctx, prov) + // Cancel obsolete providers. + if len(prov.newSubs) == 0 { + wg.Add(1) + prov.done = func() { + wg.Done() + } + prov.cancel() + continue + } + newProviders = append(newProviders, prov) + // refTargets keeps reference targets used to populate new subs' targets + var refTargets map[string]*targetgroup.Group + prov.mu.Lock() + + m.targetsMtx.Lock() + for s := range prov.subs { + keep = true + refTargets = m.targets[poolKey{s, prov.name}] + // Remove obsolete subs' targets. 
+ if _, ok := prov.newSubs[s]; !ok { + delete(m.targets, poolKey{s, prov.name}) + discoveredTargets.DeleteLabelValues(m.name, s) + } + } + // Set metrics and targets for new subs. + for s := range prov.newSubs { + if _, ok := prov.subs[s]; !ok { + discoveredTargets.WithLabelValues(m.name, s).Set(0) + } + if l := len(refTargets); l > 0 { + m.targets[poolKey{s, prov.name}] = make(map[string]*targetgroup.Group, l) + for k, v := range refTargets { + m.targets[poolKey{s, prov.name}][k] = v + } + } + } + m.targetsMtx.Unlock() + + prov.subs = prov.newSubs + prov.newSubs = map[string]struct{}{} + prov.mu.Unlock() + if !prov.IsStarted() { + m.startProvider(m.ctx, prov) + } } + // Currently downstream managers expect full target state upon config reload, so we must oblige. + // While startProvider does pull the trigger, it may take some time to do so, therefore + // we pull the trigger as soon as possible so that downstream managers can populate their state. + // See https://github.com/prometheus/prometheus/pull/8639 for details. + if keep { + select { + case m.triggerSend <- struct{}{}: + default: + } + } + m.providers = newProviders + wg.Wait() return nil } @@ -185,7 +253,9 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D p := &provider{ name: name, d: worker, - subs: []string{name}, + subs: map[string]struct{}{ + name: {}, + }, } m.providers = append(m.providers, p) m.startProvider(ctx, p) @@ -196,13 +266,29 @@ func (m *Manager) startProvider(ctx context.Context, p *provider) { ctx, cancel := context.WithCancel(ctx) updates := make(chan []*targetgroup.Group) - m.discoverCancel = append(m.discoverCancel, cancel) + p.cancel = cancel go p.d.Run(ctx, updates) go m.updater(ctx, p, updates) } +// cleaner cleans resources associated with provider. 
+func (m *Manager) cleaner(p *provider) { + m.targetsMtx.Lock() + p.mu.RLock() + for s := range p.subs { + delete(m.targets, poolKey{s, p.name}) + } + p.mu.RUnlock() + m.targetsMtx.Unlock() + if p.done != nil { + p.done() + } +} + func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) { + // Ensure targets from this provider are cleaned up. + defer m.cleaner(p) for { select { case <-ctx.Done(): @@ -211,12 +297,16 @@ func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targ receivedUpdates.WithLabelValues(m.name).Inc() if !ok { level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) + // Wait for provider cancellation to ensure targets are cleaned up when expected. + <-ctx.Done() return } - for _, s := range p.subs { + p.mu.RLock() + for s := range p.subs { m.updateGroup(poolKey{setName: s, provider: p.name}, tgs) } + p.mu.RUnlock() select { case m.triggerSend <- struct{}{}: @@ -234,7 +324,7 @@ func (m *Manager) sender() { select { case <-m.ctx.Done(): return - case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker. + case <-ticker.C: // Some discoverers send updates too often, so we throttle these with the ticker. 
select { case <-m.triggerSend: sentUpdates.WithLabelValues(m.name).Inc() @@ -255,14 +345,18 @@ func (m *Manager) sender() { } func (m *Manager) cancelDiscoverers() { - for _, c := range m.discoverCancel { - c() + m.mtx.RLock() + defer m.mtx.RUnlock() + for _, p := range m.providers { + if p.cancel != nil { + p.cancel() + } } } func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { - m.mtx.Lock() - defer m.mtx.Unlock() + m.targetsMtx.Lock() + defer m.targetsMtx.Unlock() if _, ok := m.targets[poolKey]; !ok { m.targets[poolKey] = make(map[string]*targetgroup.Group) @@ -275,11 +369,11 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { } func (m *Manager) allGroups() map[string][]*targetgroup.Group { - m.mtx.RLock() - defer m.mtx.RUnlock() - tSets := map[string][]*targetgroup.Group{} n := map[string]int{} + + m.targetsMtx.Lock() + defer m.targetsMtx.Unlock() for pkey, tsets := range m.targets { for _, tg := range tsets { // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager' @@ -303,7 +397,7 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { add := func(cfg Config) { for _, p := range m.providers { if reflect.DeepEqual(cfg, p.config) { - p.subs = append(p.subs, setName) + p.newSubs[setName] = struct{}{} added = true return } @@ -318,11 +412,14 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { return } m.providers = append(m.providers, &provider{ - name: fmt.Sprintf("%s/%d", typ, len(m.providers)), + name: fmt.Sprintf("%s/%d", typ, m.lastProvider), d: d, config: cfg, - subs: []string{setName}, + newSubs: map[string]struct{}{ + setName: {}, + }, }) + m.lastProvider++ added = true } for _, cfg := range cfgs { diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 0a438306e..80ea1008e 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -18,6 +18,7 @@ import ( "fmt" "sort" "strconv" + "sync" "testing" 
"time" @@ -36,7 +37,6 @@ func TestMain(m *testing.M) { // TestTargetUpdatesOrder checks that the target updates are received in the expected order. func TestTargetUpdatesOrder(t *testing.T) { - // The order by which the updates are send is determined by the interval passed to the mock discovery adapter // Final targets array is ordered alphabetically by the name of the discoverer. // For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge. @@ -116,7 +116,8 @@ func TestTargetUpdatesOrder(t *testing.T) { { Source: "tp1_group2", Targets: []model.LabelSet{{"__instance__": "2"}}, - }}, + }, + }, }, }, }, @@ -718,6 +719,31 @@ func staticConfig(addrs ...string) StaticConfig { return cfg } +func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, key, label string, present bool) { + t.Helper() + if _, ok := tGroups[key]; !ok { + t.Fatalf("'%s' should be present in Group map keys: %v", key, tGroups) + return + } + match := false + var mergedTargets string + for _, targetGroups := range tGroups[key] { + for _, l := range targetGroups.Targets { + mergedTargets = mergedTargets + " " + l.String() + if l.String() == label { + match = true + } + } + } + if match != present { + msg := "" + if !present { + msg = "not" + } + t.Fatalf("%q should %s be present in Group labels: %q", label, msg, mergedTargets) + } +} + func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Group, poolKey poolKey, label string, present bool) { t.Helper() if _, ok := tSets[poolKey]; !ok { @@ -728,14 +754,12 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou match := false var mergedTargets string for _, targetGroup := range tSets[poolKey] { - for _, l := range targetGroup.Targets { mergedTargets = mergedTargets + " " + l.String() if l.String() == label { match = true } } - } if match != present { msg := "" @@ -746,7 +770,180 @@ func verifyPresence(t 
*testing.T, tSets map[poolKey]map[string]*targetgroup.Grou } } -func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) { +func pk(provider, setName string, n int) poolKey { + return poolKey{ + setName: setName, + provider: fmt.Sprintf("%s/%d", provider, n), + } +} + +func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager.updatert = 100 * time.Millisecond + go discoveryManager.Run() + + c := map[string]Configs{ + "prometheus": { + staticConfig("foo:9090"), + }, + } + discoveryManager.ApplyConfig(c) + + syncedTargets := <-discoveryManager.SyncCh() + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + p := pk("static", "prometheus", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(discoveryManager.targets)) + + discoveryManager.ApplyConfig(c) + + syncedTargets = <-discoveryManager.SyncCh() + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(discoveryManager.targets)) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) +} + +func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager.updatert = 100 * time.Millisecond + go discoveryManager.Run() + + c := map[string]Configs{ + "prometheus": { + staticConfig("foo:9090"), + }, + } + discoveryManager.ApplyConfig(c) + + syncedTargets := <-discoveryManager.SyncCh() + require.Equal(t, 1, len(syncedTargets)) + 
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + p := pk("static", "prometheus", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(discoveryManager.targets)) + + c["prometheus2"] = c["prometheus"] + delete(c, "prometheus") + discoveryManager.ApplyConfig(c) + + syncedTargets = <-discoveryManager.SyncCh() + p = pk("static", "prometheus2", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(discoveryManager.targets)) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus2"])) +} + +func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager.updatert = 100 * time.Millisecond + go discoveryManager.Run() + + c := map[string]Configs{ + "prometheus": { + staticConfig("foo:9090"), + }, + } + discoveryManager.ApplyConfig(c) + <-discoveryManager.SyncCh() + + c["prometheus2"] = c["prometheus"] + discoveryManager.ApplyConfig(c) + syncedTargets := <-discoveryManager.SyncCh() + require.Equal(t, 2, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus2"])) + p := pk("static", "prometheus", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + require.Equal(t, 2, len(discoveryManager.targets)) + + delete(c, "prometheus") + discoveryManager.ApplyConfig(c) + syncedTargets 
= <-discoveryManager.SyncCh() + p = pk("static", "prometheus2", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(discoveryManager.targets)) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus2"])) +} + +func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager.updatert = 100 * time.Millisecond + go discoveryManager.Run() + + c := map[string]Configs{ + "prometheus": { + staticConfig("foo:9090"), + }, + } + discoveryManager.ApplyConfig(c) + + syncedTargets := <-discoveryManager.SyncCh() + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + + var mu sync.Mutex + c["prometheus2"] = Configs{ + lockStaticConfig{ + mu: &mu, + config: staticConfig("bar:9090"), + }, + } + mu.Lock() + discoveryManager.ApplyConfig(c) + + // Original targets should be present as soon as possible. + syncedTargets = <-discoveryManager.SyncCh() + mu.Unlock() + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + + // prometheus2 configs should be ready on second sync. 
+ syncedTargets = <-discoveryManager.SyncCh() + require.Equal(t, 2, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"bar:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus2"])) + + p := pk("static", "prometheus", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + p = pk("lockstatic", "prometheus2", 1) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true) + require.Equal(t, 2, len(discoveryManager.targets)) + + // Delete part of config and ensure only original targets exist. + delete(c, "prometheus2") + discoveryManager.ApplyConfig(c) + syncedTargets = <-discoveryManager.SyncCh() + require.Equal(t, 1, len(discoveryManager.targets)) + verifyPresence(t, discoveryManager.targets, pk("static", "prometheus", 0), "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) +} + +func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() discoveryManager := NewManager(ctx, log.NewNopLogger()) @@ -760,18 +957,29 @@ func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) { } discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true) + syncedTargets := <-discoveryManager.SyncCh() + p := pk("static", "prometheus", 0) + verifyPresence(t, discoveryManager.targets, p, 
"{__address__=\"foo:9090\"}", true) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true) + require.Equal(t, 1, len(discoveryManager.targets)) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true) + require.Equal(t, 2, len(syncedTargets["prometheus"])) c["prometheus"] = Configs{ staticConfig("foo:9090"), } discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false) + syncedTargets = <-discoveryManager.SyncCh() + require.Equal(t, 1, len(discoveryManager.targets)) + p = pk("static", "prometheus", 1) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", false) + require.Equal(t, 1, len(discoveryManager.targets)) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) } func TestDiscovererConfigs(t *testing.T) { @@ -789,10 +997,18 @@ func TestDiscovererConfigs(t *testing.T) { } discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/1"}, "{__address__=\"baz:9090\"}", true) + syncedTargets := 
<-discoveryManager.SyncCh() + p := pk("static", "prometheus", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true) + p = pk("static", "prometheus", 1) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"baz:9090\"}", true) + require.Equal(t, 2, len(discoveryManager.targets)) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"baz:9090\"}", true) + require.Equal(t, 3, len(syncedTargets["prometheus"])) } // TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after @@ -812,20 +1028,23 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { } discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) + syncedTargets := <-discoveryManager.SyncCh() + p := pk("static", "prometheus", 0) + verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) c["prometheus"] = Configs{ StaticConfig{{}}, } discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - - pkey := poolKey{setName: "prometheus", provider: "static/0"} - targetGroups, ok := discoveryManager.targets[pkey] + syncedTargets = <-discoveryManager.SyncCh() + p = pk("static", "prometheus", 1) + targetGroups, ok := discoveryManager.targets[p] if !ok { - t.Fatalf("'%v' should be present in target groups", pkey) + t.Fatalf("'%v' should be present in target groups", p) } 
group, ok := targetGroups[""] if !ok { @@ -835,6 +1054,11 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { if len(group.Targets) != 0 { t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) } + require.Equal(t, 1, len(syncedTargets)) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil { + t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls) + } } func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { @@ -854,12 +1078,17 @@ func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { } discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus2", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) + syncedTargets := <-discoveryManager.SyncCh() + verifyPresence(t, discoveryManager.targets, pk("static", "prometheus", 0), "{__address__=\"foo:9090\"}", true) + verifyPresence(t, discoveryManager.targets, pk("static", "prometheus2", 0), "{__address__=\"foo:9090\"}", true) if len(discoveryManager.providers) != 1 { t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers)) } + require.Equal(t, 2, len(syncedTargets)) + verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus"])) + verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) + require.Equal(t, 1, len(syncedTargets["prometheus2"])) } func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) { @@ -891,6 +1120,29 @@ type errorConfig struct{ err error } func (e errorConfig) Name() string { return "error" } func (e errorConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { return nil, e.err } +type lockStaticConfig 
struct { + mu *sync.Mutex + config StaticConfig +} + +func (s lockStaticConfig) Name() string { return "lockstatic" } +func (s lockStaticConfig) NewDiscoverer(options DiscovererOptions) (Discoverer, error) { + return (lockStaticDiscoverer)(s), nil +} + +type lockStaticDiscoverer lockStaticConfig + +func (s lockStaticDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.Group) { + // TODO: existing implementation closes up chan, but documentation explicitly forbids it...? + defer close(up) + s.mu.Lock() + defer s.mu.Unlock() + select { + case <-ctx.Done(): + case up <- s.config: + } +} + func TestGaugeFailedConfigs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -923,7 +1175,6 @@ func TestGaugeFailedConfigs(t *testing.T) { if failedCount != 0 { t.Fatalf("Expected to get no failed config, got: %v", failedCount) } - } func TestCoordinationWithReceiver(t *testing.T) { @@ -1115,7 +1366,11 @@ func (tp mockdiscoveryProvider) Run(ctx context.Context, upCh chan<- []*targetgr for i := range u.targetGroups { tgs[i] = &u.targetGroups[i] } - upCh <- tgs + select { + case <-ctx.Done(): + return + case upCh <- tgs: + } } <-ctx.Done() } @@ -1138,3 +1393,91 @@ func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) { } close(ch) } + +// TestTargetSetTargetGroupsUpdateDuringApplyConfig is used to detect races when +// ApplyConfig happens at the same time as targets update. 
+func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager.updatert = 100 * time.Millisecond + go discoveryManager.Run() + + td := newTestDiscoverer() + + c := map[string]Configs{ + "prometheus": { + td, + }, + } + discoveryManager.ApplyConfig(c) + + var wg sync.WaitGroup + wg.Add(2000) + + start := make(chan struct{}) + for i := 0; i < 1000; i++ { + go func() { + <-start + td.update([]*targetgroup.Group{ + { + Targets: []model.LabelSet{ + {model.AddressLabel: model.LabelValue("127.0.0.1:9090")}, + }, + }, + }) + wg.Done() + }() + } + + for i := 0; i < 1000; i++ { + go func(i int) { + <-start + c := map[string]Configs{ + fmt.Sprintf("prometheus-%d", i): { + td, + }, + } + discoveryManager.ApplyConfig(c) + wg.Done() + }(i) + } + + close(start) + wg.Wait() +} + +// testDiscoverer is a config and a discoverer that can adjust targets with a +// simple function. +type testDiscoverer struct { + up chan<- []*targetgroup.Group + ready chan struct{} +} + +func newTestDiscoverer() *testDiscoverer { + return &testDiscoverer{ + ready: make(chan struct{}), + } +} + +// Name implements Config. +func (t *testDiscoverer) Name() string { + return "test" +} + +// NewDiscoverer implements Config. +func (t *testDiscoverer) NewDiscoverer(DiscovererOptions) (Discoverer, error) { + return t, nil +} + +// Run implements Discoverer. 
+func (t *testDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.Group) { + t.up = up + close(t.ready) + <-ctx.Done() +} + +func (t *testDiscoverer) update(tgs []*targetgroup.Group) { + <-t.ready + t.up <- tgs +} diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index 30c32bb5d..5688efedd 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -478,7 +478,6 @@ func targetsForApp(app *app) []model.LabelSet { // Generate a target endpoint string in host:port format. func targetEndpoint(task *task, port uint32, containerNet bool) string { - var host string // Use the task's ipAddress field when it's in a container network @@ -493,7 +492,6 @@ func targetEndpoint(task *task, port uint32, containerNet bool) string { // Get a list of ports and a list of labels from a PortMapping. func extractPortMapping(portMappings []portMapping, containerNet bool) ([]uint32, []map[string]string) { - ports := make([]uint32, len(portMappings)) labels := make([]map[string]string, len(portMappings)) diff --git a/discovery/marathon/marathon_test.go b/discovery/marathon/marathon_test.go index 8ac9713a2..4310ed847 100644 --- a/discovery/marathon/marathon_test.go +++ b/discovery/marathon/marathon_test.go @@ -29,11 +29,14 @@ import ( var ( marathonValidLabel = map[string]string{"prometheus": "yes"} testServers = []string{"http://localhost:8080"} - conf = SDConfig{Servers: testServers} ) +func testConfig() SDConfig { + return SDConfig{Servers: testServers} +} + func testUpdateServices(client appListClient) ([]*targetgroup.Group, error) { - md, err := NewDiscovery(conf, nil) + md, err := NewDiscovery(testConfig(), nil) if err != nil { return nil, err } @@ -60,9 +63,7 @@ func TestMarathonSDHandleError(t *testing.T) { } func TestMarathonSDEmptyList(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return &appList{}, nil } - ) + client := func(_ context.Context, _ *http.Client, _ 
string) (*appList, error) { return &appList{}, nil } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -99,11 +100,9 @@ func marathonTestAppList(labels map[string]string, runningTasks int) *appList { } func TestMarathonSDSendGroup(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestAppList(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestAppList(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -130,7 +129,7 @@ func TestMarathonSDSendGroup(t *testing.T) { } func TestMarathonSDRemoveApp(t *testing.T) { - md, err := NewDiscovery(conf, nil) + md, err := NewDiscovery(testConfig(), nil) if err != nil { t.Fatalf("%s", err) } @@ -195,11 +194,9 @@ func marathonTestAppListWithMultiplePorts(labels map[string]string, runningTasks } func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -254,11 +251,9 @@ func marathonTestZeroTaskPortAppList(labels map[string]string, runningTasks int) } func TestMarathonZeroTaskPorts(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if 
err != nil { t.Fatalf("Got error: %s", err) @@ -286,13 +281,6 @@ func Test500ErrorHttpResponseWithValidJSONBody(t *testing.T) { // Create a test server with mock HTTP handler. ts := httptest.NewServer(http.HandlerFunc(respHandler)) defer ts.Close() - // Backup conf for future tests. - backupConf := conf - defer func() { - conf = backupConf - }() - // Setup conf for the test case. - conf = SDConfig{Servers: []string{ts.URL}} // Execute test case and validate behavior. _, err := testUpdateServices(nil) if err == nil { @@ -331,11 +319,9 @@ func marathonTestAppListWithPortDefinitions(labels map[string]string, runningTas } func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -403,11 +389,9 @@ func marathonTestAppListWithPortDefinitionsRequirePorts(labels map[string]string } func TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -470,11 +454,9 @@ func marathonTestAppListWithPorts(labels map[string]string, runningTasks int) *a } func TestMarathonSDSendGroupWithPorts(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return 
marathonTestAppListWithPorts(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestAppListWithPorts(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -546,11 +528,9 @@ func marathonTestAppListWithContainerPortMappings(labels map[string]string, runn } func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -622,11 +602,9 @@ func marathonTestAppListWithDockerContainerPortMappings(labels map[string]string } func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) @@ -702,11 +680,9 @@ func marathonTestAppListWithContainerNetworkAndPortMappings(labels map[string]st } func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) { - var ( - client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { - return marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil - } - ) + client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { + return 
marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil + } tgs, err := testUpdateServices(client) if err != nil { t.Fatalf("Got error: %s", err) diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go index deabcdd1e..162833ece 100644 --- a/discovery/moby/docker.go +++ b/discovery/moby/docker.go @@ -28,6 +28,7 @@ import ( "github.com/go-kit/log" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" diff --git a/discovery/moby/network.go b/discovery/moby/network.go index 3982e5777..0e0d0041d 100644 --- a/discovery/moby/network.go +++ b/discovery/moby/network.go @@ -19,6 +19,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/client" + "github.com/prometheus/prometheus/util/strutil" ) diff --git a/discovery/openstack/hypervisor.go b/discovery/openstack/hypervisor.go index 594f4e433..877b3eb9b 100644 --- a/discovery/openstack/hypervisor.go +++ b/discovery/openstack/hypervisor.go @@ -51,8 +51,10 @@ type HypervisorDiscovery struct { // newHypervisorDiscovery returns a new hypervisor discovery. 
func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, port int, region string, availability gophercloud.Availability, l log.Logger) *HypervisorDiscovery { - return &HypervisorDiscovery{provider: provider, authOpts: opts, - region: region, port: port, availability: availability, logger: l} + return &HypervisorDiscovery{ + provider: provider, authOpts: opts, + region: region, port: port, availability: availability, logger: l, + } } func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { diff --git a/discovery/openstack/hypervisor_test.go b/discovery/openstack/hypervisor_test.go index 7556a88f3..396d5283d 100644 --- a/discovery/openstack/hypervisor_test.go +++ b/discovery/openstack/hypervisor_test.go @@ -47,7 +47,6 @@ func (s *OpenstackSDHypervisorTestSuite) openstackAuthSuccess() (refresher, erro } func TestOpenstackSDHypervisorRefresh(t *testing.T) { - mock := &OpenstackSDHypervisorTestSuite{} mock.SetupTest(t) diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index ab2221f4f..fa4039bea 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -63,8 +63,10 @@ func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gopherclou if l == nil { l = log.NewNopLogger() } - return &InstanceDiscovery{provider: provider, authOpts: opts, - region: region, port: port, allTenants: allTenants, availability: availability, logger: l} + return &InstanceDiscovery{ + provider: provider, authOpts: opts, + region: region, port: port, allTenants: allTenants, availability: availability, logger: l, + } } type floatingIPKey struct { diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go index 8275727a6..d47cb0020 100644 --- a/discovery/openstack/instance_test.go +++ b/discovery/openstack/instance_test.go @@ -51,7 +51,6 @@ func (s *OpenstackSDInstanceTestSuite) openstackAuthSuccess() (refresher, error) } func 
TestOpenstackSDInstanceRefresh(t *testing.T) { - mock := &OpenstackSDInstanceTestSuite{} mock.SetupTest(t) diff --git a/discovery/openstack/mock_test.go b/discovery/openstack/mock_test.go index f29d647d4..e64c33648 100644 --- a/discovery/openstack/mock_test.go +++ b/discovery/openstack/mock_test.go @@ -54,7 +54,7 @@ func testMethod(t *testing.T, r *http.Request, expected string) { } } -func testHeader(t *testing.T, r *http.Request, header string, expected string) { +func testHeader(t *testing.T, r *http.Request, header, expected string) { if actual := r.Header.Get(header); expected != actual { t.Errorf("Header %s = %s, expected %s", header, actual, expected) } diff --git a/discovery/openstack/openstack.go b/discovery/openstack/openstack.go index 2a341976e..932a4f5c2 100644 --- a/discovery/openstack/openstack.go +++ b/discovery/openstack/openstack.go @@ -145,7 +145,6 @@ func NewDiscovery(conf *SDConfig, l log.Logger) (*refresh.Discovery, error) { time.Duration(conf.RefreshInterval), r.refresh, ), nil - } func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) { diff --git a/discovery/puppetdb/puppetdb_test.go b/discovery/puppetdb/puppetdb_test.go index 3fcfab549..25340bea7 100644 --- a/discovery/puppetdb/puppetdb_test.go +++ b/discovery/puppetdb/puppetdb_test.go @@ -25,8 +25,9 @@ import ( "github.com/go-kit/log" "github.com/prometheus/common/config" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/discovery/targetgroup" ) func mockServer(t *testing.T) *httptest.Server { diff --git a/discovery/puppetdb/resources.go b/discovery/puppetdb/resources.go index 64b3a781e..27792b646 100644 --- a/discovery/puppetdb/resources.go +++ b/discovery/puppetdb/resources.go @@ -18,6 +18,7 @@ import ( "strings" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/util/strutil" ) diff --git a/discovery/scaleway/baremetal.go 
b/discovery/scaleway/baremetal.go index 9e002b987..c313e6695 100644 --- a/discovery/scaleway/baremetal.go +++ b/discovery/scaleway/baremetal.go @@ -25,10 +25,11 @@ import ( "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" - "github.com/prometheus/prometheus/discovery/refresh" - "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/scaleway/scaleway-sdk-go/api/baremetal/v1" "github.com/scaleway/scaleway-sdk-go/scw" + + "github.com/prometheus/prometheus/discovery/refresh" + "github.com/prometheus/prometheus/discovery/targetgroup" ) type baremetalDiscovery struct { diff --git a/discovery/scaleway/instance.go b/discovery/scaleway/instance.go index a78c5e93c..67311216d 100644 --- a/discovery/scaleway/instance.go +++ b/discovery/scaleway/instance.go @@ -25,10 +25,11 @@ import ( "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" - "github.com/prometheus/prometheus/discovery/refresh" - "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/scaleway/scaleway-sdk-go/api/instance/v1" "github.com/scaleway/scaleway-sdk-go/scw" + + "github.com/prometheus/prometheus/discovery/refresh" + "github.com/prometheus/prometheus/discovery/targetgroup" ) const ( diff --git a/discovery/scaleway/scaleway.go b/discovery/scaleway/scaleway.go index c8689cb94..ed3d7f391 100644 --- a/discovery/scaleway/scaleway.go +++ b/discovery/scaleway/scaleway.go @@ -24,10 +24,11 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/scaleway/scaleway-sdk-go/scw" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/scaleway/scaleway-sdk-go/scw" ) // metaLabelPrefix is the meta prefix used for all meta labels. 
@@ -173,8 +174,7 @@ func init() { // Discovery periodically performs Scaleway requests. It implements // the Discoverer interface. -type Discovery struct { -} +type Discovery struct{} func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) { r, err := newRefresher(conf) diff --git a/discovery/targetgroup/targetgroup_test.go b/discovery/targetgroup/targetgroup_test.go index bf0d99553..fe9587eb8 100644 --- a/discovery/targetgroup/targetgroup_test.go +++ b/discovery/targetgroup/targetgroup_test.go @@ -38,7 +38,8 @@ func TestTargetGroupStrictJsonUnmarshal(t *testing.T) { expectedReply: nil, expectedGroup: Group{Targets: []model.LabelSet{ {"__address__": "localhost:9090"}, - {"__address__": "localhost:9091"}}, Labels: model.LabelSet{"my": "label"}}, + {"__address__": "localhost:9091"}, + }, Labels: model.LabelSet{"my": "label"}}, }, { json: ` {"label": {},"targets": []}`, @@ -56,7 +57,6 @@ func TestTargetGroupStrictJsonUnmarshal(t *testing.T) { require.Equal(t, test.expectedReply, actual) require.Equal(t, test.expectedGroup, tg) } - } func TestTargetGroupYamlMarshal(t *testing.T) { @@ -81,10 +81,13 @@ func TestTargetGroupYamlMarshal(t *testing.T) { }, { // targets only exposes addresses. 
- group: Group{Targets: []model.LabelSet{ - {"__address__": "localhost:9090"}, - {"__address__": "localhost:9091"}}, - Labels: model.LabelSet{"foo": "bar", "bar": "baz"}}, + group: Group{ + Targets: []model.LabelSet{ + {"__address__": "localhost:9090"}, + {"__address__": "localhost:9091"}, + }, + Labels: model.LabelSet{"foo": "bar", "bar": "baz"}, + }, expectedYaml: "targets:\n- localhost:9090\n- localhost:9091\nlabels:\n bar: baz\n foo: bar\n", expectedErr: nil, }, @@ -120,7 +123,8 @@ func TestTargetGroupYamlUnmarshal(t *testing.T) { expectedReply: nil, expectedGroup: Group{Targets: []model.LabelSet{ {"__address__": "localhost:9090"}, - {"__address__": "localhost:9191"}}, Labels: model.LabelSet{"my": "label"}}, + {"__address__": "localhost:9191"}, + }, Labels: model.LabelSet{"my": "label"}}, }, { // incorrect syntax. @@ -135,21 +139,25 @@ func TestTargetGroupYamlUnmarshal(t *testing.T) { require.Equal(t, test.expectedReply, actual) require.Equal(t, test.expectedGroup, tg) } - } func TestString(t *testing.T) { // String() should return only the source, regardless of other attributes. 
group1 := - Group{Targets: []model.LabelSet{ - {"__address__": "localhost:9090"}, - {"__address__": "localhost:9091"}}, + Group{ + Targets: []model.LabelSet{ + {"__address__": "localhost:9090"}, + {"__address__": "localhost:9091"}, + }, Source: "", - Labels: model.LabelSet{"foo": "bar", "bar": "baz"}} + Labels: model.LabelSet{"foo": "bar", "bar": "baz"}, + } group2 := - Group{Targets: []model.LabelSet{}, - Source: "", - Labels: model.LabelSet{}} + Group{ + Targets: []model.LabelSet{}, + Source: "", + Labels: model.LabelSet{}, + } require.Equal(t, "", group1.String()) require.Equal(t, "", group2.String()) require.Equal(t, group1.String(), group2.String()) diff --git a/discovery/triton/triton.go b/discovery/triton/triton.go index 187d31ab6..66efd9bbc 100644 --- a/discovery/triton/triton.go +++ b/discovery/triton/triton.go @@ -188,9 +188,9 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { case "cn": endpointFormat = "https://%s:%d/v%d/gz/discover" default: - return nil, errors.New(fmt.Sprintf("unknown role '%s' in configuration", d.sdConfig.Role)) + return nil, fmt.Errorf("unknown role '%s' in configuration", d.sdConfig.Role) } - var endpoint = fmt.Sprintf(endpointFormat, d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version) + endpoint := fmt.Sprintf(endpointFormat, d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version) if len(d.sdConfig.Groups) > 0 { groups := url.QueryEscape(strings.Join(d.sdConfig.Groups, ",")) endpoint = fmt.Sprintf("%s?groups=%s", endpoint, groups) @@ -223,7 +223,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { case "cn": return d.processComputeNodeResponse(data, endpoint) default: - return nil, errors.New(fmt.Sprintf("unknown role '%s' in configuration", d.sdConfig.Role)) + return nil, fmt.Errorf("unknown role '%s' in configuration", d.sdConfig.Role) } } diff --git a/discovery/triton/triton_test.go b/discovery/triton/triton_test.go index f83ffcc97..ca3896532 100644 --- 
a/discovery/triton/triton_test.go +++ b/discovery/triton/triton_test.go @@ -135,8 +135,7 @@ func TestTritonSDRefreshNoTargets(t *testing.T) { } func TestTritonSDRefreshMultipleTargets(t *testing.T) { - var ( - dstr = `{"containers":[ + dstr := `{"containers":[ { "groups":["foo","bar","baz"], "server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131", @@ -153,7 +152,6 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) { "vm_uuid":"7b27a514-89d7-11e6-bee6-3f96f367bee7" }] }` - ) tgts := testTritonSDRefresh(t, conf, dstr) require.NotNil(t, tgts) @@ -161,9 +159,7 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) { } func TestTritonSDRefreshNoServer(t *testing.T) { - var ( - td, _ = newTritonDiscovery(conf) - ) + td, _ := newTritonDiscovery(conf) _, err := td.refresh(context.Background()) require.Error(t, err) @@ -171,9 +167,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) { } func TestTritonSDRefreshCancelled(t *testing.T) { - var ( - td, _ = newTritonDiscovery(conf) - ) + td, _ := newTritonDiscovery(conf) ctx, cancel := context.WithCancel(context.Background()) cancel() @@ -183,8 +177,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) { } func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) { - var ( - dstr = `{"cns":[ + dstr := `{"cns":[ { "server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131" }, @@ -192,7 +185,6 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) { "server_uuid":"a5894692-bd32-4ca1-908a-e2dda3c3a5e6" }] }` - ) tgts := testTritonSDRefresh(t, cnconf, dstr) require.NotNil(t, tgts) @@ -200,8 +192,7 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) { } func TestTritonSDRefreshCNsWithHostname(t *testing.T) { - var ( - dstr = `{"cns":[ + dstr := `{"cns":[ { "server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131", "server_hostname": "server01" @@ -211,7 +202,6 @@ func TestTritonSDRefreshCNsWithHostname(t *testing.T) { "server_hostname": "server02" }] }` - ) tgts := testTritonSDRefresh(t, cnconf, dstr) require.NotNil(t, tgts) diff --git 
a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index 080f17a8c..df8efeeaf 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -71,7 +71,6 @@ type SDConfig struct { RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` } -// Uyuni API Response structures type systemGroupID struct { GroupID int `xmlrpc:"id"` GroupName string `xmlrpc:"name"` @@ -120,7 +119,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) - if err != nil { return err } @@ -142,20 +140,17 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } -// Attempt to login in Uyuni Server and get an auth token -func login(rpcclient *xmlrpc.Client, user string, pass string) (string, error) { +func login(rpcclient *xmlrpc.Client, user, pass string) (string, error) { var result string err := rpcclient.Call("auth.login", []interface{}{user, pass}, &result) return result, err } -// Logout from Uyuni API func logout(rpcclient *xmlrpc.Client, token string) error { return rpcclient.Call("auth.logout", token, nil) } -// Get the system groups information of monitored clients -func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token string, entitlement string) (map[int][]systemGroupID, error) { +func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token, entitlement string) (map[int][]systemGroupID, error) { var systemGroupsInfos []struct { SystemID int `xmlrpc:"id"` SystemGroups []systemGroupID `xmlrpc:"system_groups"` @@ -173,7 +168,6 @@ func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token strin return result, nil } -// GetSystemNetworkInfo lists client FQDNs. 
func getNetworkInformationForSystems(rpcclient *xmlrpc.Client, token string, systemIDs []int) (map[int]networkInfo, error) { var networkInfos []networkInfo err := rpcclient.Call("system.getNetworkForSystems", []interface{}{token, systemIDs}, &networkInfos) @@ -188,7 +182,6 @@ func getNetworkInformationForSystems(rpcclient *xmlrpc.Client, token string, sys return result, nil } -// Get endpoints information for given systems func getEndpointInfoForSystems( rpcclient *xmlrpc.Client, token string, @@ -210,7 +203,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { *apiURL = *conf.Server.URL apiURL.Path = path.Join(apiURL.Path, uyuniXMLRPCAPIPath) - rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "uyuni_sd", config.WithHTTP2Disabled()) + rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "uyuni_sd") if err != nil { return nil, err } @@ -240,7 +233,6 @@ func (d *Discovery) getEndpointLabels( systemGroupIDs []systemGroupID, networkInfo networkInfo, ) model.LabelSet { - var addr, scheme string managedGroupNames := getSystemGroupNames(systemGroupIDs) addr = fmt.Sprintf("%s:%d", networkInfo.Hostname, endpoint.Port) @@ -280,7 +272,6 @@ func (d *Discovery) getTargetsForSystems( token string, entitlement string, ) ([]model.LabelSet, error) { - result := make([]model.LabelSet, 0) systemGroupIDsBySystemID, err := getSystemGroupsInfoOfMonitoredClients(rpcClient, token, entitlement) diff --git a/discovery/xds/client_test.go b/discovery/xds/client_test.go index 1c0e321d3..ff5217359 100644 --- a/discovery/xds/client_test.go +++ b/discovery/xds/client_test.go @@ -26,8 +26,8 @@ import ( "google.golang.org/protobuf/types/known/anypb" ) -var ( - httpResourceConf = &HTTPResourceClientConfig{ +func testHTTPResourceConfig() *HTTPResourceClientConfig { + return &HTTPResourceClientConfig{ HTTPClientConfig: config.HTTPClientConfig{ TLSConfig: config.TLSConfig{InsecureSkipVerify: true}, }, @@ -37,11 +37,10 @@ var ( Server: 
"http://localhost", ClientID: "test-id", } -) +} func urlMustParse(str string) *url.URL { parsed, err := url.Parse(str) - if err != nil { panic(err) } @@ -92,7 +91,6 @@ func TestCreateNewHTTPResourceClient(t *testing.T) { require.Equal(t, client.endpoint, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1") require.Equal(t, client.client.Timeout, 1*time.Minute) - } func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, protocolVersion ProtocolVersion, responder discoveryResponder) (*HTTPResourceClient, func()) { @@ -110,7 +108,7 @@ func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, } func TestHTTPResourceClientFetchEmptyResponse(t *testing.T) { - client, cleanup := createTestHTTPResourceClient(t, httpResourceConf, ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) { + client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) { return nil, nil }) defer cleanup() @@ -121,7 +119,7 @@ func TestHTTPResourceClientFetchEmptyResponse(t *testing.T) { } func TestHTTPResourceClientFetchFullResponse(t *testing.T) { - client, cleanup := createTestHTTPResourceClient(t, httpResourceConf, ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) { + client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) { if request.VersionInfo == "1" { return nil, nil } @@ -150,7 +148,7 @@ func TestHTTPResourceClientFetchFullResponse(t *testing.T) { } func TestHTTPResourceClientServerError(t *testing.T) { - client, cleanup := createTestHTTPResourceClient(t, httpResourceConf, ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) { + client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(request *v3.DiscoveryRequest) 
(*v3.DiscoveryResponse, error) { return nil, errors.New("server error") }) defer cleanup() diff --git a/discovery/xds/kuma.go b/discovery/xds/kuma.go index 77f9f0561..5ef573720 100644 --- a/discovery/xds/kuma.go +++ b/discovery/xds/kuma.go @@ -103,11 +103,7 @@ func (c *KumaSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return errors.Errorf("kuma SD server must not be empty and have a scheme: %s", c.Server) } - if err := c.HTTPClientConfig.Validate(); err != nil { - return err - } - - return nil + return c.HTTPClientConfig.Validate() } func (c *KumaSDConfig) Name() string { diff --git a/discovery/xds/kuma_test.go b/discovery/xds/kuma_test.go index 0de1b986d..b62e9a0ef 100644 --- a/discovery/xds/kuma_test.go +++ b/discovery/xds/kuma_test.go @@ -91,7 +91,6 @@ func getKumaMadsV1DiscoveryResponse(resources ...*MonitoringAssignment) (*v3.Dis serialized := make([]*anypb.Any, len(resources)) for i, res := range resources { data, err := proto.Marshal(res) - if err != nil { return nil, err } diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 6b451b595..bfe23e916 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -383,6 +383,10 @@ token_url: # Optional parameters to append to the token URL. endpoint_params: [ : ... ] + +# Configures the token request's TLS settings. +tls_config: + [ ] ``` ### `` @@ -429,6 +433,42 @@ subscription_id: # The port to scrape metrics from. If using the public IP address, this must # instead be specified in the relabeling rule. [ port: | default = 80 ] + +# Authentication information used to authenticate to the consul server. +# Note that `basic_auth`, `authorization` and `oauth2` options are +# mutually exclusive. +# `password` and `password_file` are mutually exclusive. + +# Optional HTTP basic authentication information, currently not support by Azure. 
+basic_auth: + [ username: ] + [ password: ] + [ password_file: ] + +# Optional `Authorization` header configuration, currently not supported by Azure. +authorization: + # Sets the authentication type. + [ type: | default: Bearer ] + # Sets the credentials. It is mutually exclusive with + # `credentials_file`. + [ credentials: ] + # Sets the credentials to the credentials read from the configured file. + # It is mutually exclusive with `credentials`. + [ credentials_file: ] + +# Optional OAuth 2.0 configuration, currently not supported by Azure. +oauth2: + [ ] + +# Optional proxy URL. +[ proxy_url: ] + +# Configure whether HTTP requests follow HTTP 3xx redirects. +[ follow_redirects: | default = true ] + +# TLS configuration. +tls_config: + [ ] ``` ### `` @@ -1563,7 +1603,7 @@ Available meta labels: from underlying pods), the following labels are attached: * `__meta_kubernetes_endpointslice_address_target_kind`: Kind of the referenced object. * `__meta_kubernetes_endpointslice_address_target_name`: Name of referenced object. -* `__meta_kubernetes_endpointslice_address_type`: The ip protocol family of the adress target. +* `__meta_kubernetes_endpointslice_address_type`: The ip protocol family of the address of the target. * `__meta_kubernetes_endpointslice_endpoint_conditions_ready`: Set to `true` or `false` for the referenced endpoint's ready state. * `__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname`: Name of the node hosting the referenced endpoint. * `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: Flag that shows if the referenced object has a kubernetes.io/hostname annotation. @@ -2712,7 +2752,7 @@ queue_config: # Initial retry delay. Gets doubled for every retry. [ min_backoff: | default = 30ms ] # Maximum retry delay. - [ max_backoff: | default = 100ms ] + [ max_backoff: | default = 5s ] # Retry upon receiving a 429 status code from the remote-write storage. 
# This is experimental and might change in the future. [ retry_on_http_429: | default = false ] diff --git a/docs/configuration/https.md b/docs/configuration/https.md index 1799739d0..c060ec428 100644 --- a/docs/configuration/https.md +++ b/docs/configuration/https.md @@ -73,6 +73,30 @@ http_server_config: # Enable HTTP/2 support. Note that HTTP/2 is only supported with TLS. # This can not be changed on the fly. [ http2: | default = true ] + # List of headers that can be added to HTTP responses. + [ headers: + # Set the Content-Security-Policy header to HTTP responses. + # Unset if blank. + [ Content-Security-Policy: ] + # Set the X-Frame-Options header to HTTP responses. + # Unset if blank. Accepted values are deny and sameorigin. + # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options + [ X-Frame-Options: ] + # Set the X-Content-Type-Options header to HTTP responses. + # Unset if blank. Accepted value is nosniff. + # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options + [ X-Content-Type-Options: ] + # Set the X-XSS-Protection header to all responses. + # Unset if blank. Accepted value is nosniff. + # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection + [ X-XSS-Protection: ] + # Set the Strict-Transport-Security header to HTTP responses. + # Unset if blank. + # Please make sure that you use this with care as this header might force + # browsers to load Prometheus and the other applications hosted on the same + # domain and subdomains over HTTPS. + # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security + [ Strict-Transport-Security: ] ] # Usernames and hashed passwords that have full access to the web # server via basic authentication. 
If empty, no basic authentication is diff --git a/docs/configuration/recording_rules.md b/docs/configuration/recording_rules.md index 8b12632dc..a14aab09d 100644 --- a/docs/configuration/recording_rules.md +++ b/docs/configuration/recording_rules.md @@ -78,8 +78,8 @@ name: # How often rules in the group are evaluated. [ interval: | default = global.evaluation_interval ] -# Limit the number of alerts and series individual rules can produce. -# 0 is no limit. +# Limit the number of alerts an alerting rule and series a recording +# rule can produce. 0 is no limit. [ limit: | default = 0 ] rules: @@ -128,3 +128,11 @@ annotations: [ : ] ``` +# Limiting alerts and series + +A limit for alerts produced by alerting rules and series produced recording rules +can be configured per-group. When the limit is exceeded, _all_ series produced +by the rule are discarded, and if it's an alerting rule, _all_ alerts for +the rule, active, pending, or inactive, are cleared as well. The event will be +recorded as an error in the evaluation, and as such no stale markers are +written. diff --git a/docs/configuration/template_reference.md b/docs/configuration/template_reference.md index 67e37e0d3..ff664af06 100644 --- a/docs/configuration/template_reference.md +++ b/docs/configuration/template_reference.md @@ -74,6 +74,7 @@ versions. | reReplaceAll | pattern, replacement, text | string | [Regexp.ReplaceAllString](https://golang.org/pkg/regexp/#Regexp.ReplaceAllString) Regexp substitution, unanchored. | | graphLink | expr | string | Returns path to graph view in the [expression browser](https://prometheus.io/docs/visualization/browser/) for the expression. | | tableLink | expr | string | Returns path to tabular ("Table") view in the [expression browser](https://prometheus.io/docs/visualization/browser/) for the expression. | +| parseDuration | string | float | Parses a duration string such as "1h" into the number of seconds it represents. 
| ### Others diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 07424d731..e5683c8ab 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -1,9 +1,9 @@ --- -title: Feature Flags +title: Feature flags sort_rank: 11 --- -# Feature Flags +# Feature flags Here is a list of features that are disabled by default since they are breaking changes or are considered experimental. Their behaviour can change in future releases which will be communicated via the [release changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md). @@ -46,7 +46,7 @@ More details can be found [here](querying/basics.md#offset-modifier). The remote write receiver allows Prometheus to accept remote write requests from other Prometheus servers. More details can be found [here](storage.md#overview). -## Exemplars Storage +## Exemplars storage `--enable-feature=exemplar-storage` @@ -54,7 +54,7 @@ The remote write receiver allows Prometheus to accept remote write requests from Exemplar storage is implemented as a fixed size circular buffer that stores exemplars in memory for all series. Enabling this feature will enable the storage of exemplars scraped by Prometheus. The flag `storage.exemplars.exemplars-limit` can be used to control the size of circular buffer by # of exemplars. An exemplar with just a `traceID=` uses roughly 100 bytes of memory via the in-memory exemplar storage. If the exemplar storage is enabled, we will also append the exemplars to WAL for local persistence (for WAL duration). -## Memory Snapshot on Shutdown +## Memory snapshot on shutdown `--enable-feature=memory-snapshot-on-shutdown` @@ -62,7 +62,7 @@ This takes the snapshot of the chunks that are in memory along with the series i it on disk. This will reduce the startup time since the memory state can be restored with this snapshot and m-mapped chunks without the need of WAL replay. 
-## Extra Scrape Metrics +## Extra scrape metrics `--enable-feature=extra-scrape-metrics` @@ -71,3 +71,28 @@ When enabled, for each instance scrape, Prometheus stores a sample in the follow - `scrape_timeout_seconds`. The configured `scrape_timeout` for a target. This allows you to measure each target to find out how close they are to timing out with `scrape_duration_seconds / scrape_timeout_seconds`. - `scrape_sample_limit`. The configured `sample_limit` for a target. This allows you to measure each target to find out how close they are to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. Note that `scrape_sample_limit` can be zero if there is no limit configured, which means that the query above can return `+Inf` for targets with no limit (as we divide by zero). If you want to query only for targets that do have a sample limit use this query: `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`. +- `scrape_body_size_bytes`. The uncompressed size of the most recent scrape response, if successful. Scrapes failing because `body_size_limit` is exceeded report `-1`, other scrape failures report `0`. + +## New service discovery manager + +`--enable-feature=new-service-discovery-manager` + +When enabled, Prometheus uses a new service discovery manager that does not +restart unchanged discoveries upon reloading. This makes reloads faster and reduces +pressure on service discoveries' sources. + +Users are encouraged to test the new service discovery manager and report any +issues upstream. + +In future releases, this new service discovery manager will become the default and +this feature flag will be ignored. + +## Prometheus agent + +`--enable-feature=agent` + +When enabled, Prometheus runs in agent mode. The agent mode is limited to +discovery, scrape and remote write. 
+ +This is useful when you do not need to query the Prometheus data locally, but +only from a central [remote endpoint](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). diff --git a/docs/querying/operators.md b/docs/querying/operators.md index ae6265f6e..a0491bdbe 100644 --- a/docs/querying/operators.md +++ b/docs/querying/operators.md @@ -222,7 +222,7 @@ or both `(label1, label2)` and `(label1, label2,)` are valid syntax. `without` removes the listed labels from the result vector, while -all other labels are preserved the output. `by` does the opposite and drops +all other labels are preserved in the output. `by` does the opposite and drops labels that are not listed in the `by` clause, even if their label values are identical between all elements of the vector. diff --git a/documentation/examples/custom-sd/adapter-usage/main.go b/documentation/examples/custom-sd/adapter-usage/main.go index 2214a823d..80a8f1b5e 100644 --- a/documentation/examples/custom-sd/adapter-usage/main.go +++ b/documentation/examples/custom-sd/adapter-usage/main.go @@ -50,7 +50,7 @@ var ( tagsLabel = model.MetaLabelPrefix + "consul_tags" // serviceAddressLabel is the name of the label containing the (optional) service address. serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address" - //servicePortLabel is the name of the label containing the service port. + // servicePortLabel is the name of the label containing the service port. servicePortLabel = model.MetaLabelPrefix + "consul_service_port" // serviceIDLabel is the name of the label containing the service ID. serviceIDLabel = model.MetaLabelPrefix + "consul_service_id" @@ -120,7 +120,7 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target for _, node := range nodes { // We surround the separated list with the separator as well. This way regular expressions // in relabeling rules don't have to consider tag positions. 
- var tags = "," + strings.Join(node.ServiceTags, ",") + "," + tags := "," + strings.Join(node.ServiceTags, ",") + "," // If the service address is not empty it should be used instead of the node address // since the service may be registered remotely through a different node. @@ -162,7 +162,6 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { for c := time.Tick(time.Duration(d.refreshInterval) * time.Second); ; { var srvs map[string][]string resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/services", d.address)) - if err != nil { level.Error(d.logger).Log("msg", "Error getting services list", "err", err) time.Sleep(time.Duration(d.refreshInterval) * time.Second) diff --git a/documentation/examples/custom-sd/adapter/adapter.go b/documentation/examples/custom-sd/adapter/adapter.go index e355e7b71..564a4e83b 100644 --- a/documentation/examples/custom-sd/adapter/adapter.go +++ b/documentation/examples/custom-sd/adapter/adapter.go @@ -163,7 +163,7 @@ func (a *Adapter) Run() { } // NewAdapter creates a new instance of Adapter. -func NewAdapter(ctx context.Context, file string, name string, d discovery.Discoverer, logger log.Logger) *Adapter { +func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger) *Adapter { return &Adapter{ ctx: ctx, disc: d, diff --git a/documentation/examples/prometheus-agent.yml b/documentation/examples/prometheus-agent.yml new file mode 100644 index 000000000..0e5180817 --- /dev/null +++ b/documentation/examples/prometheus-agent.yml @@ -0,0 +1,22 @@ +# my global config +global: + scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + # scrape_timeout is set to the global default (10s). + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. 
+scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + - job_name: "prometheus" + + # metrics_path defaults to '/metrics' + # scheme defaults to 'http'. + + static_configs: + - targets: ["localhost:9090"] + +# When running prometheus in Agent mode, remote-write is required. +remote_write: + # Agent is able to run with a invalid remote-write URL, but, of course, will fail to push timeseries. + - url: "http://remote-write-url" diff --git a/documentation/examples/prometheus-puppetdb.yml b/documentation/examples/prometheus-puppetdb.yml index f7f4313e7..5b10d39fc 100644 --- a/documentation/examples/prometheus-puppetdb.yml +++ b/documentation/examples/prometheus-puppetdb.yml @@ -14,11 +14,10 @@ scrape_configs: - job_name: 'puppetdb-scrape-jobs' puppetdb_sd_configs: - # This example uses the Prometheus::Scrape_job - # exported resources. - # https://github.com/camptocamp/prometheus-puppetdb-sd - # This examples is compatible with Prometheus-puppetdb-sd, - # if the exported Prometheus::Scrape_job only have at most one target. + # This example uses Prometheus::Scrape_job exported resources. + # It is compatible with the prometheus-puppetdb-sd + # (https://github.com/camptocamp/prometheus-puppetdb-sd) if the + # exported resources have exactly one target. - url: https://puppetdb.example.com query: 'resources { type = "Prometheus::Scrape_job" and exported = true }' include_parameters: true diff --git a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go index fedea48db..36242a8f4 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go @@ -37,7 +37,7 @@ type Client struct { } // NewClient creates a new Client. 
-func NewClient(logger log.Logger, address string, transport string, timeout time.Duration, prefix string) *Client { +func NewClient(logger log.Logger, address, transport string, timeout time.Duration, prefix string) *Client { if logger == nil { logger = log.NewNopLogger() } diff --git a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go index 68844d327..535027e07 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go @@ -20,13 +20,11 @@ import ( "github.com/stretchr/testify/require" ) -var ( - metric = model.Metric{ - model.MetricNameLabel: "test:metric", - "testlabel": "test:value", - "many_chars": "abc!ABC:012-3!45ö67~89./(){},=.\"\\", - } -) +var metric = model.Metric{ + model.MetricNameLabel: "test:metric", + "testlabel": "test:value", + "many_chars": "abc!ABC:012-3!45ö67~89./(){},=.\"\\", +} func TestEscape(t *testing.T) { // Can we correctly keep and escape valid chars. diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go index 061c2e76d..f0ee58c5a 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go @@ -41,7 +41,7 @@ type Client struct { } // NewClient creates a new Client. -func NewClient(logger log.Logger, conf influx.HTTPConfig, db string, rp string) *Client { +func NewClient(logger log.Logger, conf influx.HTTPConfig, db, rp string) *Client { c, err := influx.NewHTTPClient(conf) // Currently influx.NewClient() *should* never return an error. 
if err != nil { diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go index 51906efce..a30448e76 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go @@ -21,13 +21,11 @@ import ( "github.com/stretchr/testify/require" ) -var ( - metric = model.Metric{ - model.MetricNameLabel: "test:metric", - "testlabel": "test:value", - "many_chars": "abc!ABC:012-3!45ö67~89./", - } -) +var metric = model.Metric{ + model.MetricNameLabel: "test:metric", + "testlabel": "test:value", + "many_chars": "abc!ABC:012-3!45ö67~89./", +} func TestTagsFromMetric(t *testing.T) { expected := map[string]TagValue{ diff --git a/documentation/prometheus-mixin/Makefile b/documentation/prometheus-mixin/Makefile index 9ade5aa2b..c3023274c 100644 --- a/documentation/prometheus-mixin/Makefile +++ b/documentation/prometheus-mixin/Makefile @@ -21,5 +21,9 @@ lint: prometheus_alerts.yaml promtool check rules prometheus_alerts.yaml +.PHONY: jb_install +jb_install: + jb install + clean: rm -rf dashboards_out prometheus_alerts.yaml diff --git a/documentation/prometheus-mixin/go.mod b/documentation/prometheus-mixin/go.mod deleted file mode 100644 index 47b10d80d..000000000 --- a/documentation/prometheus-mixin/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/prometheus/prometheus/documentation/prometheus-mixin - -go 1.15 - -require ( - github.com/google/go-jsonnet v0.16.0 - github.com/jsonnet-bundler/jsonnet-bundler v0.4.0 -) diff --git a/documentation/prometheus-mixin/go.sum b/documentation/prometheus-mixin/go.sum deleted file mode 100644 index a6c4a6ee1..000000000 --- a/documentation/prometheus-mixin/go.sum +++ /dev/null @@ -1,49 +0,0 @@ -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc 
h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/google/go-jsonnet v0.16.0 h1:Nb4EEOp+rdeGGyB1rQ5eisgSAqrTnhf9ip+X6lzZbY0= -github.com/google/go-jsonnet v0.16.0/go.mod h1:sOcuej3UW1vpPTZOr8L7RQimqai1a57bt5j22LzGZCw= -github.com/jsonnet-bundler/jsonnet-bundler v0.4.0 h1:4BKZ6LDqPc2wJDmaKnmYD/vDjUptJtnUpai802MibFc= -github.com/jsonnet-bundler/jsonnet-bundler v0.4.0/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= -github.com/mattn/go-colorable 
v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/go.mod b/go.mod index 526079f7b..6413d2ada 100644 --- a/go.mod +++ b/go.mod @@ -3,40 +3,41 @@ module github.com/prometheus/prometheus go 1.14 require ( - github.com/Azure/azure-sdk-for-go v57.1.0+incompatible - github.com/Azure/go-autorest/autorest v0.11.20 - github.com/Azure/go-autorest/autorest/adal v0.9.15 + github.com/Azure/azure-sdk-for-go v58.3.0+incompatible + github.com/Azure/go-autorest/autorest v0.11.22 + github.com/Azure/go-autorest/autorest/adal v0.9.17 github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect - github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922 - github.com/aws/aws-sdk-go v1.40.37 + github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a + github.com/aws/aws-sdk-go v1.42.6 github.com/cespare/xxhash/v2 v2.1.2 - github.com/containerd/containerd v1.5.4 // indirect + github.com/containerd/containerd v1.5.7 // indirect github.com/dennwc/varint v1.0.0 github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 - github.com/digitalocean/godo v1.65.0 - github.com/docker/docker v20.10.8+incompatible + github.com/digitalocean/godo v1.71.0 + github.com/docker/docker v20.10.10+incompatible github.com/docker/go-connections v0.4.0 // indirect github.com/edsrzf/mmap-go v1.0.0 - github.com/envoyproxy/go-control-plane v0.9.9 - github.com/envoyproxy/protoc-gen-validate v0.6.1 - github.com/go-kit/log v0.1.0 + github.com/envoyproxy/go-control-plane v0.10.1 + github.com/envoyproxy/protoc-gen-validate v0.6.2 + 
github.com/fsnotify/fsnotify v1.5.1 + github.com/go-kit/log v0.2.0 github.com/go-logfmt/logfmt v0.5.1 - github.com/go-openapi/strfmt v0.20.2 + github.com/go-openapi/strfmt v0.21.0 github.com/go-zookeeper/zk v1.0.2 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/google/pprof v0.0.0-20210827144239-02619b876842 - github.com/gophercloud/gophercloud v0.20.0 + github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0 + github.com/gophercloud/gophercloud v0.23.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/hashicorp/consul/api v1.10.1 - github.com/hetznercloud/hcloud-go v1.32.0 - github.com/influxdata/influxdb v1.9.3 - github.com/json-iterator/go v1.1.11 + github.com/hashicorp/consul/api v1.11.0 + github.com/hetznercloud/hcloud-go v1.33.1 + github.com/influxdata/influxdb v1.9.5 + github.com/json-iterator/go v1.1.12 github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b - github.com/linode/linodego v0.32.0 + github.com/linode/linodego v1.2.1 github.com/miekg/dns v1.1.43 - github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect + github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/oklog/run v1.1.0 @@ -47,9 +48,9 @@ require ( github.com/prometheus/alertmanager v0.23.0 github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.31.1 + github.com/prometheus/common v0.32.1 github.com/prometheus/common/sigv4 v0.1.0 - github.com/prometheus/exporter-toolkit v0.6.1 + github.com/prometheus/exporter-toolkit v0.7.0 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 @@ -57,25 +58,24 @@ require ( github.com/uber/jaeger-client-go v2.29.1+incompatible github.com/uber/jaeger-lib 
v2.4.1+incompatible go.uber.org/atomic v1.9.0 - go.uber.org/goleak v1.1.10 - golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f - golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f + go.uber.org/goleak v1.1.12 + golang.org/x/net v0.0.0-20211020060615-d418f374d309 + golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 + golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac golang.org/x/tools v0.1.7 - google.golang.org/api v0.56.0 - google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 + google.golang.org/api v0.60.0 + google.golang.org/genproto v0.0.0-20211021150943-2b146023228c google.golang.org/protobuf v1.27.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 - gopkg.in/fsnotify/fsnotify.v1 v1.4.7 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b - k8s.io/api v0.22.1 - k8s.io/apimachinery v0.22.1 - k8s.io/client-go v0.22.1 + k8s.io/api v0.22.3 + k8s.io/apimachinery v0.22.3 + k8s.io/client-go v0.22.3 k8s.io/klog v1.0.0 - k8s.io/klog/v2 v2.10.0 + k8s.io/klog/v2 v2.20.0 ) replace ( diff --git a/go.sum b/go.sum index 152689679..c1e15b89b 100644 --- a/go.sum +++ b/go.sum @@ -24,8 +24,10 @@ cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAV cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio= cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= +cloud.google.com/go v0.97.0/go.mod 
h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -49,33 +51,32 @@ collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v57.1.0+incompatible h1:TKQ3ieyB0vVKkF6t9dsWbMjq56O1xU3eh3Ec09v6ajM= -github.com/Azure/azure-sdk-for-go v57.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/azure-sdk-for-go v58.3.0+incompatible h1:lb9OWePNuJMiibdxg9XvdbiOldR0Yifge37L4LoOxIs= +github.com/Azure/azure-sdk-for-go v58.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest 
v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.9/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest v0.11.20 h1:s8H1PbCZSqg/DH7JMlOz6YMig6htWLNPsjDdlLqCx3M= -github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY= +github.com/Azure/go-autorest/autorest v0.11.22 h1:bXiQwDjrRmBQOE67bwlvUKAC1EU1yZTPQ38c+bstZws= +github.com/Azure/go-autorest/autorest v0.11.22/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/adal v0.9.15 
h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= -github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= -github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.17 h1:esOPl2dhcz9P3jqBSJ8tPGEj2EqzPPT6zfyuloiogKY= +github.com/Azure/go-autorest/autorest/adal v0.9.17/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.3/go.mod h1:4bJZhUhcq8LB20TruwHbAQsmUs2Xh+QR7utuJpLXX3A= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= @@ -125,7 +126,7 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod 
h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -152,14 +153,15 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922 h1:8ypNbf5sd3Sm3cKJ9waOGoQv6dKAFiFty9L6NP1AqJ4= -github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a h1:E/8AP5dFtMhl5KPJz66Kt9G0n+7Sn41Fy1wv9/jHOrc= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= 
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -185,8 +187,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.40.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= -github.com/aws/aws-sdk-go v1.40.37 h1:I+Q6cLctkFyMMrKukcDnj+i2kjrQ37LGiOM6xmsxC48= -github.com/aws/aws-sdk-go v1.40.37/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go v1.42.6 h1:CiJmv8Fdc7wLZhfWy1ZA9TNoOQrFtUC0mhpgyJTaKOs= +github.com/aws/aws-sdk-go v1.42.6/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= @@ -197,6 +199,7 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= @@ -222,6 +225,7 @@ 
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -229,6 +233,7 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= @@ -236,8 +241,9 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod 
h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe h1:QJDJubh0OEcpeGjC7/8uF9tt4e39U/Ya1uyK+itnNPQ= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= @@ -272,8 +278,8 @@ github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.4 h1:uPF0og3ByFzDnaStfiQj3fVGTEtaSNyU+bW7GR/nqGA= -github.com/containerd/containerd v1.5.4/go.mod h1:sx18RgvW6ABJ4iYUw7Q5x7bgFOAB9B6G7+yO0XBc4zw= +github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -335,6 
+341,7 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -360,14 +367,13 @@ github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgz github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= 
-github.com/digitalocean/godo v1.65.0 h1:3SywGJBC18HaYtPQF+T36jYzXBi+a6eIMonSjDll7TA= -github.com/digitalocean/godo v1.65.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/digitalocean/godo v1.71.0 h1:a4UZCG1kr8eQ3MmsGoPzcAwkEtJG2Lc7eelzEkfZwtA= +github.com/digitalocean/godo v1.71.0/go.mod h1:GBmu8MkjZmNARE7IXRPmkbbnocNN8+uBm0xbEVw2LCs= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= @@ -375,8 +381,8 @@ github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TT github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.8+incompatible h1:RVqD337BgQicVCzYrrlhLDWhq6OAD2PJDUg2LsEUvKM= -github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.10+incompatible h1:GKkP0T7U4ks6X3lmmHKC2QDprnpRJor2Z5a8m62R9ZM= +github.com/docker/docker v20.10.10+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= @@ -409,11 +415,11 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= 
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.9 h1:vQLjymTobffN2R0F8eTqw6q7iozfRO5Z0m+/4Vw+/uA= -github.com/envoyproxy/go-control-plane v0.9.9/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.10.1 h1:cgDRLG7bs59Zd+apAWuzLQL95obVYAymNJek76W3mgw= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.1 h1:4CF52PCseTFt4bE+Yk3dIpdVi7XWuPVMhPtm4FaIJPM= -github.com/envoyproxy/protoc-gen-validate v0.6.1/go.mod h1:txg5va2Qkip90uYoSKH+nkAAmXrb2j3iq4FLwdrCbXQ= +github.com/envoyproxy/protoc-gen-validate v0.6.2 h1:JiO+kJTpmYGjEodY7O1Zk8oZcNz1+f30UtwtXoFUPzE= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= @@ -429,8 +435,9 @@ github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVB github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify 
v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= @@ -453,8 +460,9 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -539,8 +547,8 @@ github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= -github.com/go-openapi/strfmt v0.20.2 h1:6XZL+fF4VZYFxKQGLAUB358hOrRh/wS51uWEtlONADE= 
-github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= +github.com/go-openapi/strfmt v0.21.0 h1:hX2qEZKmYks+t0hKeb4VTJpUm2UYsdL3+DCid5swxIs= +github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= @@ -602,6 +610,7 @@ github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblf github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= @@ -616,6 +625,8 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o= 
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= @@ -707,8 +718,8 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210827144239-02619b876842 h1:JCrt5MIE1fHQtdy1825HwJ45oVQaqHE6lgssRhjcg/o= -github.com/google/pprof v0.0.0-20210827144239-02619b876842/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0 h1:zHs+jv3LO743/zFGcByu2KmpbliCU2AhjcGgrdTwSG4= +github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -717,8 +728,9 @@ github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU= 
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= @@ -727,8 +739,8 @@ github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9 github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.10.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= -github.com/gophercloud/gophercloud v0.20.0 h1:1+4jrsjVhdX5omlAo4jkmFc6ftLbuXLzgFo4i6lH+Gk= -github.com/gophercloud/gophercloud v0.20.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= +github.com/gophercloud/gophercloud v0.23.0 h1:I3P10oKlGu3DHP9PrEWMr1ya+/+3Rc9uRHNkRZ9wO7g= +github.com/gophercloud/gophercloud v0.23.0/go.mod h1:MRw6uyLj8uCGbIvBlqL7QW67t0QtNZnzydUzewo1Ioc= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= @@ -751,8 +763,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= -github.com/hashicorp/consul/api v1.10.1 h1:MwZJp86nlnL+6+W1Zly4JUuVn9YHhMggBirMpHGD7kw= 
-github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/api v1.11.0 h1:Hw/G8TtRvOElqxVIhBzXciiSTbapq8hZ2XKZsXk5ZCE= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= @@ -806,34 +818,36 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU= github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= -github.com/hetznercloud/hcloud-go v1.32.0 h1:7zyN2V7hMlhm3HZdxOarmOtvzKvkcYKjM0hcwYMQZz0= -github.com/hetznercloud/hcloud-go v1.32.0/go.mod h1:XX/TQub3ge0yWR2yHWmnDVIrB+MQbda1pHxkUmDlUME= +github.com/hetznercloud/hcloud-go v1.33.1 h1:W1HdO2bRLTKU4WsyqAasDSpt54fYO4WNckWYfH5AuCQ= +github.com/hetznercloud/hcloud-go v1.33.1/go.mod h1:XX/TQub3ge0yWR2yHWmnDVIrB+MQbda1pHxkUmDlUME= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= -github.com/influxdata/flux v0.120.1/go.mod h1:pGSAvyAA5d3et7SSzajaYShWYXmnRnJJq2qWi+WWZ2I= +github.com/influxdata/flux v0.131.0/go.mod h1:CKvnYe6FHpTj/E0YGI7TcOZdGiYHoToOPSnoa12RtKI= github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA= github.com/influxdata/influxdb v1.8.0/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= -github.com/influxdata/influxdb v1.9.3 h1:60F7eqotCxogyuZAjNglNRG9D6WY65KR9mcmugBx6cs= -github.com/influxdata/influxdb v1.9.3/go.mod h1:xD4ZjAgEJQO9/bX3NhFrssKtdNPi+ki1kjrttJRDhGc= +github.com/influxdata/influxdb v1.9.5 h1:4O7AC5jOA9RoqtDuD2rysXbumcEwaqWlWXmwuyK+a2s= +github.com/influxdata/influxdb v1.9.5/go.mod h1:4uPVvcry9KWQVWLxyT9641qpkRXUBN+xa0MJFFNNLKo= github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= github.com/influxdata/influxdb1-client 
v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= -github.com/influxdata/pkg-config v0.2.7/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= +github.com/influxdata/pkg-config v0.2.8/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= @@ -861,8 +875,9 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod 
h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= @@ -883,6 +898,7 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -909,9 +925,9 @@ github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdA github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v0.32.0 h1:IK04cx2b/IwAAd6XLruf1Dl/n3dRXj87Uw/5qo6afVU= -github.com/linode/linodego v0.32.0/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM= -github.com/lyft/protoc-gen-star v0.5.1/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkVUl+SrEe+ZORU= +github.com/linode/linodego v1.2.1 h1:v0vS/n9dGMA9evG+fhLJcy7hsf6TUVmacfYiYzARhts= +github.com/linode/linodego v1.2.1/go.mod h1:x/7+BoaKd4unViBmS2umdjYyVAmpFtBtEXZ0wou7FYQ= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/lyft/protoc-gen-validate v0.0.13/go.mod 
h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -984,14 +1000,15 @@ github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2J github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= -github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod 
h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= @@ -1055,14 +1072,17 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod 
h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= @@ -1143,12 +1163,13 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.31.1 h1:d18hG4PkHnNAKNMOmFuXFaiY8Us0nird/2m60uS1AMs= -github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.6.1 h1:Aqk75wQD92N9CqmTlZwjKwq6272nOGrWIbc8Z7+xQO0= github.com/prometheus/exporter-toolkit v0.6.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= +github.com/prometheus/exporter-toolkit v0.7.0 h1:XtYeVeeC5daG4txbc9+mieKq+/AK4gtIBLl9Mulrjnk= +github.com/prometheus/exporter-toolkit v0.7.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1207,19 +1228,20 @@ 
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/snowflakedb/gosnowflake v1.3.4/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE= +github.com/snowflakedb/gosnowflake v1.3.13/go.mod h1:6nfka9aTXkUNha1p1cjeeyjDvcyh7jfjp0l8kGpDBok= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= @@ -1328,8 +1350,9 @@ go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.5.1 h1:9nOVLGDfOaZ9R0tBumx/BcuqkbFpyTCU2r/Po7A2azI= go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +go.mongodb.org/mongo-driver v1.7.3 h1:G4l/eYY9VrQAK/AUgkV0koQKzQnyddnWxrd/Etf0jIs= +go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -1349,8 +1372,8 @@ go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= 
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -1385,6 +1408,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1392,8 +1416,9 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1434,8 +1459,9 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0 h1:UG21uOlmZabA4fW5i7ZX6bjw1xELEGg/ZLgZq9auk/Q= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1501,8 +1527,9 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f h1:w6wWR0H+nyVpbSAQbzVEIACVyr/h8l/BEkY6Sokc7Eg= -golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net 
v0.0.0-20211020060615-d418f374d309 h1:A0lJIi+hcTR6aajJH4YqKWwohY4aW9RO7oRMcdv+HKI= +golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1517,8 +1544,9 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 h1:B333XXssMuKQeBwiNODx4TupZy7bf4sxFZnN2ZOcvUE= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1608,7 +1636,6 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1630,6 +1657,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1639,9 +1667,12 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 h1:GkvMjFtXUmahfDtashnc1mnrCtuBVcwse5QV2lUk/tI= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 h1:2B5p2L5IfGiD7+b9BOoRMC6DgObAVZV+Fsp050NqXik= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= @@ -1654,8 +1685,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1700,7 +1732,6 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1728,7 +1759,6 @@ golang.org/x/tools v0.0.0-20200422205258-72e4a01eba43/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -1787,8 +1817,11 @@ google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtuk google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/api 
v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.56.0 h1:08F9XVYTLOGeSQb3xI9C0gXMuQanhdGed0cWFhDozbI= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.60.0 h1:eq/zs5WPH4J9undYM9IP1O7dSr7Yh8Y0GtSCpzGzIUk= +google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1858,8 +1891,12 @@ google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKr google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 h1:3V2dxSZpz4zozWWUq36vUxXEKnSYitEH2LdsAx+RUmg= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto 
v0.0.0-20211021150943-2b146023228c h1:FqrtZMB5Wr+/RecOM3uPJNPfWR8Upb5hAPnt7PU6i4k= +google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1921,7 +1958,6 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo= gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= @@ -1970,14 +2006,14 @@ k8s.io/api v0.17.5/go.mod h1:0zV5/ungglgy2Rlm3QK8fbxkXVs+BSJWpJP/+8gUVLY= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY= -k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= +k8s.io/api v0.22.3 h1:wOoES2GoSkUsdped2RB4zYypPqWtvprGoKCENTOOjP4= +k8s.io/api v0.22.3/go.mod h1:azgiXFiXqiWyLCfI62/eYBOu19rj2LKmIhFPP4+33fs= k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery 
v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM= -k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.22.3 h1:mrvBG5CZnEfwgpVqWcrRKvdsYECTrhAR6cApAgdsflk= +k8s.io/apimachinery v0.22.3/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -1985,8 +2021,8 @@ k8s.io/client-go v0.17.5/go.mod h1:S8uZpBpjJJdEH/fEyxcqg7Rn0P5jH+ilkgBHjriSmNo= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw= -k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk= +k8s.io/client-go v0.22.3 h1:6onkOSc+YNdwq5zXE0wFXicq64rrym+mXwHu/CPVGO4= +k8s.io/client-go v0.22.3/go.mod h1:ElDjYf8gvZsKDYexmsmnMQ0DYO8W9RwBjfQ1PI53yow= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= @@ -2004,8 +2040,8 @@ k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod 
h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM= -k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/pkg/exemplar/exemplar.go b/model/exemplar/exemplar.go similarity index 97% rename from pkg/exemplar/exemplar.go rename to model/exemplar/exemplar.go index 27ba64d4b..2e39cf689 100644 --- a/pkg/exemplar/exemplar.go +++ b/model/exemplar/exemplar.go @@ -13,7 +13,7 @@ package exemplar -import "github.com/prometheus/prometheus/pkg/labels" +import "github.com/prometheus/prometheus/model/labels" // The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index 91c1aa15c..9e983d11e 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -380,24 +380,31 @@ var exponentialBounds = [][]float64{ // Schema 2: {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, // Schema 3: - {0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, - 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711}, + { + 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, + }, // Schema 4: - {0.5, 0.5221368912137069, 
0.5452538663326288, 0.5693943173783458, + { + 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, - 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735}, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, + }, // Schema 5: - {0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + { + 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, - 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999}, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, + }, // Schema 6: - {0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + { + 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, @@ -412,9 +419,11 @@ var exponentialBounds = [][]float64{ 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, - 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 
0.9892280131939752}, + 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, + }, // Schema 7: - {0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + { + 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, @@ -445,9 +454,11 @@ var exponentialBounds = [][]float64{ 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, - 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328}, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, + }, // Schema 8: - {0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, + { + 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, @@ -510,5 +521,6 @@ var exponentialBounds = [][]float64{ 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, - 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698}, + 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, + }, } diff --git a/pkg/labels/labels.go b/model/labels/labels.go similarity index 100% rename from pkg/labels/labels.go rename to model/labels/labels.go diff --git 
a/pkg/labels/labels_test.go b/model/labels/labels_test.go similarity index 99% rename from pkg/labels/labels_test.go rename to model/labels/labels_test.go index 57f28224e..2d5c2ed37 100644 --- a/pkg/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -704,7 +704,7 @@ func BenchmarkLabels_Hash(b *testing.B) { lbls: func() Labels { lbls := make(Labels, 10) for i := 0; i < len(lbls); i++ { - //Label ~50B name, 50B value. + // Label ~50B name, 50B value. lbls[i] = Label{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)} } return lbls diff --git a/pkg/labels/matcher.go b/model/labels/matcher.go similarity index 100% rename from pkg/labels/matcher.go rename to model/labels/matcher.go diff --git a/pkg/labels/matcher_test.go b/model/labels/matcher_test.go similarity index 100% rename from pkg/labels/matcher_test.go rename to model/labels/matcher_test.go diff --git a/pkg/labels/regexp.go b/model/labels/regexp.go similarity index 100% rename from pkg/labels/regexp.go rename to model/labels/regexp.go diff --git a/pkg/labels/regexp_test.go b/model/labels/regexp_test.go similarity index 100% rename from pkg/labels/regexp_test.go rename to model/labels/regexp_test.go diff --git a/pkg/labels/test_utils.go b/model/labels/test_utils.go similarity index 100% rename from pkg/labels/test_utils.go rename to model/labels/test_utils.go diff --git a/pkg/relabel/relabel.go b/model/relabel/relabel.go similarity index 99% rename from pkg/relabel/relabel.go rename to model/relabel/relabel.go index ec452f5b5..db08f3c85 100644 --- a/pkg/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -22,7 +22,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" ) var ( diff --git a/pkg/relabel/relabel_test.go b/model/relabel/relabel_test.go similarity index 99% rename from 
pkg/relabel/relabel_test.go rename to model/relabel/relabel_test.go index 868699122..1e50344a0 100644 --- a/pkg/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -19,7 +19,7 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" ) func TestRelabel(t *testing.T) { diff --git a/pkg/rulefmt/rulefmt.go b/model/rulefmt/rulefmt.go similarity index 94% rename from pkg/rulefmt/rulefmt.go rename to model/rulefmt/rulefmt.go index 13fd07c22..5332514ed 100644 --- a/pkg/rulefmt/rulefmt.go +++ b/model/rulefmt/rulefmt.go @@ -25,7 +25,7 @@ import ( "github.com/prometheus/common/model" yaml "gopkg.in/yaml.v3" - "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/template" ) @@ -38,6 +38,16 @@ type Error struct { Err WrappedError } +// Error prints the error message in a formatted string. +func (err *Error) Error() string { + if err.Err.nodeAlt != nil { + return errors.Wrapf(err.Err.err, "%d:%d: %d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Err.nodeAlt.Line, err.Err.nodeAlt.Column, err.Group, err.Rule, err.RuleName).Error() + } else if err.Err.node != nil { + return errors.Wrapf(err.Err.err, "%d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Group, err.Rule, err.RuleName).Error() + } + return errors.Wrapf(err.Err.err, "group %q, rule %d, %q", err.Group, err.Rule, err.RuleName).Error() +} + // WrappedError wraps error with the yaml node which can be used to represent // the line and column numbers of the error. 
type WrappedError struct { @@ -46,13 +56,14 @@ type WrappedError struct { nodeAlt *yaml.Node } -func (err *Error) Error() string { - if err.Err.nodeAlt != nil { - return errors.Wrapf(err.Err.err, "%d:%d: %d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Err.nodeAlt.Line, err.Err.nodeAlt.Column, err.Group, err.Rule, err.RuleName).Error() - } else if err.Err.node != nil { - return errors.Wrapf(err.Err.err, "%d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Group, err.Rule, err.RuleName).Error() +// Error prints the error message in a formatted string. +func (we *WrappedError) Error() string { + if we.nodeAlt != nil { + return errors.Wrapf(we.err, "%d:%d: %d:%d", we.node.Line, we.node.Column, we.nodeAlt.Line, we.nodeAlt.Column).Error() + } else if we.node != nil { + return errors.Wrapf(we.err, "%d:%d", we.node.Line, we.node.Column).Error() } - return errors.Wrapf(err.Err.err, "group %q, rule %d, %q", err.Group, err.Rule, err.RuleName).Error() + return we.err.Error() } // RuleGroups is a set of rule groups that are typically exposed in a file. 
diff --git a/pkg/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go similarity index 65% rename from pkg/rulefmt/rulefmt_test.go rename to model/rulefmt/rulefmt_test.go index 719c01cbd..21afb0b46 100644 --- a/pkg/rulefmt/rulefmt_test.go +++ b/model/rulefmt/rulefmt_test.go @@ -14,10 +14,12 @@ package rulefmt import ( + "errors" "path/filepath" "testing" "github.com/stretchr/testify/require" + "gopkg.in/yaml.v3" ) func TestParseFileSuccess(t *testing.T) { @@ -184,3 +186,116 @@ groups: err1 := errs[1].(*Error).Err.node require.NotEqual(t, err0, err1, "Error nodes should not be the same") } + +func TestError(t *testing.T) { + tests := []struct { + name string + error *Error + want string + }{ + { + name: "with alternative node provided in WrappedError", + error: &Error{ + Group: "some group", + Rule: 1, + RuleName: "some rule name", + Err: WrappedError{ + err: errors.New("some error"), + node: &yaml.Node{ + Line: 10, + Column: 20, + }, + nodeAlt: &yaml.Node{ + Line: 11, + Column: 21, + }, + }, + }, + want: `10:20: 11:21: group "some group", rule 1, "some rule name": some error`, + }, + { + name: "with node provided in WrappedError", + error: &Error{ + Group: "some group", + Rule: 1, + RuleName: "some rule name", + Err: WrappedError{ + err: errors.New("some error"), + node: &yaml.Node{ + Line: 10, + Column: 20, + }, + }, + }, + want: `10:20: group "some group", rule 1, "some rule name": some error`, + }, + { + name: "with only err provided in WrappedError", + error: &Error{ + Group: "some group", + Rule: 1, + RuleName: "some rule name", + Err: WrappedError{ + err: errors.New("some error"), + }, + }, + want: `group "some group", rule 1, "some rule name": some error`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.error.Error() + require.Equal(t, tt.want, got) + }) + } +} + +func TestWrappedError(t *testing.T) { + tests := []struct { + name string + wrappedError *WrappedError + want string + }{ + { + name: "with alternative 
node provided", + wrappedError: &WrappedError{ + err: errors.New("some error"), + node: &yaml.Node{ + Line: 10, + Column: 20, + }, + nodeAlt: &yaml.Node{ + Line: 11, + Column: 21, + }, + }, + want: `10:20: 11:21: some error`, + }, + { + name: "with node provided", + wrappedError: &WrappedError{ + err: errors.New("some error"), + node: &yaml.Node{ + Line: 10, + Column: 20, + }, + }, + want: `10:20: some error`, + }, + { + name: "with only err provided", + wrappedError: &WrappedError{ + err: errors.New("some error"), + }, + want: `some error`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.wrappedError.Error() + require.Equal(t, tt.want, got) + }) + } +} diff --git a/pkg/rulefmt/testdata/bad_annotation.bad.yaml b/model/rulefmt/testdata/bad_annotation.bad.yaml similarity index 100% rename from pkg/rulefmt/testdata/bad_annotation.bad.yaml rename to model/rulefmt/testdata/bad_annotation.bad.yaml diff --git a/pkg/rulefmt/testdata/bad_expr.bad.yaml b/model/rulefmt/testdata/bad_expr.bad.yaml similarity index 100% rename from pkg/rulefmt/testdata/bad_expr.bad.yaml rename to model/rulefmt/testdata/bad_expr.bad.yaml diff --git a/pkg/rulefmt/testdata/bad_field.bad.yaml b/model/rulefmt/testdata/bad_field.bad.yaml similarity index 100% rename from pkg/rulefmt/testdata/bad_field.bad.yaml rename to model/rulefmt/testdata/bad_field.bad.yaml diff --git a/pkg/rulefmt/testdata/bad_lname.bad.yaml b/model/rulefmt/testdata/bad_lname.bad.yaml similarity index 100% rename from pkg/rulefmt/testdata/bad_lname.bad.yaml rename to model/rulefmt/testdata/bad_lname.bad.yaml diff --git a/pkg/rulefmt/testdata/duplicate_grp.bad.yaml b/model/rulefmt/testdata/duplicate_grp.bad.yaml similarity index 100% rename from pkg/rulefmt/testdata/duplicate_grp.bad.yaml rename to model/rulefmt/testdata/duplicate_grp.bad.yaml diff --git a/pkg/rulefmt/testdata/invalid_label_name.bad.yaml b/model/rulefmt/testdata/invalid_label_name.bad.yaml similarity index 100% rename from 
pkg/rulefmt/testdata/invalid_label_name.bad.yaml rename to model/rulefmt/testdata/invalid_label_name.bad.yaml diff --git a/pkg/rulefmt/testdata/invalid_record_name.bad.yaml b/model/rulefmt/testdata/invalid_record_name.bad.yaml similarity index 100% rename from pkg/rulefmt/testdata/invalid_record_name.bad.yaml rename to model/rulefmt/testdata/invalid_record_name.bad.yaml diff --git a/pkg/rulefmt/testdata/no_rec_alert.bad.yaml b/model/rulefmt/testdata/no_rec_alert.bad.yaml similarity index 100% rename from pkg/rulefmt/testdata/no_rec_alert.bad.yaml rename to model/rulefmt/testdata/no_rec_alert.bad.yaml diff --git a/pkg/rulefmt/testdata/noexpr.bad.yaml b/model/rulefmt/testdata/noexpr.bad.yaml similarity index 100% rename from pkg/rulefmt/testdata/noexpr.bad.yaml rename to model/rulefmt/testdata/noexpr.bad.yaml diff --git a/pkg/rulefmt/testdata/record_and_alert.bad.yaml b/model/rulefmt/testdata/record_and_alert.bad.yaml similarity index 100% rename from pkg/rulefmt/testdata/record_and_alert.bad.yaml rename to model/rulefmt/testdata/record_and_alert.bad.yaml diff --git a/pkg/rulefmt/testdata/test.yaml b/model/rulefmt/testdata/test.yaml similarity index 100% rename from pkg/rulefmt/testdata/test.yaml rename to model/rulefmt/testdata/test.yaml diff --git a/pkg/textparse/README.md b/model/textparse/README.md similarity index 100% rename from pkg/textparse/README.md rename to model/textparse/README.md diff --git a/pkg/textparse/interface.go b/model/textparse/interface.go similarity index 97% rename from pkg/textparse/interface.go rename to model/textparse/interface.go index 25f0ec9ab..2a775dcd0 100644 --- a/pkg/textparse/interface.go +++ b/model/textparse/interface.go @@ -16,9 +16,9 @@ package textparse import ( "mime" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" ) // 
Parser parses samples from a byte slice of samples in the official diff --git a/pkg/textparse/openmetricslex.l b/model/textparse/openmetricslex.l similarity index 100% rename from pkg/textparse/openmetricslex.l rename to model/textparse/openmetricslex.l diff --git a/pkg/textparse/openmetricslex.l.go b/model/textparse/openmetricslex.l.go similarity index 100% rename from pkg/textparse/openmetricslex.l.go rename to model/textparse/openmetricslex.l.go diff --git a/pkg/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go similarity index 98% rename from pkg/textparse/openmetricsparse.go rename to model/textparse/openmetricsparse.go index b51321ff4..334e5bb44 100644 --- a/pkg/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -27,10 +27,10 @@ import ( "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" ) var allowedSuffixes = [][]byte{[]byte("_total"), []byte("_bucket")} diff --git a/pkg/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go similarity index 99% rename from pkg/textparse/openmetricsparse_test.go rename to model/textparse/openmetricsparse_test.go index 5d8c94369..14742d390 100644 --- a/pkg/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -19,8 +19,8 @@ import ( "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" ) func TestOpenMetricsParse(t *testing.T) { diff --git a/pkg/textparse/promlex.l b/model/textparse/promlex.l similarity index 100% rename from 
pkg/textparse/promlex.l rename to model/textparse/promlex.l diff --git a/pkg/textparse/promlex.l.go b/model/textparse/promlex.l.go similarity index 100% rename from pkg/textparse/promlex.l.go rename to model/textparse/promlex.l.go diff --git a/pkg/textparse/promparse.go b/model/textparse/promparse.go similarity index 98% rename from pkg/textparse/promparse.go rename to model/textparse/promparse.go index 0190ef6eb..5dee361ea 100644 --- a/pkg/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -28,10 +28,10 @@ import ( "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" ) type promlexer struct { diff --git a/pkg/textparse/promparse_test.go b/model/textparse/promparse_test.go similarity index 99% rename from pkg/textparse/promparse_test.go rename to model/textparse/promparse_test.go index 1676ccc13..b6e57108b 100644 --- a/pkg/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -25,7 +25,7 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" ) func TestPromParse(t *testing.T) { @@ -457,9 +457,7 @@ func BenchmarkParse(b *testing.B) { total := 0 for i := 0; i < b.N; i += promtestdataSampleCount { - var ( - decSamples = make(model.Vector, 0, 50) - ) + decSamples := make(model.Vector, 0, 50) sdec := expfmt.SampleDecoder{ Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.FmtText), Opts: &expfmt.DecodeOptions{ @@ -480,6 +478,7 @@ func BenchmarkParse(b *testing.B) { } } } + func BenchmarkGzip(b *testing.B) { for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} { b.Run(fn, func(b 
*testing.B) { diff --git a/pkg/textparse/promtestdata.nometa.txt b/model/textparse/promtestdata.nometa.txt similarity index 100% rename from pkg/textparse/promtestdata.nometa.txt rename to model/textparse/promtestdata.nometa.txt diff --git a/pkg/textparse/promtestdata.txt b/model/textparse/promtestdata.txt similarity index 100% rename from pkg/textparse/promtestdata.txt rename to model/textparse/promtestdata.txt diff --git a/pkg/textparse/protobufparse.go b/model/textparse/protobufparse.go similarity index 99% rename from pkg/textparse/protobufparse.go rename to model/textparse/protobufparse.go index b0bcf7ed0..9bef9506d 100644 --- a/pkg/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -25,11 +25,11 @@ import ( "github.com/gogo/protobuf/proto" "github.com/pkg/errors" - "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) diff --git a/pkg/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go similarity index 99% rename from pkg/textparse/protobufparse_test.go rename to model/textparse/protobufparse_test.go index c92c148fe..8a479a87a 100644 --- a/pkg/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -22,9 +22,9 @@ import ( "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) diff --git a/pkg/timestamp/timestamp.go b/model/timestamp/timestamp.go similarity index 100% 
rename from pkg/timestamp/timestamp.go rename to model/timestamp/timestamp.go diff --git a/pkg/value/value.go b/model/value/value.go similarity index 100% rename from pkg/value/value.go rename to model/value/value.go diff --git a/notifier/notifier.go b/notifier/notifier.go index 97086d562..0fca28c33 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -41,8 +41,8 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/relabel" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" ) const ( @@ -303,7 +303,6 @@ func (n *Manager) nextBatch() []*Alert { // Run dispatches notifications continuously. func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { - for { select { case <-n.ctx.Done(): @@ -602,7 +601,7 @@ func (n *Manager) Stop() { n.cancel() } -// alertmanager holds Alertmanager endpoint information. +// Alertmanager holds Alertmanager endpoint information. type alertmanager interface { url() *url.URL } @@ -654,7 +653,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { allDroppedAms := []alertmanager{} for _, tg := range tgs { - ams, droppedAms, err := alertmanagerFromGroup(tg, s.cfg) + ams, droppedAms, err := AlertmanagerFromGroup(tg, s.cfg) if err != nil { level.Error(s.logger).Log("msg", "Creating discovered Alertmanagers failed", "err", err) continue @@ -691,9 +690,9 @@ func postPath(pre string, v config.AlertmanagerAPIVersion) string { return path.Join("/", pre, alertPushEndpoint) } -// alertmanagerFromGroup extracts a list of alertmanagers from a target group +// AlertmanagerFromGroup extracts a list of alertmanagers from a target group // and an associated AlertmanagerConfig. 
-func alertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) { +func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) { var res []alertmanager var droppedAlertManagers []alertmanager diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index e4a7f26cd..4a2747efd 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -35,12 +35,12 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/relabel" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" ) func TestPostPath(t *testing.T) { - var cases = []struct { + cases := []struct { in, out string }{ { @@ -447,7 +447,7 @@ func (a alertmanagerMock) url() *url.URL { func TestLabelSetNotReused(t *testing.T) { tg := makeInputTargetGroup() - _, _, err := alertmanagerFromGroup(tg, &config.AlertmanagerConfig{}) + _, _, err := AlertmanagerFromGroup(tg, &config.AlertmanagerConfig{}) require.NoError(t, err) @@ -456,7 +456,7 @@ func TestLabelSetNotReused(t *testing.T) { } func TestReload(t *testing.T) { - var tests = []struct { + tests := []struct { in *targetgroup.Group out string }{ @@ -500,11 +500,10 @@ alerting: require.Equal(t, tt.out, res) } - } func TestDroppedAlertmanagers(t *testing.T) { - var tests = []struct { + tests := []struct { in *targetgroup.Group out string }{ diff --git a/pkg/README.md b/pkg/README.md deleted file mode 100644 index 3aa989ffc..000000000 --- a/pkg/README.md +++ /dev/null @@ -1,3 +0,0 @@ -The `pkg` directory is deprecated. -Please do not add new packages to this directory. -Existing packages will be moved elsewhere eventually. 
diff --git a/promql/bench_test.go b/promql/bench_test.go index eb020d2f1..a46b4fcf7 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -21,14 +21,15 @@ import ( "testing" "time" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/teststorage" ) func BenchmarkRangeQuery(b *testing.B) { - storage := teststorage.New(b) - defer storage.Close() + stor := teststorage.New(b) + defer stor.Close() opts := EngineOpts{ Logger: nil, Reg: nil, @@ -62,13 +63,13 @@ func BenchmarkRangeQuery(b *testing.B) { } metrics = append(metrics, labels.FromStrings("__name__", "h_hundred", "l", strconv.Itoa(i), "le", "+Inf")) } - refs := make([]uint64, len(metrics)) + refs := make([]storage.SeriesRef, len(metrics)) // A day of data plus 10k steps. numIntervals := 8640 + 10000 for s := 0; s < numIntervals; s++ { - a := storage.Appender(context.Background()) + a := stor.Appender(context.Background()) ts := int64(s * 10000) // 10s interval. 
for i, metric := range metrics { ref, _ := a.Append(refs[i], metric, ts, float64(s)+float64(i)/float64(len(metrics))) @@ -216,7 +217,7 @@ func BenchmarkRangeQuery(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { qry, err := engine.NewRangeQuery( - storage, c.expr, + stor, c.expr, time.Unix(int64((numIntervals-c.steps)*10), 0), time.Unix(int64(numIntervals*10), 0), time.Second*10) if err != nil { diff --git a/promql/engine.go b/promql/engine.go index 02acab11e..7f720ed56 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -36,9 +36,9 @@ import ( "github.com/uber/jaeger-client-go" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/timestamp" - "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -89,12 +89,15 @@ type ( func (e ErrQueryTimeout) Error() string { return fmt.Sprintf("query timed out in %s", string(e)) } + func (e ErrQueryCanceled) Error() string { return fmt.Sprintf("query was canceled in %s", string(e)) } + func (e ErrTooManySamples) Error() string { return fmt.Sprintf("query processing would load too many samples into memory in %s", string(e)) } + func (e ErrStorage) Error() string { return e.Err.Error() } @@ -403,8 +406,10 @@ func (ng *Engine) newQuery(q storage.Queryable, expr parser.Expr, start, end tim return qry, nil } -var ErrValidationAtModifierDisabled = errors.New("@ modifier is disabled") -var ErrValidationNegativeOffsetDisabled = errors.New("negative offset is disabled") +var ( + ErrValidationAtModifierDisabled = errors.New("@ modifier is disabled") + ErrValidationNegativeOffsetDisabled = errors.New("negative offset is disabled") +) func (ng *Engine) 
validateOpts(expr parser.Expr) error { if ng.enableAtModifier && ng.enableNegativeOffset { @@ -1176,7 +1181,9 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } unwrapParenExpr(&e.Param) - if s, ok := unwrapStepInvariantExpr(e.Param).(*parser.StringLiteral); ok { + param := unwrapStepInvariantExpr(e.Param) + unwrapParenExpr(¶m) + if s, ok := param.(*parser.StringLiteral); ok { return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { return ev.aggregation(e.Op, sortedGrouping, e.Without, s.Val, v[0].(Vector), sh[0], enh), nil }, e.Expr) @@ -1198,6 +1205,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { // a vector selector. unwrapParenExpr(&e.Args[0]) arg := unwrapStepInvariantExpr(e.Args[0]) + unwrapParenExpr(&arg) vs, ok := arg.(*parser.VectorSelector) if ok { return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { @@ -1221,6 +1229,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { for i := range e.Args { unwrapParenExpr(&e.Args[i]) a := unwrapStepInvariantExpr(e.Args[i]) + unwrapParenExpr(&a) if _, ok := a.(*parser.MatrixSelector); ok { matrixArgIndex = i matrixArg = true @@ -1263,7 +1272,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } } - sel := unwrapStepInvariantExpr(e.Args[matrixArgIndex]).(*parser.MatrixSelector) + unwrapParenExpr(&e.Args[matrixArgIndex]) + arg := unwrapStepInvariantExpr(e.Args[matrixArgIndex]) + unwrapParenExpr(&arg) + sel := arg.(*parser.MatrixSelector) selVS := sel.VectorSelector.(*parser.VectorSelector) ws, err := checkAndExpandSeriesSet(ev.ctx, sel) @@ -1826,9 +1838,11 @@ func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, panic("set operations must only use many-to-many matching") } if len(lhs) == 0 { // Short-circuit. 
- return rhs + enh.Out = append(enh.Out, rhs...) + return enh.Out } else if len(rhs) == 0 { - return lhs + enh.Out = append(enh.Out, lhs...) + return enh.Out } leftSigs := map[string]struct{}{} @@ -1853,7 +1867,8 @@ func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatchi // Short-circuit: empty rhs means we will return everything in lhs; // empty lhs means we will return empty - don't need to build a map. if len(lhs) == 0 || len(rhs) == 0 { - return lhs + enh.Out = append(enh.Out, lhs...) + return enh.Out } rightSigs := map[string]struct{}{} @@ -2170,7 +2185,6 @@ type groupedAggregation struct { // aggregation evaluates an aggregation operation on a Vector. The provided grouping labels // must be sorted. func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) Vector { - result := map[uint64]*groupedAggregation{} orderedResult := []*groupedAggregation{} var k int64 @@ -2540,7 +2554,6 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool { } if isStepInvariant { - // The function and all arguments are step invariant. return true } @@ -2586,12 +2599,6 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool { } func newStepInvariantExpr(expr parser.Expr) parser.Expr { - if e, ok := expr.(*parser.ParenExpr); ok { - // Wrapping the inside of () makes it easy to unwrap the paren later. - // But this effectively unwraps the paren. 
- return newStepInvariantExpr(e.Expr) - - } return &parser.StepInvariantExpr{Expr: expr} } diff --git a/promql/engine_test.go b/promql/engine_test.go index 88eed7f8b..0e4657a6e 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -27,8 +27,8 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/goleak" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" @@ -183,9 +183,11 @@ type errQuerier struct { func (q *errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet { return errSeriesSet{err: q.err} } + func (*errQuerier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) { return nil, nil, nil } + func (*errQuerier) LabelNames(...*labels.Matcher) ([]string, storage.Warnings, error) { return nil, nil, nil } @@ -269,277 +271,278 @@ func TestSelectHintsSetCorrectly(t *testing.T) { // TODO(bwplotka): Add support for better hints when subquerying. 
expected []*storage.SelectHints - }{{ - query: "foo", start: 10000, - expected: []*storage.SelectHints{ - {Start: 5000, End: 10000}, - }, - }, { - query: "foo @ 15", start: 10000, - expected: []*storage.SelectHints{ - {Start: 10000, End: 15000}, - }, - }, { - query: "foo @ 1", start: 10000, - expected: []*storage.SelectHints{ - {Start: -4000, End: 1000}, - }, - }, { - query: "foo[2m]", start: 200000, - expected: []*storage.SelectHints{ - {Start: 80000, End: 200000, Range: 120000}, - }, - }, { - query: "foo[2m] @ 180", start: 200000, - expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000}, - }, - }, { - query: "foo[2m] @ 300", start: 200000, - expected: []*storage.SelectHints{ - {Start: 180000, End: 300000, Range: 120000}, - }, - }, { - query: "foo[2m] @ 60", start: 200000, - expected: []*storage.SelectHints{ - {Start: -60000, End: 60000, Range: 120000}, - }, - }, { - query: "foo[2m] offset 2m", start: 300000, - expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000}, - }, - }, { - query: "foo[2m] @ 200 offset 2m", start: 300000, - expected: []*storage.SelectHints{ - {Start: -40000, End: 80000, Range: 120000}, - }, - }, { - query: "foo[2m:1s]", start: 300000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 300000}, - }, - }, { - query: "count_over_time(foo[2m:1s])", start: 300000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, - expected: []*storage.SelectHints{ - {Start: 75000, End: 200000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, - expected: []*storage.SelectHints{ - {Start: -25000, End: 100000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time(foo[2m:1s] 
offset 10s)", start: 300000, - expected: []*storage.SelectHints{ - {Start: 165000, End: 290000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, - expected: []*storage.SelectHints{ - {Start: 155000, End: 280000, Func: "count_over_time"}, - }, - }, { - // When the @ is on the vector selector, the enclosing subquery parameters - // don't affect the hint ranges. - query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, - expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time"}, - }, - }, { - // When the @ is on the vector selector, the enclosing subquery parameters - // don't affect the hint ranges. - query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, - expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time"}, - }, - }, { - query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, - expected: []*storage.SelectHints{ - {Start: -45000, End: 80000, Func: "count_over_time"}, - }, - }, { + }{ + { + query: "foo", start: 10000, + expected: []*storage.SelectHints{ + {Start: 5000, End: 10000}, + }, + }, { + query: "foo @ 15", start: 10000, + expected: []*storage.SelectHints{ + {Start: 10000, End: 15000}, + }, + }, { + query: "foo @ 1", start: 10000, + expected: []*storage.SelectHints{ + {Start: -4000, End: 1000}, + }, + }, { + query: "foo[2m]", start: 200000, + expected: []*storage.SelectHints{ + {Start: 80000, End: 200000, Range: 120000}, + }, + }, { + query: "foo[2m] @ 180", start: 200000, + expected: []*storage.SelectHints{ + {Start: 60000, End: 180000, Range: 120000}, + }, + }, { + query: "foo[2m] @ 300", start: 200000, + expected: []*storage.SelectHints{ + {Start: 180000, End: 300000, Range: 120000}, + }, + }, { + query: "foo[2m] @ 60", start: 200000, + expected: []*storage.SelectHints{ + {Start: -60000, End: 60000, Range: 120000}, + }, + }, { + query: 
"foo[2m] offset 2m", start: 300000, + expected: []*storage.SelectHints{ + {Start: 60000, End: 180000, Range: 120000}, + }, + }, { + query: "foo[2m] @ 200 offset 2m", start: 300000, + expected: []*storage.SelectHints{ + {Start: -40000, End: 80000, Range: 120000}, + }, + }, { + query: "foo[2m:1s]", start: 300000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 300000}, + }, + }, { + query: "count_over_time(foo[2m:1s])", start: 300000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 300000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 300000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, + expected: []*storage.SelectHints{ + {Start: 75000, End: 200000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, + expected: []*storage.SelectHints{ + {Start: -25000, End: 100000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, + expected: []*storage.SelectHints{ + {Start: 165000, End: 290000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, + expected: []*storage.SelectHints{ + {Start: 155000, End: 280000, Func: "count_over_time"}, + }, + }, { + // When the @ is on the vector selector, the enclosing subquery parameters + // don't affect the hint ranges. + query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, + expected: []*storage.SelectHints{ + {Start: 185000, End: 190000, Func: "count_over_time"}, + }, + }, { + // When the @ is on the vector selector, the enclosing subquery parameters + // don't affect the hint ranges. 
+ query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, + expected: []*storage.SelectHints{ + {Start: 185000, End: 190000, Func: "count_over_time"}, + }, + }, { + query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, + expected: []*storage.SelectHints{ + {Start: -45000, End: 80000, Func: "count_over_time"}, + }, + }, { - query: "foo", start: 10000, end: 20000, - expected: []*storage.SelectHints{ - {Start: 5000, End: 20000, Step: 1000}, + query: "foo", start: 10000, end: 20000, + expected: []*storage.SelectHints{ + {Start: 5000, End: 20000, Step: 1000}, + }, + }, { + query: "foo @ 15", start: 10000, end: 20000, + expected: []*storage.SelectHints{ + {Start: 10000, End: 15000, Step: 1000}, + }, + }, { + query: "foo @ 1", start: 10000, end: 20000, + expected: []*storage.SelectHints{ + {Start: -4000, End: 1000, Step: 1000}, + }, + }, { + query: "rate(foo[2m] @ 180)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 60000, End: 180000, Range: 120000, Func: "rate", Step: 1000}, + }, + }, { + query: "rate(foo[2m] @ 300)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 180000, End: 300000, Range: 120000, Func: "rate", Step: 1000}, + }, + }, { + query: "rate(foo[2m] @ 60)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: -60000, End: 60000, Range: 120000, Func: "rate", Step: 1000}, + }, + }, { + query: "rate(foo[2m])", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 80000, End: 500000, Range: 120000, Func: "rate", Step: 1000}, + }, + }, { + query: "rate(foo[2m] offset 2m)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 60000, End: 380000, Range: 120000, Func: "rate", Step: 1000}, + }, + }, { + query: "rate(foo[2m:1s])", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 500000, Func: "rate", Step: 1000}, + }, + }, { + query: 
"count_over_time(foo[2m:1s])", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 500000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 165000, End: 490000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 75000, End: 200000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, end: 500000, + expected: []*storage.SelectHints{ + {Start: -25000, End: 100000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 155000, End: 480000, Func: "count_over_time", Step: 1000}, + }, + }, { + // When the @ is on the vector selector, the enclosing subquery parameters + // don't affect the hint ranges. + query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + }, + }, { + // When the @ is on the vector selector, the enclosing subquery parameters + // don't affect the hint ranges. 
+ query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, + expected: []*storage.SelectHints{ + {Start: -45000, End: 80000, Func: "count_over_time", Step: 1000}, + }, + }, { + query: "sum by (dim1) (foo)", start: 10000, + expected: []*storage.SelectHints{ + {Start: 5000, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}}, + }, + }, { + query: "sum without (dim1) (foo)", start: 10000, + expected: []*storage.SelectHints{ + {Start: 5000, End: 10000, Func: "sum", Grouping: []string{"dim1"}}, + }, + }, { + query: "sum by (dim1) (avg_over_time(foo[1s]))", start: 10000, + expected: []*storage.SelectHints{ + {Start: 9000, End: 10000, Func: "avg_over_time", Range: 1000}, + }, + }, { + query: "sum by (dim1) (max by (dim2) (foo))", start: 10000, + expected: []*storage.SelectHints{ + {Start: 5000, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}}, + }, + }, { + query: "(max by (dim1) (foo))[5s:1s]", start: 10000, + expected: []*storage.SelectHints{ + {Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}}, + }, + }, { + query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000, + expected: []*storage.SelectHints{ + {Start: 95000, End: 120000, Func: "sum", By: true}, + {Start: 95000, End: 120000, Func: "max", By: true}, + }, + }, { + query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 45000, End: 50000, Step: 1000}, + {Start: 245000, End: 250000, Step: 1000}, + {Start: 895000, End: 900000, Step: 1000}, + }, + }, { + query: "foo @ 50 + bar + baz @ 900", start: 100000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 45000, End: 50000, Step: 1000}, + {Start: 
95000, End: 500000, Step: 1000}, + {Start: 895000, End: 900000, Step: 1000}, + }, + }, { + query: "rate(foo[2s] @ 50) + bar @ 250 + baz @ 900", start: 100000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 48000, End: 50000, Step: 1000, Func: "rate", Range: 2000}, + {Start: 245000, End: 250000, Step: 1000}, + {Start: 895000, End: 900000, Step: 1000}, + }, + }, { + query: "rate(foo[2s:1s] @ 50) + bar + baz", start: 100000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, + {Start: 95000, End: 500000, Step: 1000}, + {Start: 95000, End: 500000, Step: 1000}, + }, + }, { + query: "rate(foo[2s:1s] @ 50) + bar + rate(baz[2m:1s] @ 900 offset 2m) ", start: 100000, end: 500000, + expected: []*storage.SelectHints{ + {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, + {Start: 95000, End: 500000, Step: 1000}, + {Start: 655000, End: 780000, Step: 1000, Func: "rate"}, + }, + }, { // Hints are based on the inner most subquery timestamp. + query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000, + expected: []*storage.SelectHints{ + {Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time"}, + }, + }, { // Hints are based on the inner most subquery timestamp. 
+ query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`, + expected: []*storage.SelectHints{ + {Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time"}, + }, }, - }, { - query: "foo @ 15", start: 10000, end: 20000, - expected: []*storage.SelectHints{ - {Start: 10000, End: 15000, Step: 1000}, - }, - }, { - query: "foo @ 1", start: 10000, end: 20000, - expected: []*storage.SelectHints{ - {Start: -4000, End: 1000, Step: 1000}, - }, - }, { - query: "rate(foo[2m] @ 180)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 60000, End: 180000, Range: 120000, Func: "rate", Step: 1000}, - }, - }, { - query: "rate(foo[2m] @ 300)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 180000, End: 300000, Range: 120000, Func: "rate", Step: 1000}, - }, - }, { - query: "rate(foo[2m] @ 60)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: -60000, End: 60000, Range: 120000, Func: "rate", Step: 1000}, - }, - }, { - query: "rate(foo[2m])", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 80000, End: 500000, Range: 120000, Func: "rate", Step: 1000}, - }, - }, { - query: "rate(foo[2m] offset 2m)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 60000, End: 380000, Range: 120000, Func: "rate", Step: 1000}, - }, - }, { - query: "rate(foo[2m:1s])", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 500000, Func: "rate", Step: 1000}, - }, - }, { - query: "count_over_time(foo[2m:1s])", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 500000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 165000, End: 490000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, end: 
500000, - expected: []*storage.SelectHints{ - {Start: 175000, End: 300000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 75000, End: 200000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, end: 500000, - expected: []*storage.SelectHints{ - {Start: -25000, End: 100000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 155000, End: 480000, Func: "count_over_time", Step: 1000}, - }, - }, { - // When the @ is on the vector selector, the enclosing subquery parameters - // don't affect the hint ranges. - query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, - }, - }, { - // When the @ is on the vector selector, the enclosing subquery parameters - // don't affect the hint ranges. 
- query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 185000, End: 190000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000, - expected: []*storage.SelectHints{ - {Start: -45000, End: 80000, Func: "count_over_time", Step: 1000}, - }, - }, { - query: "sum by (dim1) (foo)", start: 10000, - expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}}, - }, - }, { - query: "sum without (dim1) (foo)", start: 10000, - expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "sum", Grouping: []string{"dim1"}}, - }, - }, { - query: "sum by (dim1) (avg_over_time(foo[1s]))", start: 10000, - expected: []*storage.SelectHints{ - {Start: 9000, End: 10000, Func: "avg_over_time", Range: 1000}, - }, - }, { - query: "sum by (dim1) (max by (dim2) (foo))", start: 10000, - expected: []*storage.SelectHints{ - {Start: 5000, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}}, - }, - }, { - query: "(max by (dim1) (foo))[5s:1s]", start: 10000, - expected: []*storage.SelectHints{ - {Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}}, - }, - }, { - query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000, - expected: []*storage.SelectHints{ - {Start: 95000, End: 120000, Func: "sum", By: true}, - {Start: 95000, End: 120000, Func: "max", By: true}, - }, - }, { - query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 45000, End: 50000, Step: 1000}, - {Start: 245000, End: 250000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, - }, - }, { - query: "foo @ 50 + bar + baz @ 900", start: 100000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 45000, End: 50000, Step: 1000}, - {Start: 
95000, End: 500000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, - }, - }, { - query: "rate(foo[2s] @ 50) + bar @ 250 + baz @ 900", start: 100000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 48000, End: 50000, Step: 1000, Func: "rate", Range: 2000}, - {Start: 245000, End: 250000, Step: 1000}, - {Start: 895000, End: 900000, Step: 1000}, - }, - }, { - query: "rate(foo[2s:1s] @ 50) + bar + baz", start: 100000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 95000, End: 500000, Step: 1000}, - }, - }, { - query: "rate(foo[2s:1s] @ 50) + bar + rate(baz[2m:1s] @ 900 offset 2m) ", start: 100000, end: 500000, - expected: []*storage.SelectHints{ - {Start: 43000, End: 50000, Step: 1000, Func: "rate"}, - {Start: 95000, End: 500000, Step: 1000}, - {Start: 655000, End: 780000, Step: 1000, Func: "rate"}, - }, - }, { // Hints are based on the inner most subquery timestamp. - query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000, - expected: []*storage.SelectHints{ - {Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time"}, - }, - }, { // Hints are based on the inner most subquery timestamp. 
- query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`, - expected: []*storage.SelectHints{ - {Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time"}, - }, - }, } { t.Run(tc.query, func(t *testing.T) { engine := NewEngine(opts) @@ -561,7 +564,6 @@ func TestSelectHintsSetCorrectly(t *testing.T) { require.Equal(t, tc.expected, hintsRecorder.hints) }) - } } @@ -647,25 +649,31 @@ load 10s { Query: "metric", Result: Vector{ - Sample{Point: Point{V: 1, T: 1000}, - Metric: labels.FromStrings("__name__", "metric")}, + Sample{ + Point: Point{V: 1, T: 1000}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, Start: time.Unix(1, 0), }, { Query: "metric[20s]", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, Start: time.Unix(10, 0), }, // Range queries. 
{ Query: "1", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, - Metric: labels.FromStrings()}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, + Metric: labels.FromStrings(), + }, }, Start: time.Unix(0, 0), End: time.Unix(2, 0), @@ -673,9 +681,11 @@ load 10s }, { Query: "metric", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, Start: time.Unix(0, 0), End: time.Unix(2, 0), @@ -683,9 +693,11 @@ load 10s }, { Query: "metric", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, Start: time.Unix(0, 0), End: time.Unix(10, 0), @@ -745,23 +757,28 @@ load 10s Query: "1", MaxSamples: 1, Start: time.Unix(1, 0), - }, { + }, + { Query: "metric", MaxSamples: 1, Start: time.Unix(1, 0), - }, { + }, + { Query: "metric[20s]", MaxSamples: 2, Start: time.Unix(10, 0), - }, { + }, + { Query: "rate(metric[20s])", MaxSamples: 3, Start: time.Unix(10, 0), - }, { + }, + { Query: "metric[20s:5s]", MaxSamples: 3, Start: time.Unix(10, 0), - }, { + }, + { Query: "metric[20s] @ 10", MaxSamples: 2, Start: time.Unix(0, 0), @@ -773,38 +790,44 @@ load 10s Start: time.Unix(0, 0), End: time.Unix(2, 0), Interval: time.Second, - }, { + }, + { Query: "1", MaxSamples: 3, Start: time.Unix(0, 0), End: time.Unix(2, 0), Interval: time.Second, - }, { + }, + { Query: "metric", MaxSamples: 3, Start: time.Unix(0, 0), End: time.Unix(2, 0), Interval: time.Second, - }, { + }, + { Query: "metric", MaxSamples: 3, Start: time.Unix(0, 0), 
End: time.Unix(10, 0), Interval: 5 * time.Second, - }, { + }, + { Query: "rate(bigmetric[1s])", MaxSamples: 1, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, - }, { + }, + { // Result is duplicated, so @ also produces 3 samples. Query: "metric @ 10", MaxSamples: 3, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, - }, { + }, + { // The peak samples in memory is during the first evaluation: // - Subquery takes 22 samples, 11 for each bigmetric, // - Result is calculated per series where the series samples is buffered, hence 11 more here. @@ -816,7 +839,8 @@ load 10s Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, - }, { + }, + { // Here the reasoning is same as above. But LHS and RHS are done one after another. // So while one of them takes 35 samples at peak, we need to hold the 2 sample // result of the other till then. @@ -825,7 +849,8 @@ load 10s Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, - }, { + }, + { // Sample as above but with only 1 part as step invariant. // Here the peak is caused by the non-step invariant part as it touches more time range. // Hence at peak it is 2*21 (subquery from 0s to 20s) @@ -836,14 +861,16 @@ load 10s Start: time.Unix(10, 0), End: time.Unix(20, 0), Interval: 5 * time.Second, - }, { + }, + { // Nested subquery. // We saw that innermost rate takes 35 samples which is still the peak // since the other two subqueries just duplicate the result. Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`, MaxSamples: 35, Start: time.Unix(10, 0), - }, { + }, + { // Nested subquery. // Now the outmost subquery produces more samples than inner most rate. 
Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[17s:1s] @ 2000`, @@ -1179,9 +1206,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:10s]", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1191,9 +1220,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s]", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1203,9 +1234,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 2s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1215,9 +1248,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 6s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1227,9 +1262,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 4s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: 
[]Point{{V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1239,9 +1276,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 5s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1251,9 +1290,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 6s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1263,9 +1304,11 @@ func TestSubquerySelector(t *testing.T) { Query: "metric[20s:5s] offset 7s", Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, - Metric: labels.FromStrings("__name__", "metric")}, + Matrix{ + Series{ + Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, }, nil, }, @@ -1284,9 +1327,11 @@ func TestSubquerySelector(t *testing.T) { Query: `http_requests{group=~"pro.*",instance="0"}[30s:10s]`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 9990, T: 9990000}, {V: 10000, T: 10000000}, {V: 100, T: 10010000}, {V: 130, T: 10020000}}, - Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production")}, + Matrix{ + Series{ + Points: 
[]Point{{V: 9990, T: 9990000}, {V: 10000, T: 10000000}, {V: 100, T: 10010000}, {V: 130, T: 10020000}}, + Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), + }, }, nil, }, @@ -1296,9 +1341,11 @@ func TestSubquerySelector(t *testing.T) { Query: `http_requests{group=~"pro.*",instance="0"}[5m:]`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 9840, T: 9840000}, {V: 9900, T: 9900000}, {V: 9960, T: 9960000}, {V: 130, T: 10020000}, {V: 310, T: 10080000}}, - Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production")}, + Matrix{ + Series{ + Points: []Point{{V: 9840, T: 9840000}, {V: 9900, T: 9900000}, {V: 9960, T: 9960000}, {V: 130, T: 10020000}, {V: 310, T: 10080000}}, + Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), + }, }, nil, }, @@ -1308,9 +1355,11 @@ func TestSubquerySelector(t *testing.T) { Query: `http_requests{group=~"pro.*",instance="0"}[5m:] offset 20m`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 8640, T: 8640000}, {V: 8700, T: 8700000}, {V: 8760, T: 8760000}, {V: 8820, T: 8820000}, {V: 8880, T: 8880000}}, - Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production")}, + Matrix{ + Series{ + Points: []Point{{V: 8640, T: 8640000}, {V: 8700, T: 8700000}, {V: 8760, T: 8760000}, {V: 8820, T: 8820000}, {V: 8880, T: 8880000}}, + Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), + }, }, nil, }, @@ -1346,9 +1395,11 @@ func TestSubquerySelector(t *testing.T) { Query: `sum(http_requests{group=~"pro.*"})[30s:10s]`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 270, T: 90000}, {V: 300, T: 100000}, {V: 330, T: 110000}, {V: 360, T: 120000}}, - Metric: labels.Labels{}}, + Matrix{ + Series{ + Points: []Point{{V: 270, T: 
90000}, {V: 300, T: 100000}, {V: 330, T: 110000}, {V: 360, T: 120000}}, + Metric: labels.Labels{}, + }, }, nil, }, @@ -1358,9 +1409,11 @@ func TestSubquerySelector(t *testing.T) { Query: `sum(http_requests)[40s:10s]`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 800, T: 80000}, {V: 900, T: 90000}, {V: 1000, T: 100000}, {V: 1100, T: 110000}, {V: 1200, T: 120000}}, - Metric: labels.Labels{}}, + Matrix{ + Series{ + Points: []Point{{V: 800, T: 80000}, {V: 900, T: 90000}, {V: 1000, T: 100000}, {V: 1100, T: 110000}, {V: 1200, T: 120000}}, + Metric: labels.Labels{}, + }, }, nil, }, @@ -1370,9 +1423,11 @@ func TestSubquerySelector(t *testing.T) { Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s:5s]`, Result: Result{ nil, - Matrix{Series{ - Points: []Point{{V: 1000, T: 100000}, {V: 1000, T: 105000}, {V: 1100, T: 110000}, {V: 1100, T: 115000}, {V: 1200, T: 120000}}, - Metric: labels.Labels{}}, + Matrix{ + Series{ + Points: []Point{{V: 1000, T: 100000}, {V: 1000, T: 105000}, {V: 1100, T: 110000}, {V: 1100, T: 115000}, {V: 1200, T: 120000}}, + Metric: labels.Labels{}, + }, }, nil, }, @@ -1537,9 +1592,10 @@ func TestQueryLogger_error(t *testing.T) { func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { startTime := time.Unix(1000, 0) endTime := time.Unix(9999, 0) - var testCases = []struct { - input string // The input to be parsed. - expected parser.Expr // The expected expression AST. + testCases := []struct { + input string // The input to be parsed. + expected parser.Expr // The expected expression AST. 
+ outputTest bool }{ { input: "123.4567", @@ -1549,7 +1605,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { PosRange: parser.PositionRange{Start: 0, End: 8}, }, }, - }, { + }, + { input: `"foo"`, expected: &parser.StepInvariantExpr{ Expr: &parser.StringLiteral{ @@ -1557,7 +1614,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { PosRange: parser.PositionRange{Start: 0, End: 5}, }, }, - }, { + }, + { input: "foo * bar", expected: &parser.BinaryExpr{ Op: parser.MUL, @@ -1583,7 +1641,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { }, VectorMatching: &parser.VectorMatching{Card: parser.CardOneToOne}, }, - }, { + }, + { input: "foo * bar @ 10", expected: &parser.BinaryExpr{ Op: parser.MUL, @@ -1612,7 +1671,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { }, VectorMatching: &parser.VectorMatching{Card: parser.CardOneToOne}, }, - }, { + }, + { input: "foo @ 20 * bar @ 10", expected: &parser.StepInvariantExpr{ Expr: &parser.BinaryExpr{ @@ -1642,7 +1702,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { VectorMatching: &parser.VectorMatching{Card: parser.CardOneToOne}, }, }, - }, { + }, + { input: "test[5s]", expected: &parser.MatrixSelector{ VectorSelector: &parser.VectorSelector{ @@ -1658,7 +1719,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { Range: 5 * time.Second, EndPos: 8, }, - }, { + }, + { input: `test{a="b"}[5y] @ 1603774699`, expected: &parser.StepInvariantExpr{ Expr: &parser.MatrixSelector{ @@ -1678,7 +1740,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { EndPos: 28, }, }, - }, { + }, + { input: "sum by (foo)(some_metric)", expected: &parser.AggregateExpr{ Op: parser.SUM, @@ -1698,7 +1761,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { End: 25, }, }, - }, { + }, + { input: "sum by (foo)(some_metric @ 10)", expected: &parser.StepInvariantExpr{ Expr: &parser.AggregateExpr{ @@ -1721,7 +1785,8 @@ func 
TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { }, }, }, - }, { + }, + { input: "sum(some_metric1 @ 10) + sum(some_metric2 @ 20)", expected: &parser.StepInvariantExpr{ Expr: &parser.BinaryExpr{ @@ -1765,7 +1830,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { }, }, }, - }, { + }, + { input: "some_metric and topk(5, rate(some_metric[1m] @ 20))", expected: &parser.BinaryExpr{ Op: parser.LAND, @@ -1823,7 +1889,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { }, }, }, - }, { + }, + { input: "time()", expected: &parser.Call{ Func: parser.MustGetFunction("time"), @@ -1833,7 +1900,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { End: 6, }, }, - }, { + }, + { input: `foo{bar="baz"}[10m:6s]`, expected: &parser.SubqueryExpr{ Expr: &parser.VectorSelector{ @@ -1851,7 +1919,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { Step: 6 * time.Second, EndPos: 22, }, - }, { + }, + { input: `foo{bar="baz"}[10m:6s] @ 10`, expected: &parser.StepInvariantExpr{ Expr: &parser.SubqueryExpr{ @@ -1872,7 +1941,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { EndPos: 27, }, }, - }, { // Even though the subquery is step invariant, the inside is also wrapped separately. + }, + { // Even though the subquery is step invariant, the inside is also wrapped separately. 
input: `sum(foo{bar="baz"} @ 20)[10m:6s] @ 10`, expected: &parser.StepInvariantExpr{ Expr: &parser.SubqueryExpr{ @@ -1903,7 +1973,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { EndPos: 37, }, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] @ 1603775091)[4m:3s]`, expected: &parser.SubqueryExpr{ Expr: &parser.StepInvariantExpr{ @@ -1950,7 +2021,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { Step: 3 * time.Second, EndPos: 64, }, - }, { + }, + { input: `some_metric @ 123 offset 1m [10m:5s]`, expected: &parser.SubqueryExpr{ Expr: &parser.StepInvariantExpr{ @@ -1971,7 +2043,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { Step: 5 * time.Second, EndPos: 36, }, - }, { + }, + { input: `some_metric[10m:5s] offset 1m @ 123`, expected: &parser.StepInvariantExpr{ Expr: &parser.SubqueryExpr{ @@ -1992,7 +2065,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { EndPos: 35, }, }, - }, { + }, + { input: `(foo + bar{nm="val"} @ 1234)[5m:] @ 1603775019`, expected: &parser.StepInvariantExpr{ Expr: &parser.SubqueryExpr{ @@ -2037,7 +2111,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { EndPos: 46, }, }, - }, { + }, + { input: "abs(abs(metric @ 10))", expected: &parser.StepInvariantExpr{ Expr: &parser.Call{ @@ -2074,7 +2149,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { }, }, }, - }, { + }, + { input: "sum(sum(some_metric1 @ 10) + sum(some_metric2 @ 20))", expected: &parser.StepInvariantExpr{ Expr: &parser.AggregateExpr{ @@ -2125,7 +2201,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { }, }, }, - }, { + }, + { input: `foo @ start()`, expected: &parser.StepInvariantExpr{ Expr: &parser.VectorSelector{ @@ -2141,7 +2218,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { StartOrEnd: parser.START, }, }, - }, { + }, + { input: `foo @ end()`, expected: &parser.StepInvariantExpr{ Expr: &parser.VectorSelector{ @@ 
-2157,7 +2235,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { StartOrEnd: parser.END, }, }, - }, { + }, + { input: `test[5y] @ start()`, expected: &parser.StepInvariantExpr{ Expr: &parser.MatrixSelector{ @@ -2177,7 +2256,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { EndPos: 18, }, }, - }, { + }, + { input: `test[5y] @ end()`, expected: &parser.StepInvariantExpr{ Expr: &parser.MatrixSelector{ @@ -2197,7 +2277,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { EndPos: 16, }, }, - }, { + }, + { input: `some_metric[10m:5s] @ start()`, expected: &parser.StepInvariantExpr{ Expr: &parser.SubqueryExpr{ @@ -2218,7 +2299,8 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { EndPos: 29, }, }, - }, { + }, + { input: `some_metric[10m:5s] @ end()`, expected: &parser.StepInvariantExpr{ Expr: &parser.SubqueryExpr{ @@ -2240,6 +2322,61 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { }, }, }, + { + input: `floor(some_metric / (3 * 1024))`, + outputTest: true, + expected: &parser.Call{ + Func: &parser.Function{ + Name: "floor", + ArgTypes: []parser.ValueType{parser.ValueTypeVector}, + ReturnType: parser.ValueTypeVector, + }, + Args: parser.Expressions{ + &parser.BinaryExpr{ + Op: parser.DIV, + LHS: &parser.VectorSelector{ + Name: "some_metric", + LabelMatchers: []*labels.Matcher{ + parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), + }, + PosRange: parser.PositionRange{ + Start: 6, + End: 17, + }, + }, + RHS: &parser.StepInvariantExpr{ + Expr: &parser.ParenExpr{ + Expr: &parser.BinaryExpr{ + Op: parser.MUL, + LHS: &parser.NumberLiteral{ + Val: 3, + PosRange: parser.PositionRange{ + Start: 21, + End: 22, + }, + }, + RHS: &parser.NumberLiteral{ + Val: 1024, + PosRange: parser.PositionRange{ + Start: 25, + End: 29, + }, + }, + }, + PosRange: parser.PositionRange{ + Start: 20, + End: 30, + }, + }, + }, + }, + }, + PosRange: parser.PositionRange{ + Start: 0, + End: 31, + }, + 
}, + }, } for _, test := range testCases { @@ -2247,6 +2384,9 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { expr, err := parser.ParseExpr(test.input) require.NoError(t, err) expr = PreprocessExpr(expr, startTime, endTime) + if test.outputTest { + require.Equal(t, test.input, expr.String(), "error on input '%s'", test.input) + } require.Equal(t, test.expected, expr, "error on input '%s'", test.input) }) } @@ -2339,9 +2479,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s bar 0 1 10 100 1000`, Query: "sum_over_time(bar[30s])", - Result: Matrix{Series{ - Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, - Metric: labels.Labels{}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, + Metric: labels.Labels{}, + }, }, Start: time.Unix(0, 0), End: time.Unix(120, 0), @@ -2352,9 +2494,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s bar 0 1 10 100 1000 0 0 0 0`, Query: "sum_over_time(bar[30s])", - Result: Matrix{Series{ - Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, - Metric: labels.Labels{}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, + Metric: labels.Labels{}, + }, }, Start: time.Unix(0, 0), End: time.Unix(120, 0), @@ -2365,9 +2509,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s bar 0 1 10 100 1000 10000 100000 1000000 10000000`, Query: "sum_over_time(bar[30s])", - Result: Matrix{Series{ - Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}, {V: 110000, T: 180000}, {V: 11000000, T: 240000}}, - Metric: labels.Labels{}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}, {V: 110000, T: 180000}, {V: 11000000, T: 240000}}, + Metric: labels.Labels{}, + }, }, Start: time.Unix(0, 0), End: time.Unix(240, 0), @@ -2378,9 +2524,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s bar 5 17 42 2 7 905 51`, 
Query: "sum_over_time(bar[30s])", - Result: Matrix{Series{ - Points: []Point{{V: 5, T: 0}, {V: 59, T: 60000}, {V: 9, T: 120000}, {V: 956, T: 180000}}, - Metric: labels.Labels{}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 5, T: 0}, {V: 59, T: 60000}, {V: 9, T: 120000}, {V: 956, T: 180000}}, + Metric: labels.Labels{}, + }, }, Start: time.Unix(0, 0), End: time.Unix(180, 0), @@ -2391,9 +2539,11 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s metric 1+1x4`, Query: "metric", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, - Metric: labels.Labels{labels.Label{Name: "__name__", Value: "metric"}}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, + Metric: labels.Labels{labels.Label{Name: "__name__", Value: "metric"}}, + }, }, Start: time.Unix(0, 0), End: time.Unix(120, 0), @@ -2404,9 +2554,37 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s metric 1+1x8`, Query: "metric", - Result: Matrix{Series{ - Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, - Metric: labels.Labels{labels.Label{Name: "__name__", Value: "metric"}}}, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, + Metric: labels.Labels{labels.Label{Name: "__name__", Value: "metric"}}, + }, + }, + Start: time.Unix(0, 0), + End: time.Unix(120, 0), + Interval: 1 * time.Minute, + }, + { + Name: "short-circuit", + Load: `load 30s + foo{job="1"} 1+1x4 + bar{job="2"} 1+1x4`, + Query: `foo > 2 or bar`, + Result: Matrix{ + Series{ + Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, + Metric: labels.Labels{ + labels.Label{Name: "__name__", Value: "bar"}, + labels.Label{Name: "job", Value: "2"}, + }, + }, + Series{ + Points: []Point{{V: 3, T: 60000}, {V: 5, T: 120000}}, + Metric: labels.Labels{ + labels.Label{Name: "__name__", Value: "foo"}, + labels.Label{Name: "job", Value: "1"}, + }, + }, }, Start: time.Unix(0, 0), End: 
time.Unix(120, 0), diff --git a/promql/functions.go b/promql/functions.go index 50594503d..fbafc7864 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -24,7 +24,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" ) @@ -56,7 +56,7 @@ func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) // It calculates the rate (allowing for counter resets if isCounter is true), // extrapolates if the first/last sample is close to the boundary, and returns // the result as either per-second (if isRate is true) or overall. -func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter bool, isRate bool) Vector { +func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) Vector { ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) var ( @@ -367,7 +367,7 @@ func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func([]Point) // === avg_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { return aggrOverTime(vals, enh, func(values []Point) float64 { - var mean, count float64 + var mean, count, c float64 for _, v := range values { count++ if math.IsInf(mean, 0) { @@ -387,9 +387,13 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode continue } } - mean += v.V/count - mean/count + mean, c = kahanSumInc(v.V/count-mean/count, mean, c) } - return mean + + if math.IsInf(mean, 0) { + return mean + } + return mean + c }) } @@ -439,11 +443,14 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === sum_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcSumOverTime(vals []parser.Value, 
args parser.Expressions, enh *EvalNodeHelper) Vector { return aggrOverTime(vals, enh, func(values []Point) float64 { - var sum float64 + var sum, c float64 for _, v := range values { - sum += v.V + sum, c = kahanSumInc(v.V, sum, c) } - return sum + if math.IsInf(sum, 0) { + return sum + } + return sum + c }) } @@ -464,28 +471,32 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva // === stddev_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { return aggrOverTime(vals, enh, func(values []Point) float64 { - var aux, count, mean float64 + var count float64 + var mean, cMean float64 + var aux, cAux float64 for _, v := range values { count++ - delta := v.V - mean - mean += delta / count - aux += delta * (v.V - mean) + delta := v.V - (mean + cMean) + mean, cMean = kahanSumInc(delta/count, mean, cMean) + aux, cAux = kahanSumInc(delta*(v.V-(mean+cMean)), aux, cAux) } - return math.Sqrt(aux / count) + return math.Sqrt((aux + cAux) / count) }) } // === stdvar_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { return aggrOverTime(vals, enh, func(values []Point) float64 { - var aux, count, mean float64 + var count float64 + var mean, cMean float64 + var aux, cAux float64 for _, v := range values { count++ - delta := v.V - mean - mean += delta / count - aux += delta * (v.V - mean) + delta := v.V - (mean + cMean) + mean, cMean = kahanSumInc(delta/count, mean, cMean) + aux, cAux = kahanSumInc(delta*(v.V-(mean+cMean)), aux, cAux) } - return aux / count + return (aux + cAux) / count }) } @@ -675,23 +686,64 @@ func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe return enh.Out } +func kahanSum(samples []float64) float64 { + var sum, c float64 + + for _, v := range samples { + sum, c = kahanSumInc(v, sum, c) + } + return sum 
+ c +} + +func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { + t := sum + inc + // Using Neumaier improvement, swap if next term larger than sum. + if math.Abs(sum) >= math.Abs(inc) { + c += (sum - t) + inc + } else { + c += (inc - t) + sum + } + return t, c +} + // linearRegression performs a least-square linear regression analysis on the // provided SamplePairs. It returns the slope, and the intercept value at the // provided time. func linearRegression(samples []Point, interceptTime int64) (slope, intercept float64) { var ( - n float64 - sumX, sumY float64 - sumXY, sumX2 float64 + n float64 + sumX, cX float64 + sumY, cY float64 + sumXY, cXY float64 + sumX2, cX2 float64 + initY float64 + constY bool ) - for _, sample := range samples { - x := float64(sample.T-interceptTime) / 1e3 + initY = samples[0].V + constY = true + for i, sample := range samples { + // Set constY to false if any new y values are encountered. + if constY && i > 0 && sample.V != initY { + constY = false + } n += 1.0 - sumY += sample.V - sumX += x - sumXY += x * sample.V - sumX2 += x * x + x := float64(sample.T-interceptTime) / 1e3 + sumX, cX = kahanSumInc(x, sumX, cX) + sumY, cY = kahanSumInc(sample.V, sumY, cY) + sumXY, cXY = kahanSumInc(x*sample.V, sumXY, cXY) + sumX2, cX2 = kahanSumInc(x*x, sumX2, cX2) } + if constY { + if math.IsInf(initY, 0) { + return math.NaN(), math.NaN() + } + return 0, initY + } + sumX = sumX + cX + sumY = sumY + cY + sumXY = sumXY + cXY + sumX2 = sumX2 + cX2 + covXY := sumXY - sumX*sumY/n varX := sumX2 - sumX*sumX/n @@ -1188,5 +1240,7 @@ func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels { } func stringFromArg(e parser.Expr) string { - return unwrapStepInvariantExpr(e).(*parser.StringLiteral).Val + tmp := unwrapStepInvariantExpr(e) // Unwrap StepInvariant + unwrapParenExpr(&tmp) // Optionally unwrap ParenExpr + return tmp.(*parser.StringLiteral).Val } diff --git a/promql/functions_test.go b/promql/functions_test.go index 
5707cbed3..cde14461f 100644 --- a/promql/functions_test.go +++ b/promql/functions_test.go @@ -15,13 +15,14 @@ package promql import ( "context" + "math" "testing" "time" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/util/teststorage" ) @@ -43,8 +44,14 @@ func TestDeriv(t *testing.T) { a := storage.Appender(context.Background()) metric := labels.FromStrings("__name__", "foo") - a.Append(0, metric, 1493712816939, 1.0) - a.Append(0, metric, 1493712846939, 1.0) + start := 1493712816939 + interval := 30 * 1000 + // Introduce some timestamp jitter to test 0 slope case. + // https://github.com/prometheus/prometheus/issues/7180 + for i := 0; i < 15; i++ { + jitter := 12 * i % 2 + a.Append(0, metric, int64(start+interval*i+jitter), 1) + } require.NoError(t, a.Commit()) @@ -71,3 +78,9 @@ func TestFunctionList(t *testing.T) { require.True(t, ok, "function %s exists in parser package, but not in promql package", i) } } + +func TestKahanSum(t *testing.T) { + vals := []float64{1.0, math.Pow(10, 100), 1.0, -1 * math.Pow(10, 100)} + expected := 2.0 + require.Equal(t, expected, kahanSum(vals)) +} diff --git a/promql/fuzz.go b/promql/fuzz.go index b34fbbc66..7aaebdd6b 100644 --- a/promql/fuzz.go +++ b/promql/fuzz.go @@ -20,7 +20,7 @@ package promql import ( "io" - "github.com/prometheus/prometheus/pkg/textparse" + "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/promql/parser" ) diff --git a/promql/parser/ast.go b/promql/parser/ast.go index 776242564..fc144cbbc 100644 --- a/promql/parser/ast.go +++ b/promql/parser/ast.go @@ -19,7 +19,7 @@ import ( "github.com/pkg/errors" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" 
"github.com/prometheus/prometheus/storage" ) @@ -407,7 +407,7 @@ type PositionRange struct { // mergeRanges is a helper function to merge the PositionRanges of two Nodes. // Note that the arguments must be in the same order as they // occur in the input string. -func mergeRanges(first Node, last Node) PositionRange { +func mergeRanges(first, last Node) PositionRange { return PositionRange{ Start: first.PositionRange().Start, End: last.PositionRange().End, @@ -426,15 +426,19 @@ func (i *Item) PositionRange() PositionRange { func (e *AggregateExpr) PositionRange() PositionRange { return e.PosRange } + func (e *BinaryExpr) PositionRange() PositionRange { return mergeRanges(e.LHS, e.RHS) } + func (e *Call) PositionRange() PositionRange { return e.PosRange } + func (e *EvalStmt) PositionRange() PositionRange { return e.Expr.PositionRange() } + func (e Expressions) PositionRange() PositionRange { if len(e) == 0 { // Position undefined. @@ -445,33 +449,40 @@ func (e Expressions) PositionRange() PositionRange { } return mergeRanges(e[0], e[len(e)-1]) } + func (e *MatrixSelector) PositionRange() PositionRange { return PositionRange{ Start: e.VectorSelector.PositionRange().Start, End: e.EndPos, } } + func (e *SubqueryExpr) PositionRange() PositionRange { return PositionRange{ Start: e.Expr.PositionRange().Start, End: e.EndPos, } } + func (e *NumberLiteral) PositionRange() PositionRange { return e.PosRange } + func (e *ParenExpr) PositionRange() PositionRange { return e.PosRange } + func (e *StringLiteral) PositionRange() PositionRange { return e.PosRange } + func (e *UnaryExpr) PositionRange() PositionRange { return PositionRange{ Start: e.StartPos, End: e.Expr.PositionRange().End, } } + func (e *VectorSelector) PositionRange() PositionRange { return e.PosRange } diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index fcf504aca..433f45259 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -20,8 +20,8 @@ 
import ( "strconv" "time" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" ) %} diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 71614913a..87f25f024 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -13,8 +13,8 @@ import ( "strconv" "time" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" ) //line promql/parser/generated_parser.y:28 @@ -33,82 +33,84 @@ type yySymType struct { duration time.Duration } -const EQL = 57346 -const BLANK = 57347 -const COLON = 57348 -const COMMA = 57349 -const COMMENT = 57350 -const DURATION = 57351 -const EOF = 57352 -const ERROR = 57353 -const IDENTIFIER = 57354 -const LEFT_BRACE = 57355 -const LEFT_BRACKET = 57356 -const LEFT_PAREN = 57357 -const METRIC_IDENTIFIER = 57358 -const NUMBER = 57359 -const RIGHT_BRACE = 57360 -const RIGHT_BRACKET = 57361 -const RIGHT_PAREN = 57362 -const SEMICOLON = 57363 -const SPACE = 57364 -const STRING = 57365 -const TIMES = 57366 -const operatorsStart = 57367 -const ADD = 57368 -const DIV = 57369 -const EQLC = 57370 -const EQL_REGEX = 57371 -const GTE = 57372 -const GTR = 57373 -const LAND = 57374 -const LOR = 57375 -const LSS = 57376 -const LTE = 57377 -const LUNLESS = 57378 -const MOD = 57379 -const MUL = 57380 -const NEQ = 57381 -const NEQ_REGEX = 57382 -const POW = 57383 -const SUB = 57384 -const AT = 57385 -const ATAN2 = 57386 -const operatorsEnd = 57387 -const aggregatorsStart = 57388 -const AVG = 57389 -const BOTTOMK = 57390 -const COUNT = 57391 -const COUNT_VALUES = 57392 -const GROUP = 57393 -const MAX = 57394 -const MIN = 57395 -const QUANTILE = 57396 -const STDDEV = 57397 -const STDVAR = 57398 -const SUM = 57399 -const 
TOPK = 57400 -const aggregatorsEnd = 57401 -const keywordsStart = 57402 -const BOOL = 57403 -const BY = 57404 -const GROUP_LEFT = 57405 -const GROUP_RIGHT = 57406 -const IGNORING = 57407 -const OFFSET = 57408 -const ON = 57409 -const WITHOUT = 57410 -const keywordsEnd = 57411 -const preprocessorStart = 57412 -const START = 57413 -const END = 57414 -const preprocessorEnd = 57415 -const startSymbolsStart = 57416 -const START_METRIC = 57417 -const START_SERIES_DESCRIPTION = 57418 -const START_EXPRESSION = 57419 -const START_METRIC_SELECTOR = 57420 -const startSymbolsEnd = 57421 +const ( + EQL = 57346 + BLANK = 57347 + COLON = 57348 + COMMA = 57349 + COMMENT = 57350 + DURATION = 57351 + EOF = 57352 + ERROR = 57353 + IDENTIFIER = 57354 + LEFT_BRACE = 57355 + LEFT_BRACKET = 57356 + LEFT_PAREN = 57357 + METRIC_IDENTIFIER = 57358 + NUMBER = 57359 + RIGHT_BRACE = 57360 + RIGHT_BRACKET = 57361 + RIGHT_PAREN = 57362 + SEMICOLON = 57363 + SPACE = 57364 + STRING = 57365 + TIMES = 57366 + operatorsStart = 57367 + ADD = 57368 + DIV = 57369 + EQLC = 57370 + EQL_REGEX = 57371 + GTE = 57372 + GTR = 57373 + LAND = 57374 + LOR = 57375 + LSS = 57376 + LTE = 57377 + LUNLESS = 57378 + MOD = 57379 + MUL = 57380 + NEQ = 57381 + NEQ_REGEX = 57382 + POW = 57383 + SUB = 57384 + AT = 57385 + ATAN2 = 57386 + operatorsEnd = 57387 + aggregatorsStart = 57388 + AVG = 57389 + BOTTOMK = 57390 + COUNT = 57391 + COUNT_VALUES = 57392 + GROUP = 57393 + MAX = 57394 + MIN = 57395 + QUANTILE = 57396 + STDDEV = 57397 + STDVAR = 57398 + SUM = 57399 + TOPK = 57400 + aggregatorsEnd = 57401 + keywordsStart = 57402 + BOOL = 57403 + BY = 57404 + GROUP_LEFT = 57405 + GROUP_RIGHT = 57406 + IGNORING = 57407 + OFFSET = 57408 + ON = 57409 + WITHOUT = 57410 + keywordsEnd = 57411 + preprocessorStart = 57412 + START = 57413 + END = 57414 + preprocessorEnd = 57415 + startSymbolsStart = 57416 + START_METRIC = 57417 + START_SERIES_DESCRIPTION = 57418 + START_EXPRESSION = 57419 + START_METRIC_SELECTOR = 57420 + 
startSymbolsEnd = 57421 +) var yyToknames = [...]string{ "$end", @@ -194,9 +196,11 @@ var yyToknames = [...]string{ var yyStatenames = [...]string{} -const yyEofCode = 1 -const yyErrCode = 2 -const yyInitialStackSize = 16 +const ( + yyEofCode = 1 + yyErrCode = 2 + yyInitialStackSize = 16 +) //line promql/parser/generated_parser.y:749 diff --git a/promql/parser/lex_test.go b/promql/parser/lex_test.go index 95f4d978d..8e22f41d9 100644 --- a/promql/parser/lex_test.go +++ b/promql/parser/lex_test.go @@ -318,25 +318,32 @@ var tests = []struct { { input: "offset", expected: []Item{{OFFSET, 0, "offset"}}, - }, { + }, + { input: "by", expected: []Item{{BY, 0, "by"}}, - }, { + }, + { input: "without", expected: []Item{{WITHOUT, 0, "without"}}, - }, { + }, + { input: "on", expected: []Item{{ON, 0, "on"}}, - }, { + }, + { input: "ignoring", expected: []Item{{IGNORING, 0, "ignoring"}}, - }, { + }, + { input: "group_left", expected: []Item{{GROUP_LEFT, 0, "group_left"}}, - }, { + }, + { input: "group_right", expected: []Item{{GROUP_RIGHT, 0, "group_right"}}, - }, { + }, + { input: "bool", expected: []Item{{BOOL, 0, "bool"}}, }, @@ -569,7 +576,8 @@ var tests = []struct { {DURATION, 24, `4s`}, {RIGHT_BRACKET, 26, `]`}, }, - }, { + }, + { input: `test:name{on!~"b:ar"}[4m:4s]`, expected: []Item{ {METRIC_IDENTIFIER, 0, `test:name`}, @@ -584,7 +592,8 @@ var tests = []struct { {DURATION, 25, `4s`}, {RIGHT_BRACKET, 27, `]`}, }, - }, { + }, + { input: `test:name{on!~"b:ar"}[4m:]`, expected: []Item{ {METRIC_IDENTIFIER, 0, `test:name`}, @@ -598,7 +607,8 @@ var tests = []struct { {COLON, 24, `:`}, {RIGHT_BRACKET, 25, `]`}, }, - }, { // Nested Subquery. + }, + { // Nested Subquery. 
input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:])[4m:3s]`, expected: []Item{ @@ -646,7 +656,8 @@ var tests = []struct { {OFFSET, 29, "offset"}, {DURATION, 36, "10m"}, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] offset 6m)[4m:3s]`, expected: []Item{ @@ -737,7 +748,6 @@ func TestLexer(t *testing.T) { if item.Typ == ERROR { hasError = true } - } if !hasError { t.Logf("%d: input %q", i, test.input) diff --git a/promql/parser/parse.go b/promql/parser/parse.go index edecfc0e9..5b60c5ed5 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -26,8 +26,8 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/util/strutil" ) @@ -241,7 +241,7 @@ func (p *parser) addParseErr(positionRange PositionRange, err error) { // unexpected creates a parser error complaining about an unexpected lexer item. // The item that is presented as unexpected is always the last item produced // by the lexer. 
-func (p *parser) unexpected(context string, expected string) { +func (p *parser) unexpected(context, expected string) { var errMsg strings.Builder // Do not report lexer errors twice @@ -354,7 +354,8 @@ func (p *parser) InjectItem(typ ItemType) { p.inject = typ p.injecting = true } -func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers Node, rhs Node) *BinaryExpr { + +func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers, rhs Node) *BinaryExpr { ret := modifiers.(*BinaryExpr) ret.LHS = lhs.(Expr) @@ -374,7 +375,7 @@ func (p *parser) assembleVectorSelector(vs *VectorSelector) { } } -func (p *parser) newAggregateExpr(op Item, modifier Node, args Node) (ret *AggregateExpr) { +func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateExpr) { ret = modifier.(*AggregateExpr) arguments := args.(Expressions) @@ -650,10 +651,9 @@ func (p *parser) parseGenerated(startSymbol ItemType) interface{} { p.yyParser.Parse(p) return p.generatedParserResult - } -func (p *parser) newLabelMatcher(label Item, operator Item, value Item) *labels.Matcher { +func (p *parser) newLabelMatcher(label, operator, value Item) *labels.Matcher { op := operator.Typ val := p.unquoteString(value.Val) diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index b73dfd22a..790064d37 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -24,7 +24,7 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" ) var testExpr = []struct { @@ -40,73 +40,85 @@ var testExpr = []struct { Val: 1, PosRange: PositionRange{Start: 0, End: 1}, }, - }, { + }, + { input: "+Inf", expected: &NumberLiteral{ Val: math.Inf(1), PosRange: PositionRange{Start: 0, End: 4}, }, - }, { + }, + { input: "-Inf", expected: &NumberLiteral{ Val: math.Inf(-1), PosRange: PositionRange{Start: 0, End: 4}, }, - }, { + }, + { input: ".5", 
expected: &NumberLiteral{ Val: 0.5, PosRange: PositionRange{Start: 0, End: 2}, }, - }, { + }, + { input: "5.", expected: &NumberLiteral{ Val: 5, PosRange: PositionRange{Start: 0, End: 2}, }, - }, { + }, + { input: "123.4567", expected: &NumberLiteral{ Val: 123.4567, PosRange: PositionRange{Start: 0, End: 8}, }, - }, { + }, + { input: "5e-3", expected: &NumberLiteral{ Val: 0.005, PosRange: PositionRange{Start: 0, End: 4}, }, - }, { + }, + { input: "5e3", expected: &NumberLiteral{ Val: 5000, PosRange: PositionRange{Start: 0, End: 3}, }, - }, { + }, + { input: "0xc", expected: &NumberLiteral{ Val: 12, PosRange: PositionRange{Start: 0, End: 3}, }, - }, { + }, + { input: "0755", expected: &NumberLiteral{ Val: 493, PosRange: PositionRange{Start: 0, End: 4}, }, - }, { + }, + { input: "+5.5e-3", expected: &NumberLiteral{ Val: 0.0055, PosRange: PositionRange{Start: 0, End: 7}, }, - }, { + }, + { input: "-0755", expected: &NumberLiteral{ Val: -493, PosRange: PositionRange{Start: 0, End: 5}, }, - }, { + }, + { input: "1 + 1", expected: &BinaryExpr{ Op: ADD, @@ -119,7 +131,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 4, End: 5}, }, }, - }, { + }, + { input: "1 - 1", expected: &BinaryExpr{ Op: SUB, @@ -132,7 +145,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 4, End: 5}, }, }, - }, { + }, + { input: "1 * 1", expected: &BinaryExpr{ Op: MUL, @@ -145,7 +159,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 4, End: 5}, }, }, - }, { + }, + { input: "1 % 1", expected: &BinaryExpr{ Op: MOD, @@ -158,7 +173,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 4, End: 5}, }, }, - }, { + }, + { input: "1 / 1", expected: &BinaryExpr{ Op: DIV, @@ -171,7 +187,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 4, End: 5}, }, }, - }, { + }, + { input: "1 == bool 1", expected: &BinaryExpr{ Op: EQLC, @@ -185,7 +202,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "1 != bool 1", expected: &BinaryExpr{ Op: 
NEQ, @@ -199,7 +217,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "1 > bool 1", expected: &BinaryExpr{ Op: GTR, @@ -213,7 +232,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "1 >= bool 1", expected: &BinaryExpr{ Op: GTE, @@ -227,7 +247,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "1 < bool 1", expected: &BinaryExpr{ Op: LSS, @@ -241,7 +262,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "1 <= bool 1", expected: &BinaryExpr{ Op: LTE, @@ -255,7 +277,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "-1^2", expected: &UnaryExpr{ Op: SUB, @@ -271,7 +294,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "-1*2", expected: &BinaryExpr{ Op: MUL, @@ -284,7 +308,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 3, End: 4}, }, }, - }, { + }, + { input: "-1+2", expected: &BinaryExpr{ Op: ADD, @@ -297,7 +322,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 3, End: 4}, }, }, - }, { + }, + { input: "-1^-2", expected: &UnaryExpr{ Op: SUB, @@ -313,7 +339,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "+1 + -2 * 1", expected: &BinaryExpr{ Op: ADD, @@ -333,7 +360,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "1 + 2/(3*1)", expected: &BinaryExpr{ Op: ADD, @@ -363,7 +391,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "1 < bool 2 - 1 * 2", expected: &BinaryExpr{ Op: LSS, @@ -391,7 +420,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "-some_metric", expected: &UnaryExpr{ Op: SUB, @@ -406,7 +436,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "+some_metric", expected: &UnaryExpr{ Op: ADD, @@ -421,7 +452,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: " +some_metric", expected: &UnaryExpr{ Op: ADD, @@ -437,103 +469,128 @@ var testExpr = []struct { }, StartPos: 1, }, - }, { + }, + { input: "", fail: true, 
errMsg: "no expression found in input", - }, { + }, + { input: "# just a comment\n\n", fail: true, errMsg: "no expression found in input", - }, { + }, + { input: "1+", fail: true, errMsg: "unexpected end of input", - }, { + }, + { input: ".", fail: true, errMsg: "unexpected character: '.'", - }, { + }, + { input: "2.5.", fail: true, errMsg: "unexpected character: '.'", - }, { + }, + { input: "100..4", fail: true, errMsg: `unexpected number ".4"`, - }, { + }, + { input: "0deadbeef", fail: true, errMsg: "bad number or duration syntax: \"0de\"", - }, { + }, + { input: "1 /", fail: true, errMsg: "unexpected end of input", - }, { + }, + { input: "*1", fail: true, errMsg: "unexpected ", - }, { + }, + { input: "(1))", fail: true, errMsg: "unexpected right parenthesis ')'", - }, { + }, + { input: "((1)", fail: true, errMsg: "unclosed left parenthesis", - }, { + }, + { input: "999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999", fail: true, errMsg: "out of range", - }, { + }, + { input: "(", fail: true, errMsg: "unclosed left parenthesis", - }, { + }, + { input: "1 and 1", fail: true, errMsg: "set operator \"and\" not allowed in binary scalar expression", - }, { + }, + { input: "1 == 1", fail: true, errMsg: "1:3: parse error: comparisons between scalars must use BOOL modifier", - }, { + }, + { input: "1 or 1", fail: true, errMsg: "set operator \"or\" not allowed in binary scalar expression", - }, { + }, + { input: "1 unless 1", fail: true, errMsg: "set operator \"unless\" not allowed in binary scalar expression", - }, { + }, + { input: "1 !~ 1", fail: true, errMsg: `unexpected character after '!': '~'`, - }, { + }, + { input: "1 =~ 1", fail: true, errMsg: `unexpected character after '=': '~'`, - }, { + }, + { input: 
`-"string"`, fail: true, errMsg: `unary expression only allowed on expressions of type scalar or instant vector, got "string"`, - }, { + }, + { input: `-test[5m]`, fail: true, errMsg: `unary expression only allowed on expressions of type scalar or instant vector, got "range vector"`, - }, { + }, + { input: `*test`, fail: true, errMsg: "unexpected ", - }, { + }, + { input: "1 offset 1d", fail: true, errMsg: "1:1: parse error: offset modifier must be preceded by an instant vector selector or range vector selector or a subquery", - }, { + }, + { input: "foo offset 1s offset 2s", fail: true, errMsg: "offset may not be set multiple times", - }, { + }, + { input: "a - on(b) ignoring(c) d", fail: true, errMsg: "1:11: parse error: unexpected ", @@ -565,7 +622,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardOneToOne}, }, - }, { + }, + { input: "foo * sum", expected: &BinaryExpr{ Op: MUL, @@ -591,7 +649,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardOneToOne}, }, - }, { + }, + { input: "foo == 1", expected: &BinaryExpr{ Op: EQLC, @@ -610,7 +669,8 @@ var testExpr = []struct { PosRange: PositionRange{Start: 7, End: 8}, }, }, - }, { + }, + { input: "foo == bool 1", expected: &BinaryExpr{ Op: EQLC, @@ -630,7 +690,8 @@ var testExpr = []struct { }, ReturnBool: true, }, - }, { + }, + { input: "2.5 / bar", expected: &BinaryExpr{ Op: DIV, @@ -649,7 +710,8 @@ var testExpr = []struct { }, }, }, - }, { + }, + { input: "foo and bar", expected: &BinaryExpr{ Op: LAND, @@ -675,7 +737,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardManyToMany}, }, - }, { + }, + { input: "foo or bar", expected: &BinaryExpr{ Op: LOR, @@ -701,7 +764,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardManyToMany}, }, - }, { + }, + { input: "foo unless bar", expected: &BinaryExpr{ Op: LUNLESS, @@ -727,7 +791,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardManyToMany}, }, - }, { 
+ }, + { // Test and/or precedence and reassigning of operands. input: "foo + bar or bla and blub", expected: &BinaryExpr{ @@ -782,7 +847,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardManyToMany}, }, - }, { + }, + { // Test and/or/unless precedence. input: "foo and bar unless baz or qux", expected: &BinaryExpr{ @@ -837,7 +903,8 @@ var testExpr = []struct { }, VectorMatching: &VectorMatching{Card: CardManyToMany}, }, - }, { + }, + { // Test precedence and reassigning of operands. input: "bar + on(foo) bla / on(baz, buz) group_right(test) blub", expected: &BinaryExpr{ @@ -887,7 +954,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo * on(test,blub) bar", expected: &BinaryExpr{ Op: MUL, @@ -917,7 +985,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo * on(test,blub) group_left bar", expected: &BinaryExpr{ Op: MUL, @@ -947,7 +1016,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo and on(test,blub) bar", expected: &BinaryExpr{ Op: LAND, @@ -977,7 +1047,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo and on() bar", expected: &BinaryExpr{ Op: LAND, @@ -1007,7 +1078,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo and ignoring(test,blub) bar", expected: &BinaryExpr{ Op: LAND, @@ -1036,7 +1108,8 @@ var testExpr = []struct { MatchingLabels: []string{"test", "blub"}, }, }, - }, { + }, + { input: "foo and ignoring() bar", expected: &BinaryExpr{ Op: LAND, @@ -1065,7 +1138,8 @@ var testExpr = []struct { MatchingLabels: []string{}, }, }, - }, { + }, + { input: "foo unless on(bar) baz", expected: &BinaryExpr{ Op: LUNLESS, @@ -1095,7 +1169,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo / on(test,blub) group_left(bar) bar", expected: &BinaryExpr{ Op: DIV, @@ -1126,7 +1201,8 @@ var testExpr = []struct { Include: []string{"bar"}, }, }, - }, { + }, + { input: "foo / ignoring(test,blub) group_left(blub) 
bar", expected: &BinaryExpr{ Op: DIV, @@ -1156,7 +1232,8 @@ var testExpr = []struct { Include: []string{"blub"}, }, }, - }, { + }, + { input: "foo / ignoring(test,blub) group_left(bar) bar", expected: &BinaryExpr{ Op: DIV, @@ -1186,7 +1263,8 @@ var testExpr = []struct { Include: []string{"bar"}, }, }, - }, { + }, + { input: "foo - on(test,blub) group_right(bar,foo) bar", expected: &BinaryExpr{ Op: SUB, @@ -1217,7 +1295,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: "foo - ignoring(test,blub) group_right(bar,foo) bar", expected: &BinaryExpr{ Op: SUB, @@ -1247,79 +1326,98 @@ var testExpr = []struct { Include: []string{"bar", "foo"}, }, }, - }, { + }, + { input: "foo and 1", fail: true, errMsg: "set operator \"and\" not allowed in binary scalar expression", - }, { + }, + { input: "1 and foo", fail: true, errMsg: "set operator \"and\" not allowed in binary scalar expression", - }, { + }, + { input: "foo or 1", fail: true, errMsg: "set operator \"or\" not allowed in binary scalar expression", - }, { + }, + { input: "1 or foo", fail: true, errMsg: "set operator \"or\" not allowed in binary scalar expression", - }, { + }, + { input: "foo unless 1", fail: true, errMsg: "set operator \"unless\" not allowed in binary scalar expression", - }, { + }, + { input: "1 unless foo", fail: true, errMsg: "set operator \"unless\" not allowed in binary scalar expression", - }, { + }, + { input: "1 or on(bar) foo", fail: true, errMsg: "vector matching only allowed between instant vectors", - }, { + }, + { input: "foo == on(bar) 10", fail: true, errMsg: "vector matching only allowed between instant vectors", - }, { + }, + { input: "foo + group_left(baz) bar", fail: true, errMsg: "unexpected ", - }, { + }, + { input: "foo and on(bar) group_left(baz) bar", fail: true, errMsg: "no grouping allowed for \"and\" operation", - }, { + }, + { input: "foo and on(bar) group_right(baz) bar", fail: true, errMsg: "no grouping allowed for \"and\" operation", - }, { + }, + { input: 
"foo or on(bar) group_left(baz) bar", fail: true, errMsg: "no grouping allowed for \"or\" operation", - }, { + }, + { input: "foo or on(bar) group_right(baz) bar", fail: true, errMsg: "no grouping allowed for \"or\" operation", - }, { + }, + { input: "foo unless on(bar) group_left(baz) bar", fail: true, errMsg: "no grouping allowed for \"unless\" operation", - }, { + }, + { input: "foo unless on(bar) group_right(baz) bar", fail: true, errMsg: "no grouping allowed for \"unless\" operation", - }, { + }, + { input: `http_requests{group="production"} + on(instance) group_left(job,instance) cpu_count{type="smp"}`, fail: true, errMsg: "label \"instance\" must not occur in ON and GROUP clause at once", - }, { + }, + { input: "foo + bool bar", fail: true, errMsg: "bool modifier can only be used on comparison operators", - }, { + }, + { input: "foo + bool 10", fail: true, errMsg: "bool modifier can only be used on comparison operators", - }, { + }, + { input: "foo and bool 10", fail: true, errMsg: "bool modifier can only be used on comparison operators", @@ -1337,7 +1435,8 @@ var testExpr = []struct { End: 3, }, }, - }, { + }, + { input: "min", expected: &VectorSelector{ Name: "min", @@ -1349,7 +1448,8 @@ var testExpr = []struct { End: 3, }, }, - }, { + }, + { input: "foo offset 5m", expected: &VectorSelector{ Name: "foo", @@ -1362,7 +1462,8 @@ var testExpr = []struct { End: 13, }, }, - }, { + }, + { input: "foo offset -7m", expected: &VectorSelector{ Name: "foo", @@ -1375,7 +1476,8 @@ var testExpr = []struct { End: 14, }, }, - }, { + }, + { input: `foo OFFSET 1h30m`, expected: &VectorSelector{ Name: "foo", @@ -1388,7 +1490,8 @@ var testExpr = []struct { End: 16, }, }, - }, { + }, + { input: `foo OFFSET 1m30ms`, expected: &VectorSelector{ Name: "foo", @@ -1401,7 +1504,8 @@ var testExpr = []struct { End: 17, }, }, - }, { + }, + { input: `foo @ 1603774568`, expected: &VectorSelector{ Name: "foo", @@ -1414,7 +1518,8 @@ var testExpr = []struct { End: 16, }, }, - }, { + }, + { 
input: `foo @ -100`, expected: &VectorSelector{ Name: "foo", @@ -1427,7 +1532,8 @@ var testExpr = []struct { End: 10, }, }, - }, { + }, + { input: `foo @ .3`, expected: &VectorSelector{ Name: "foo", @@ -1440,7 +1546,8 @@ var testExpr = []struct { End: 8, }, }, - }, { + }, + { input: `foo @ 3.`, expected: &VectorSelector{ Name: "foo", @@ -1453,7 +1560,8 @@ var testExpr = []struct { End: 8, }, }, - }, { + }, + { input: `foo @ 3.33`, expected: &VectorSelector{ Name: "foo", @@ -1466,7 +1574,8 @@ var testExpr = []struct { End: 10, }, }, - }, { // Rounding off. + }, + { // Rounding off. input: `foo @ 3.3333`, expected: &VectorSelector{ Name: "foo", @@ -1479,7 +1588,8 @@ var testExpr = []struct { End: 12, }, }, - }, { // Rounding off. + }, + { // Rounding off. input: `foo @ 3.3335`, expected: &VectorSelector{ Name: "foo", @@ -1492,7 +1602,8 @@ var testExpr = []struct { End: 12, }, }, - }, { + }, + { input: `foo @ 3e2`, expected: &VectorSelector{ Name: "foo", @@ -1505,7 +1616,8 @@ var testExpr = []struct { End: 9, }, }, - }, { + }, + { input: `foo @ 3e-1`, expected: &VectorSelector{ Name: "foo", @@ -1518,7 +1630,8 @@ var testExpr = []struct { End: 10, }, }, - }, { + }, + { input: `foo @ 0xA`, expected: &VectorSelector{ Name: "foo", @@ -1531,7 +1644,8 @@ var testExpr = []struct { End: 9, }, }, - }, { + }, + { input: `foo @ -3.3e1`, expected: &VectorSelector{ Name: "foo", @@ -1544,27 +1658,33 @@ var testExpr = []struct { End: 12, }, }, - }, { + }, + { input: `foo @ +Inf`, fail: true, errMsg: "1:1: parse error: timestamp out of bounds for @ modifier: +Inf", - }, { + }, + { input: `foo @ -Inf`, fail: true, errMsg: "1:1: parse error: timestamp out of bounds for @ modifier: -Inf", - }, { + }, + { input: `foo @ NaN`, fail: true, errMsg: "1:1: parse error: timestamp out of bounds for @ modifier: NaN", - }, { + }, + { input: fmt.Sprintf(`foo @ %f`, float64(math.MaxInt64)+1), fail: true, errMsg: fmt.Sprintf("1:1: parse error: timestamp out of bounds for @ modifier: %f", 
float64(math.MaxInt64)+1), - }, { + }, + { input: fmt.Sprintf(`foo @ %f`, float64(math.MinInt64)-1), fail: true, errMsg: fmt.Sprintf("1:1: parse error: timestamp out of bounds for @ modifier: %f", float64(math.MinInt64)-1), - }, { + }, + { input: `foo:bar{a="bc"}`, expected: &VectorSelector{ Name: "foo:bar", @@ -1577,7 +1697,8 @@ var testExpr = []struct { End: 15, }, }, - }, { + }, + { input: `foo{NaN='bc'}`, expected: &VectorSelector{ Name: "foo", @@ -1590,7 +1711,8 @@ var testExpr = []struct { End: 13, }, }, - }, { + }, + { input: `foo{bar='}'}`, expected: &VectorSelector{ Name: "foo", @@ -1603,7 +1725,8 @@ var testExpr = []struct { End: 12, }, }, - }, { + }, + { input: `foo{a="b", foo!="bar", test=~"test", bar!~"baz"}`, expected: &VectorSelector{ Name: "foo", @@ -1619,7 +1742,8 @@ var testExpr = []struct { End: 48, }, }, - }, { + }, + { input: `foo{a="b", foo!="bar", test=~"test", bar!~"baz",}`, expected: &VectorSelector{ Name: "foo", @@ -1635,89 +1759,110 @@ var testExpr = []struct { End: 49, }, }, - }, { + }, + { input: `{`, fail: true, errMsg: "unexpected end of input inside braces", - }, { + }, + { input: `}`, fail: true, errMsg: "unexpected character: '}'", - }, { + }, + { input: `some{`, fail: true, errMsg: "unexpected end of input inside braces", - }, { + }, + { input: `some}`, fail: true, errMsg: "unexpected character: '}'", - }, { + }, + { input: `some_metric{a=b}`, fail: true, errMsg: "unexpected identifier \"b\" in label matching, expected string", - }, { + }, + { input: `some_metric{a:b="b"}`, fail: true, errMsg: "unexpected character inside braces: ':'", - }, { + }, + { input: `foo{a*"b"}`, fail: true, errMsg: "unexpected character inside braces: '*'", - }, { + }, + { input: `foo{a>="b"}`, fail: true, // TODO(fabxc): willingly lexing wrong tokens allows for more precise error // messages from the parser - consider if this is an option. 
errMsg: "unexpected character inside braces: '>'", - }, { + }, + { input: "some_metric{a=\"\xff\"}", fail: true, errMsg: "1:15: parse error: invalid UTF-8 rune", - }, { + }, + { input: `foo{gibberish}`, fail: true, errMsg: `unexpected "}" in label matching, expected label matching operator`, - }, { + }, + { input: `foo{1}`, fail: true, errMsg: "unexpected character inside braces: '1'", - }, { + }, + { input: `{}`, fail: true, errMsg: "vector selector must contain at least one non-empty matcher", - }, { + }, + { input: `{x=""}`, fail: true, errMsg: "vector selector must contain at least one non-empty matcher", - }, { + }, + { input: `{x=~".*"}`, fail: true, errMsg: "vector selector must contain at least one non-empty matcher", - }, { + }, + { input: `{x!~".+"}`, fail: true, errMsg: "vector selector must contain at least one non-empty matcher", - }, { + }, + { input: `{x!="a"}`, fail: true, errMsg: "vector selector must contain at least one non-empty matcher", - }, { + }, + { input: `foo{__name__="bar"}`, fail: true, errMsg: `metric name must not be set twice: "foo" or "bar"`, - }, { + }, + { input: `foo{__name__= =}`, fail: true, errMsg: `1:15: parse error: unexpected "=" in label matching, expected string`, - }, { + }, + { input: `foo{,}`, fail: true, errMsg: `unexpected "," in label matching, expected identifier or "}"`, - }, { + }, + { input: `foo{__name__ == "bar"}`, fail: true, errMsg: `1:15: parse error: unexpected "=" in label matching, expected string`, - }, { + }, + { input: `foo{__name__="bar" lol}`, fail: true, errMsg: `unexpected identifier "lol" in label matching, expected "," or "}"`, @@ -1739,7 +1884,8 @@ var testExpr = []struct { Range: 5 * time.Second, EndPos: 8, }, - }, { + }, + { input: "test[5m]", expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1755,7 +1901,8 @@ var testExpr = []struct { Range: 5 * time.Minute, EndPos: 8, }, - }, { + }, + { input: `foo[5m30s]`, expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1771,7 
+1918,8 @@ var testExpr = []struct { Range: 5*time.Minute + 30*time.Second, EndPos: 10, }, - }, { + }, + { input: "test[5h] OFFSET 5m", expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1788,7 +1936,8 @@ var testExpr = []struct { Range: 5 * time.Hour, EndPos: 18, }, - }, { + }, + { input: "test[5d] OFFSET 10s", expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1805,7 +1954,8 @@ var testExpr = []struct { Range: 5 * 24 * time.Hour, EndPos: 19, }, - }, { + }, + { input: "test[5w] offset 2w", expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1822,7 +1972,8 @@ var testExpr = []struct { Range: 5 * 7 * 24 * time.Hour, EndPos: 18, }, - }, { + }, + { input: `test{a="b"}[5y] OFFSET 3d`, expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1840,7 +1991,8 @@ var testExpr = []struct { Range: 5 * 365 * 24 * time.Hour, EndPos: 25, }, - }, { + }, + { input: `test{a="b"}[5y] @ 1603774699`, expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -1858,70 +2010,87 @@ var testExpr = []struct { Range: 5 * 365 * 24 * time.Hour, EndPos: 28, }, - }, { + }, + { input: `foo[5mm]`, fail: true, errMsg: "bad duration syntax: \"5mm\"", - }, { + }, + { input: `foo[5m1]`, fail: true, errMsg: "bad duration syntax: \"5m1\"", - }, { + }, + { input: `foo[5m:1m1]`, fail: true, errMsg: "bad number or duration syntax: \"1m1\"", - }, { + }, + { input: `foo[5y1hs]`, fail: true, errMsg: "not a valid duration string: \"5y1hs\"", - }, { + }, + { input: `foo[5m1h]`, fail: true, errMsg: "not a valid duration string: \"5m1h\"", - }, { + }, + { input: `foo[5m1m]`, fail: true, errMsg: "not a valid duration string: \"5m1m\"", - }, { + }, + { input: `foo[0m]`, fail: true, errMsg: "duration must be greater than 0", - }, { + }, + { input: `foo["5m"]`, fail: true, - }, { + }, + { input: `foo[]`, fail: true, errMsg: "missing unit character in duration", - }, { + }, + { input: `foo[1]`, fail: true, errMsg: "missing unit character in duration", - }, { + }, + { 
input: `some_metric[5m] OFFSET 1`, fail: true, errMsg: "unexpected number \"1\" in offset, expected duration", - }, { + }, + { input: `some_metric[5m] OFFSET 1mm`, fail: true, errMsg: "bad number or duration syntax: \"1mm\"", - }, { + }, + { input: `some_metric[5m] OFFSET`, fail: true, errMsg: "unexpected end of input in offset, expected duration", - }, { + }, + { input: `some_metric OFFSET 1m[5m]`, fail: true, errMsg: "1:22: parse error: no offset modifiers allowed before range", - }, { + }, + { input: `some_metric[5m] @ 1m`, fail: true, errMsg: "1:19: parse error: unexpected duration \"1m\" in @, expected timestamp", - }, { + }, + { input: `some_metric[5m] @`, fail: true, errMsg: "1:18: parse error: unexpected end of input in @, expected timestamp", - }, { + }, + { input: `some_metric @ 1234 [5m]`, fail: true, errMsg: "1:20: parse error: no @ modifiers allowed before range", @@ -1952,7 +2121,8 @@ var testExpr = []struct { End: 25, }, }, - }, { + }, + { input: "avg by (foo)(some_metric)", expected: &AggregateExpr{ Op: AVG, @@ -1972,7 +2142,8 @@ var testExpr = []struct { End: 25, }, }, - }, { + }, + { input: "max by (foo)(some_metric)", expected: &AggregateExpr{ Op: MAX, @@ -1992,7 +2163,8 @@ var testExpr = []struct { End: 25, }, }, - }, { + }, + { input: "sum without (foo) (some_metric)", expected: &AggregateExpr{ Op: SUM, @@ -2013,7 +2185,8 @@ var testExpr = []struct { End: 31, }, }, - }, { + }, + { input: "sum (some_metric) without (foo)", expected: &AggregateExpr{ Op: SUM, @@ -2034,7 +2207,8 @@ var testExpr = []struct { End: 31, }, }, - }, { + }, + { input: "stddev(some_metric)", expected: &AggregateExpr{ Op: STDDEV, @@ -2053,7 +2227,8 @@ var testExpr = []struct { End: 19, }, }, - }, { + }, + { input: "stdvar by (foo)(some_metric)", expected: &AggregateExpr{ Op: STDVAR, @@ -2073,7 +2248,8 @@ var testExpr = []struct { End: 28, }, }, - }, { + }, + { input: "sum by ()(some_metric)", expected: &AggregateExpr{ Op: SUM, @@ -2093,7 +2269,8 @@ var testExpr = []struct { 
End: 22, }, }, - }, { + }, + { input: "sum by (foo,bar,)(some_metric)", expected: &AggregateExpr{ Op: SUM, @@ -2113,7 +2290,8 @@ var testExpr = []struct { End: 30, }, }, - }, { + }, + { input: "sum by (foo,)(some_metric)", expected: &AggregateExpr{ Op: SUM, @@ -2133,7 +2311,8 @@ var testExpr = []struct { End: 26, }, }, - }, { + }, + { input: "topk(5, some_metric)", expected: &AggregateExpr{ Op: TOPK, @@ -2159,7 +2338,8 @@ var testExpr = []struct { End: 20, }, }, - }, { + }, + { input: `count_values("value", some_metric)`, expected: &AggregateExpr{ Op: COUNT_VALUES, @@ -2185,7 +2365,8 @@ var testExpr = []struct { End: 34, }, }, - }, { + }, + { // Test usage of keywords as label names. input: "sum without(and, by, avg, count, alert, annotations)(some_metric)", expected: &AggregateExpr{ @@ -2207,67 +2388,83 @@ var testExpr = []struct { End: 65, }, }, - }, { + }, + { input: "sum without(==)(some_metric)", fail: true, errMsg: "unexpected in grouping opts, expected label", - }, { + }, + { input: "sum without(,)(some_metric)", fail: true, errMsg: `unexpected "," in grouping opts, expected label`, - }, { + }, + { input: "sum without(foo,,)(some_metric)", fail: true, errMsg: `unexpected "," in grouping opts, expected label`, - }, { + }, + { input: `sum some_metric by (test)`, fail: true, errMsg: "unexpected identifier \"some_metric\"", - }, { + }, + { input: `sum (some_metric) by test`, fail: true, errMsg: "unexpected identifier \"test\" in grouping opts", - }, { + }, + { input: `sum (some_metric) by test`, fail: true, errMsg: "unexpected identifier \"test\" in grouping opts", - }, { + }, + { input: `sum () by (test)`, fail: true, errMsg: "no arguments for aggregate expression provided", - }, { + }, + { input: "MIN keep_common (some_metric)", fail: true, errMsg: "1:5: parse error: unexpected identifier \"keep_common\"", - }, { + }, + { input: "MIN (some_metric) keep_common", fail: true, errMsg: `unexpected identifier "keep_common"`, - }, { + }, + { input: `sum (some_metric) 
without (test) by (test)`, fail: true, errMsg: "unexpected ", - }, { + }, + { input: `sum without (test) (some_metric) by (test)`, fail: true, errMsg: "unexpected ", - }, { + }, + { input: `topk(some_metric)`, fail: true, errMsg: "wrong number of arguments for aggregate expression provided, expected 2, got 1", - }, { + }, + { input: `topk(some_metric,)`, fail: true, errMsg: "trailing commas not allowed in function call args", - }, { + }, + { input: `topk(some_metric, other_metric)`, fail: true, errMsg: "1:6: parse error: expected type scalar in aggregation parameter, got instant vector", - }, { + }, + { input: `count_values(5, other_metric)`, fail: true, errMsg: "1:14: parse error: expected type string in aggregation parameter, got scalar", - }, { + }, + { input: `rate(some_metric[5m]) @ 1234`, fail: true, errMsg: "1:1: parse error: @ modifier must be preceded by an instant vector selector or range vector selector or a subquery", @@ -2283,7 +2480,8 @@ var testExpr = []struct { End: 6, }, }, - }, { + }, + { input: `floor(some_metric{foo!="bar"})`, expected: &Call{ Func: MustGetFunction("floor"), @@ -2305,7 +2503,8 @@ var testExpr = []struct { End: 30, }, }, - }, { + }, + { input: "rate(some_metric[5m])", expected: &Call{ Func: MustGetFunction("rate"), @@ -2330,7 +2529,8 @@ var testExpr = []struct { End: 21, }, }, - }, { + }, + { input: "round(some_metric)", expected: &Call{ Func: MustGetFunction("round"), @@ -2351,7 +2551,8 @@ var testExpr = []struct { End: 18, }, }, - }, { + }, + { input: "round(some_metric, 5)", expected: &Call{ Func: MustGetFunction("round"), @@ -2379,39 +2580,48 @@ var testExpr = []struct { End: 21, }, }, - }, { + }, + { input: "floor()", fail: true, errMsg: "expected 1 argument(s) in call to \"floor\", got 0", - }, { + }, + { input: "floor(some_metric, other_metric)", fail: true, errMsg: "expected 1 argument(s) in call to \"floor\", got 2", - }, { + }, + { input: "floor(some_metric, 1)", fail: true, errMsg: "expected 1 argument(s) in call to 
\"floor\", got 2", - }, { + }, + { input: "floor(1)", fail: true, errMsg: "expected type instant vector in call to function \"floor\", got scalar", - }, { + }, + { input: "hour(some_metric, some_metric, some_metric)", fail: true, errMsg: "expected at most 1 argument(s) in call to \"hour\", got 3", - }, { + }, + { input: "time(some_metric)", fail: true, errMsg: "expected 0 argument(s) in call to \"time\", got 1", - }, { + }, + { input: "non_existent_function_far_bar()", fail: true, errMsg: "unknown function with name \"non_existent_function_far_bar\"", - }, { + }, + { input: "rate(some_metric)", fail: true, errMsg: "expected type range vector in call to function \"rate\", got instant vector", - }, { + }, + { input: "label_replace(a, `b`, `c\xff`, `d`, `.*`)", fail: true, errMsg: "1:23: parse error: invalid UTF-8 rune", @@ -2421,28 +2631,34 @@ var testExpr = []struct { input: "-=", fail: true, errMsg: `unexpected "="`, - }, { + }, + { input: "++-++-+-+-<", fail: true, errMsg: `unexpected `, - }, { + }, + { input: "e-+=/(0)", fail: true, errMsg: `unexpected "="`, - }, { + }, + { input: "a>b()", fail: true, errMsg: `unknown function`, - }, { + }, + { input: "rate(avg)", fail: true, errMsg: `expected type range vector`, - }, { + }, + { // This is testing that we are not re-rendering the expression string for each error, which would timeout. 
input: "(" + strings.Repeat("-{}-1", 10000) + ")" + strings.Repeat("[1m:]", 1000), fail: true, errMsg: `1:3: parse error: vector selector must contain at least one non-empty matcher`, - }, { + }, + { input: "sum(sum)", expected: &AggregateExpr{ Op: SUM, @@ -2461,7 +2677,8 @@ var testExpr = []struct { End: 8, }, }, - }, { + }, + { input: "a + sum", expected: &BinaryExpr{ Op: ADD, @@ -2495,49 +2712,58 @@ var testExpr = []struct { Val: "double-quoted string \" with escaped quote", PosRange: PositionRange{Start: 0, End: 44}, }, - }, { + }, + { input: `'single-quoted string \' with escaped quote'`, expected: &StringLiteral{ Val: "single-quoted string ' with escaped quote", PosRange: PositionRange{Start: 0, End: 44}, }, - }, { + }, + { input: "`backtick-quoted string`", expected: &StringLiteral{ Val: "backtick-quoted string", PosRange: PositionRange{Start: 0, End: 24}, }, - }, { + }, + { input: `"\a\b\f\n\r\t\v\\\" - \xFF\377\u1234\U00010111\U0001011111☺"`, expected: &StringLiteral{ Val: "\a\b\f\n\r\t\v\\\" - \xFF\377\u1234\U00010111\U0001011111☺", PosRange: PositionRange{Start: 0, End: 62}, }, - }, { + }, + { input: `'\a\b\f\n\r\t\v\\\' - \xFF\377\u1234\U00010111\U0001011111☺'`, expected: &StringLiteral{ Val: "\a\b\f\n\r\t\v\\' - \xFF\377\u1234\U00010111\U0001011111☺", PosRange: PositionRange{Start: 0, End: 62}, }, - }, { + }, + { input: "`" + `\a\b\f\n\r\t\v\\\"\' - \xFF\377\u1234\U00010111\U0001011111☺` + "`", expected: &StringLiteral{ Val: `\a\b\f\n\r\t\v\\\"\' - \xFF\377\u1234\U00010111\U0001011111☺`, PosRange: PositionRange{Start: 0, End: 64}, }, - }, { + }, + { input: "`\\``", fail: true, errMsg: "unterminated raw string", - }, { + }, + { input: `"\`, fail: true, errMsg: "escape sequence not terminated", - }, { + }, + { input: `"\c"`, fail: true, errMsg: "unknown escape sequence U+0063 'c'", - }, { + }, + { input: `"\x."`, fail: true, errMsg: "illegal character U+002E '.' 
in escape sequence", @@ -2580,7 +2806,8 @@ var testExpr = []struct { Step: time.Hour + 6*time.Millisecond, EndPos: 27, }, - }, { + }, + { input: `foo[10m:]`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2596,7 +2823,8 @@ var testExpr = []struct { Range: 10 * time.Minute, EndPos: 9, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:5s])`, expected: &Call{ Func: MustGetFunction("min_over_time"), @@ -2637,7 +2865,8 @@ var testExpr = []struct { End: 46, }, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:])[4m:3s]`, expected: &SubqueryExpr{ Expr: &Call{ @@ -2681,7 +2910,8 @@ var testExpr = []struct { Step: 3 * time.Second, EndPos: 51, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] offset 4m)[4m:3s]`, expected: &SubqueryExpr{ Expr: &Call{ @@ -2726,7 +2956,8 @@ var testExpr = []struct { Step: 3 * time.Second, EndPos: 61, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] @ 1603775091)[4m:3s]`, expected: &SubqueryExpr{ Expr: &Call{ @@ -2771,7 +3002,8 @@ var testExpr = []struct { Step: 3 * time.Second, EndPos: 64, }, - }, { + }, + { input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] @ -160377509)[4m:3s]`, expected: &SubqueryExpr{ Expr: &Call{ @@ -2816,7 +3048,8 @@ var testExpr = []struct { Step: 3 * time.Second, EndPos: 64, }, - }, { + }, + { input: "sum without(and, by, avg, count, alert, annotations)(some_metric) [30m:10s]", expected: &SubqueryExpr{ Expr: &AggregateExpr{ @@ -2842,7 +3075,8 @@ var testExpr = []struct { Step: 10 * time.Second, EndPos: 75, }, - }, { + }, + { input: `some_metric OFFSET 1m [10m:5s]`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2860,7 +3094,8 @@ var testExpr = []struct { Step: 5 * time.Second, EndPos: 30, }, - }, { + }, + { input: `some_metric @ 123 [10m:5s]`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2878,7 +3113,8 @@ var testExpr = []struct { Step: 5 * time.Second, EndPos: 26, }, - }, { + }, + { input: `some_metric @ 123 offset 1m 
[10m:5s]`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2897,7 +3133,8 @@ var testExpr = []struct { Step: 5 * time.Second, EndPos: 36, }, - }, { + }, + { input: `some_metric offset 1m @ 123 [10m:5s]`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2916,7 +3153,8 @@ var testExpr = []struct { Step: 5 * time.Second, EndPos: 36, }, - }, { + }, + { input: `some_metric[10m:5s] offset 1m @ 123`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -2935,7 +3173,8 @@ var testExpr = []struct { Step: 5 * time.Second, EndPos: 35, }, - }, { + }, + { input: `(foo + bar{nm="val"})[5m:]`, expected: &SubqueryExpr{ Expr: &ParenExpr{ @@ -2974,7 +3213,8 @@ var testExpr = []struct { Range: 5 * time.Minute, EndPos: 26, }, - }, { + }, + { input: `(foo + bar{nm="val"})[5m:] offset 10m`, expected: &SubqueryExpr{ Expr: &ParenExpr{ @@ -3014,7 +3254,8 @@ var testExpr = []struct { OriginalOffset: 10 * time.Minute, EndPos: 37, }, - }, { + }, + { input: `(foo + bar{nm="val"} @ 1234)[5m:] @ 1603775019`, expected: &SubqueryExpr{ Expr: &ParenExpr{ @@ -3055,19 +3296,23 @@ var testExpr = []struct { Timestamp: makeInt64Pointer(1603775019000), EndPos: 46, }, - }, { + }, + { input: "test[5d] OFFSET 10s [10m:5s]", fail: true, errMsg: "1:1: parse error: subquery is only allowed on instant vector, got matrix", - }, { + }, + { input: `(foo + bar{nm="val"})[5m:][10m:5s]`, fail: true, errMsg: `1:1: parse error: subquery is only allowed on instant vector, got matrix`, - }, { + }, + { input: "rate(food[1m])[1h] offset 1h", fail: true, errMsg: `1:15: parse error: ranges only allowed for vector selectors`, - }, { + }, + { input: "rate(food[1m])[1h] @ 100", fail: true, errMsg: `1:15: parse error: ranges only allowed for vector selectors`, @@ -3086,7 +3331,8 @@ var testExpr = []struct { End: 13, }, }, - }, { + }, + { input: `foo @ end()`, expected: &VectorSelector{ Name: "foo", @@ -3099,7 +3345,8 @@ var testExpr = []struct { End: 11, }, }, - }, { + }, + { input: `test[5y] @ start()`, expected: 
&MatrixSelector{ VectorSelector: &VectorSelector{ @@ -3116,7 +3363,8 @@ var testExpr = []struct { Range: 5 * 365 * 24 * time.Hour, EndPos: 18, }, - }, { + }, + { input: `test[5y] @ end()`, expected: &MatrixSelector{ VectorSelector: &VectorSelector{ @@ -3133,7 +3381,8 @@ var testExpr = []struct { Range: 5 * 365 * 24 * time.Hour, EndPos: 16, }, - }, { + }, + { input: `foo[10m:6s] @ start()`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -3151,7 +3400,8 @@ var testExpr = []struct { StartOrEnd: START, EndPos: 21, }, - }, { + }, + { input: `foo[10m:6s] @ end()`, expected: &SubqueryExpr{ Expr: &VectorSelector{ @@ -3169,11 +3419,13 @@ var testExpr = []struct { StartOrEnd: END, EndPos: 19, }, - }, { + }, + { input: `start()`, fail: true, errMsg: `1:6: parse error: unexpected "("`, - }, { + }, + { input: `end()`, fail: true, errMsg: `1:4: parse error: unexpected "("`, @@ -3191,7 +3443,8 @@ var testExpr = []struct { End: 5, }, }, - }, { + }, + { input: `end`, expected: &VectorSelector{ Name: "end", @@ -3203,7 +3456,8 @@ var testExpr = []struct { End: 3, }, }, - }, { + }, + { input: `start{end="foo"}`, expected: &VectorSelector{ Name: "start", @@ -3216,7 +3470,8 @@ var testExpr = []struct { End: 16, }, }, - }, { + }, + { input: `end{start="foo"}`, expected: &VectorSelector{ Name: "end", @@ -3229,7 +3484,8 @@ var testExpr = []struct { End: 16, }, }, - }, { + }, + { input: `foo unless on(start) bar`, expected: &BinaryExpr{ Op: LUNLESS, @@ -3259,7 +3515,8 @@ var testExpr = []struct { On: true, }, }, - }, { + }, + { input: `foo unless on(end) bar`, expected: &BinaryExpr{ Op: LUNLESS, diff --git a/promql/parser/printer.go b/promql/parser/printer.go index f5cfe789b..b21444cbf 100644 --- a/promql/parser/printer.go +++ b/promql/parser/printer.go @@ -21,7 +21,7 @@ import ( "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" ) // Tree returns a string of the tree structure of the given node. 
diff --git a/promql/parser/printer_test.go b/promql/parser/printer_test.go index e687820c9..caaff7e46 100644 --- a/promql/parser/printer_test.go +++ b/promql/parser/printer_test.go @@ -16,9 +16,9 @@ package parser import ( "testing" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/labels" ) func TestExprString(t *testing.T) { diff --git a/promql/quantile.go b/promql/quantile.go index a25f8917c..e2de98840 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -17,7 +17,7 @@ import ( "math" "sort" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" ) // Helpers to calculate quantiles. diff --git a/promql/query_logger.go b/promql/query_logger.go index cf2fbbfcc..ecf93765c 100644 --- a/promql/query_logger.go +++ b/promql/query_logger.go @@ -81,8 +81,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { } func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, error) { - - file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0666) + file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666) if err != nil { level.Error(logger).Log("msg", "Error opening query log file", "file", filename, "err", err) return nil, err @@ -104,7 +103,7 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, er } func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker { - err := os.MkdirAll(localStoragePath, 0777) + err := os.MkdirAll(localStoragePath, 0o777) if err != nil { level.Error(logger).Log("msg", "Failed to create directory for logging active queries") } @@ -147,7 +146,6 @@ func trimStringByBytes(str string, size int) string { func _newJSONEntry(query string, timestamp int64, logger log.Logger) []byte { entry := Entry{query, timestamp} jsonEntry, err := json.Marshal(entry) - if err != nil { 
level.Error(logger).Log("msg", "Cannot create json of query", "query", query) return []byte{} diff --git a/promql/test.go b/promql/test.go index d577b25d1..19b2dd830 100644 --- a/promql/test.go +++ b/promql/test.go @@ -27,9 +27,9 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" diff --git a/promql/test_test.go b/promql/test_test.go index ec2bac1b1..eb7b9e023 100644 --- a/promql/test_test.go +++ b/promql/test_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" ) func TestLazyLoader_WithSamplesTill(t *testing.T) { diff --git a/promql/testdata/histograms.test b/promql/testdata/histograms.test index a9bb9b5e4..07e9a9225 100644 --- a/promql/testdata/histograms.test +++ b/promql/testdata/histograms.test @@ -15,6 +15,15 @@ load 5m testhistogram_bucket{le="0.3", start="negative"} 0+2x10 testhistogram_bucket{le="+Inf", start="negative"} 0+3x10 +# Another test histogram, where q(1/6), q(1/2), and q(5/6) are each in +# the middle of a bucket and should therefore be 1, 3, and 5, +# respectively. +load 5m + testhistogram2_bucket{le="0"} 0+0x10 + testhistogram2_bucket{le="2"} 0+1x10 + testhistogram2_bucket{le="4"} 0+2x10 + testhistogram2_bucket{le="6"} 0+3x10 + testhistogram2_bucket{le="+Inf"} 0+3x10 # Now a more realistic histogram per job and instance to test aggregation. 
load 5m @@ -91,6 +100,25 @@ eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m])) {start="positive"} 0.72 {start="negative"} 0.3 +# Want results exactly in the middle of the bucket. +eval instant at 7m histogram_quantile(1./6., testhistogram2_bucket) + {} 1 + +eval instant at 7m histogram_quantile(0.5, testhistogram2_bucket) + {} 3 + +eval instant at 7m histogram_quantile(5./6., testhistogram2_bucket) + {} 5 + +eval instant at 47m histogram_quantile(1./6., rate(testhistogram2_bucket[15m])) + {} 1 + +eval instant at 47m histogram_quantile(0.5, rate(testhistogram2_bucket[15m])) + {} 3 + +eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m])) + {} 5 + # Aggregated histogram: Everything in one. eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le)) {} 0.075 diff --git a/promql/value.go b/promql/value.go index 727808423..9fdf5e509 100644 --- a/promql/value.go +++ b/promql/value.go @@ -22,7 +22,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" diff --git a/rules/alerting.go b/rules/alerting.go index 5e7b8975c..929f7586d 100644 --- a/rules/alerting.go +++ b/rules/alerting.go @@ -28,9 +28,9 @@ import ( "github.com/prometheus/common/model" yaml "gopkg.in/yaml.v2" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/rulefmt" - "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/rulefmt" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/template" @@ -291,6 +291,13 
@@ func (r *AlertingRule) SetRestored(restored bool) { r.restored = restored } +// Restored returns the restoration state of the alerting rule. +func (r *AlertingRule) Restored() bool { + r.mtx.Lock() + defer r.mtx.Unlock() + return r.restored +} + // resolvedRetention is the duration for which a resolved alert instance // is kept in memory state and consequently repeatedly sent to the AlertManager. const resolvedRetention = 15 * time.Minute @@ -311,7 +318,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, resultFPs := map[uint64]struct{}{} var vec promql.Vector - var alerts = make(map[uint64]*Alert, len(res)) + alerts := make(map[uint64]*Alert, len(res)) for _, smpl := range res { // Provide the alert information to the template. l := make(map[string]string, len(smpl.Metric)) @@ -389,6 +396,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, r.active[h] = a } + var numActivePending int // Check if any pending alerts should be removed or fire now. Write out alert timeseries. 
for fp, a := range r.active { if _, ok := resultFPs[fp]; !ok { @@ -403,6 +411,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, } continue } + numActivePending++ if a.State == StatePending && ts.Sub(a.ActiveAt) >= r.holdDuration { a.State = StateFiring @@ -415,10 +424,9 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, } } - numActive := len(r.active) - if limit != 0 && numActive > limit { + if limit > 0 && numActivePending > limit { r.active = map[uint64]*Alert{} - return nil, errors.Errorf("exceeded limit of %d with %d alerts", limit, numActive) + return nil, errors.Errorf("exceeded limit of %d with %d alerts", limit, numActivePending) } return vec, nil @@ -478,7 +486,7 @@ func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) { } } -func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) { +func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay, interval time.Duration, notifyFunc NotifyFunc) { alerts := []*Alert{} r.ForEachActiveAlert(func(alert *Alert) { if alert.needsSending(ts, resendDelay) { diff --git a/rules/alerting_test.go b/rules/alerting_test.go index 94d505445..cfc5a9cd6 100644 --- a/rules/alerting_test.go +++ b/rules/alerting_test.go @@ -22,8 +22,8 @@ import ( "github.com/go-kit/log" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/util/teststorage" @@ -112,7 +112,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { results := []promql.Vector{ { - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS", "alertname", "HTTPRequestRateLow", @@ 
-125,7 +125,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { }, }, { - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS", "alertname", "HTTPRequestRateLow", @@ -138,7 +138,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { }, }, { - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS", "alertname", "HTTPRequestRateLow", @@ -151,7 +151,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { }, }, { - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS", "alertname", "HTTPRequestRateLow", @@ -222,7 +222,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { true, log.NewNopLogger(), ) result := promql.Vector{ - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS", "alertname", "ExternalLabelDoesNotExist", @@ -233,7 +233,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { ), Point: promql.Point{V: 1}, }, - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS", "alertname", "ExternalLabelExists", @@ -316,7 +316,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) { true, log.NewNopLogger(), ) result := promql.Vector{ - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS", "alertname", "ExternalURLDoesNotExist", @@ -327,7 +327,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) { ), Point: promql.Point{V: 1}, }, - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS", "alertname", "ExternalURLExists", @@ -400,7 +400,7 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) { true, log.NewNopLogger(), ) result := promql.Vector{ - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS", "alertname", "EmptyLabel", @@ -466,23 +466,17 @@ func TestAlertingRuleDuplicate(t *testing.T) { } func TestAlertingRuleLimit(t *testing.T) { - storage := teststorage.New(t) - defer storage.Close() + suite, err := promql.NewTest(t, ` + load 1m + metric{label="1"} 1 + metric{label="2"} 1 + `) + require.NoError(t, 
err) + defer suite.Close() - opts := promql.EngineOpts{ - Logger: nil, - Reg: nil, - MaxSamples: 10, - Timeout: 10 * time.Second, - } + require.NoError(t, suite.Run()) - engine := promql.NewEngine(opts) - ctx, cancelCtx := context.WithCancel(context.Background()) - defer cancelCtx() - - now := time.Now() - - suite := []struct { + tests := []struct { limit int err string }{ @@ -490,31 +484,37 @@ func TestAlertingRuleLimit(t *testing.T) { limit: 0, }, { - limit: 1, + limit: -1, }, { - limit: -1, - err: "exceeded limit of -1 with 1 alerts", + limit: 2, + }, + { + limit: 1, + err: "exceeded limit of 1 with 2 alerts", }, } - for _, test := range suite { - expr, _ := parser.ParseExpr(`1`) - rule := NewAlertingRule( - "foo", - expr, - time.Minute, - labels.FromStrings("test", "test"), - nil, - nil, - "", - true, log.NewNopLogger(), - ) - _, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, test.limit) - if test.err == "" { - require.NoError(t, err) - } else { - require.Equal(t, test.err, err.Error()) + expr, _ := parser.ParseExpr(`metric > 0`) + rule := NewAlertingRule( + "foo", + expr, + time.Minute, + labels.FromStrings("test", "test"), + nil, + nil, + "", + true, log.NewNopLogger(), + ) + + evalTime := time.Unix(0, 0) + + for _, test := range tests { + _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit) + if err != nil { + require.EqualError(t, err, test.err) + } else if test.err != "" { + t.Errorf("Expected errror %s, got none", test.err) } } } diff --git a/rules/manager.go b/rules/manager.go index a7863a60e..23a63eade 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -29,10 +29,10 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/rulefmt" - "github.com/prometheus/prometheus/pkg/timestamp" - "github.com/prometheus/prometheus/pkg/value" + 
"github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/rulefmt" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" @@ -652,7 +652,8 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { level.Warn(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) } } else { - seriesReturned[s.Metric.String()] = s.Metric + buf := [1024]byte{} + seriesReturned[string(s.Metric.Bytes(buf[:]))] = s.Metric } } if numOutOfOrder > 0 { @@ -672,7 +673,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { // Do not count these in logging, as this is expected if series // is exposed from a different rule. default: - level.Warn(g.logger).Log("msg", "Adding stale sample failed", "sample", metric, "err", err) + level.Warn(g.logger).Log("msg", "Adding stale sample failed", "sample", lset.String(), "err", err) } } } @@ -834,12 +835,10 @@ func (g *Group) RestoreForState(ts time.Time) { level.Debug(g.logger).Log("msg", "'for' state restored", labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850), "labels", a.Labels.String()) - }) alertRule.SetRestored(true) } - } // Equals return if two groups are the same. 
diff --git a/rules/manager_test.go b/rules/manager_test.go index f3b335070..3be0d6885 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -30,10 +30,10 @@ import ( "go.uber.org/goleak" yaml "gopkg.in/yaml.v2" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/rulefmt" - "github.com/prometheus/prometheus/pkg/timestamp" - "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/rulefmt" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" @@ -67,7 +67,7 @@ func TestAlertingRule(t *testing.T) { nil, nil, "", true, nil, ) result := promql.Vector{ - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS", "alertname", "HTTPRequestRateLow", @@ -79,7 +79,7 @@ func TestAlertingRule(t *testing.T) { ), Point: promql.Point{V: 1}, }, - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS", "alertname", "HTTPRequestRateLow", @@ -91,7 +91,7 @@ func TestAlertingRule(t *testing.T) { ), Point: promql.Point{V: 1}, }, - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS", "alertname", "HTTPRequestRateLow", @@ -103,7 +103,7 @@ func TestAlertingRule(t *testing.T) { ), Point: promql.Point{V: 1}, }, - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS", "alertname", "HTTPRequestRateLow", @@ -119,17 +119,19 @@ func TestAlertingRule(t *testing.T) { baseTime := time.Unix(0, 0) - var tests = []struct { + tests := []struct { time time.Duration result promql.Vector }{ { time: 0, result: result[:2], - }, { + }, + { time: 5 * time.Minute, result: result[2:], - }, { + }, + { time: 10 * time.Minute, result: result[2:3], }, @@ -208,7 +210,7 @@ func TestForStateAddSamples(t *testing.T) { nil, nil, "", true, nil, ) result := 
promql.Vector{ - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS_FOR_STATE", "alertname", "HTTPRequestRateLow", @@ -219,7 +221,7 @@ func TestForStateAddSamples(t *testing.T) { ), Point: promql.Point{V: 1}, }, - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS_FOR_STATE", "alertname", "HTTPRequestRateLow", @@ -230,7 +232,7 @@ func TestForStateAddSamples(t *testing.T) { ), Point: promql.Point{V: 1}, }, - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS_FOR_STATE", "alertname", "HTTPRequestRateLow", @@ -241,7 +243,7 @@ func TestForStateAddSamples(t *testing.T) { ), Point: promql.Point{V: 1}, }, - { + promql.Sample{ Metric: labels.FromStrings( "__name__", "ALERTS_FOR_STATE", "alertname", "HTTPRequestRateLow", @@ -256,7 +258,7 @@ func TestForStateAddSamples(t *testing.T) { baseTime := time.Unix(0, 0) - var tests = []struct { + tests := []struct { time time.Duration result promql.Vector persistThisTime bool // If true, it means this 'time' is persisted for 'for'. 
@@ -769,7 +771,6 @@ func TestUpdate(t *testing.T) { } else { rgs.Groups[i].Interval = model.Duration(10) } - } reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs) diff --git a/rules/recording.go b/rules/recording.go index 08a7e37ca..0681db9a2 100644 --- a/rules/recording.go +++ b/rules/recording.go @@ -23,8 +23,8 @@ import ( yaml "gopkg.in/yaml.v2" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/rulefmt" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/rulefmt" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/util/strutil" @@ -99,9 +99,9 @@ func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFu return nil, fmt.Errorf("vector contains metrics with the same labelset after applying rule labels") } - numSamples := len(vector) - if limit != 0 && numSamples > limit { - return nil, fmt.Errorf("exceeded limit %d with %d samples", limit, numSamples) + numSeries := len(vector) + if limit > 0 && numSeries > limit { + return nil, fmt.Errorf("exceeded limit of %d with %d series", limit, numSeries) } rule.SetHealth(HealthGood) diff --git a/rules/recording_test.go b/rules/recording_test.go index 211b6cda6..dd06b775f 100644 --- a/rules/recording_test.go +++ b/rules/recording_test.go @@ -21,8 +21,8 @@ import ( "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/util/teststorage" @@ -49,7 +49,6 @@ func TestRuleEval(t *testing.T) { name string expr parser.Expr labels labels.Labels - limit int result promql.Vector err string }{ @@ -71,38 +70,11 @@ func TestRuleEval(t *testing.T) { 
Point: promql.Point{V: 1, T: timestamp.FromTime(now)}, }}, }, - { - name: "underlimit", - expr: &parser.NumberLiteral{Val: 1}, - labels: labels.FromStrings("foo", "bar"), - limit: 2, - result: promql.Vector{promql.Sample{ - Metric: labels.FromStrings("__name__", "underlimit", "foo", "bar"), - Point: promql.Point{V: 1, T: timestamp.FromTime(now)}, - }}, - }, - { - name: "atlimit", - expr: &parser.NumberLiteral{Val: 1}, - labels: labels.FromStrings("foo", "bar"), - limit: 1, - result: promql.Vector{promql.Sample{ - Metric: labels.FromStrings("__name__", "atlimit", "foo", "bar"), - Point: promql.Point{V: 1, T: timestamp.FromTime(now)}, - }}, - }, - { - name: "overlimit", - expr: &parser.NumberLiteral{Val: 1}, - labels: labels.FromStrings("foo", "bar"), - limit: -1, - err: "exceeded limit -1 with 1 samples", - }, } for _, test := range suite { rule := NewRecordingRule(test.name, test.expr, test.labels) - result, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, test.limit) + result, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, 0) if test.err == "" { require.NoError(t, err) } else { @@ -151,3 +123,52 @@ func TestRuleEvalDuplicate(t *testing.T) { require.Error(t, err) require.EqualError(t, err, "vector contains metrics with the same labelset after applying rule labels") } + +func TestRecordingRuleLimit(t *testing.T) { + suite, err := promql.NewTest(t, ` + load 1m + metric{label="1"} 1 + metric{label="2"} 1 + `) + require.NoError(t, err) + defer suite.Close() + + require.NoError(t, suite.Run()) + + tests := []struct { + limit int + err string + }{ + { + limit: 0, + }, + { + limit: -1, + }, + { + limit: 2, + }, + { + limit: 1, + err: "exceeded limit of 1 with 2 series", + }, + } + + expr, _ := parser.ParseExpr(`metric > 0`) + rule := NewRecordingRule( + "foo", + expr, + labels.FromStrings("test", "test"), + ) + + evalTime := time.Unix(0, 0) + + for _, test := range tests { + _, err := rule.Eval(suite.Context(), evalTime, 
EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit) + if err != nil { + require.EqualError(t, err, test.err) + } else if test.err != "" { + t.Errorf("Expected error %s, got none", test.err) + } + } +} diff --git a/scrape/helpers_test.go b/scrape/helpers_test.go index a46350a6e..3e3a456b7 100644 --- a/scrape/helpers_test.go +++ b/scrape/helpers_test.go @@ -17,9 +17,9 @@ import ( "context" "math/rand" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" ) @@ -31,11 +31,15 @@ func (a nopAppendable) Appender(_ context.Context) storage.Appender { type nopAppender struct{} -func (a nopAppender) Append(uint64, labels.Labels, int64, float64) (uint64, error) { return 0, nil } -func (a nopAppender) AppendExemplar(uint64, labels.Labels, exemplar.Exemplar) (uint64, error) { +func (a nopAppender) Append(storage.SeriesRef, labels.Labels, int64, float64) (storage.SeriesRef, error) { return 0, nil } -func (a nopAppender) AppendHistogram(uint64, labels.Labels, int64, *histogram.Histogram) (uint64, error) { + +func (a nopAppender) AppendExemplar(storage.SeriesRef, labels.Labels, exemplar.Exemplar) (storage.SeriesRef, error) { + return 0, nil +} + +func (a nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram) (storage.SeriesRef, error) { return 0, nil } func (a nopAppender) Commit() error { return nil } @@ -66,7 +70,7 @@ type collectResultAppender struct { rolledbackHistograms []histogramSample } -func (a *collectResultAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) { +func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { a.pendingResult = append(a.pendingResult, sample{ metric: 
lset, t: t, @@ -74,7 +78,7 @@ func (a *collectResultAppender) Append(ref uint64, lset labels.Labels, t int64, }) if ref == 0 { - ref = rand.Uint64() + ref = storage.SeriesRef(rand.Uint64()) } if a.next == nil { return ref, nil @@ -87,7 +91,7 @@ func (a *collectResultAppender) Append(ref uint64, lset labels.Labels, t int64, return ref, err } -func (a *collectResultAppender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) { +func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { a.pendingExemplars = append(a.pendingExemplars, e) if a.next == nil { return 0, nil @@ -96,7 +100,7 @@ func (a *collectResultAppender) AppendExemplar(ref uint64, l labels.Labels, e ex return a.next.AppendExemplar(ref, l, e) } -func (a *collectResultAppender) AppendHistogram(ref uint64, l labels.Labels, t int64, h *histogram.Histogram) (uint64, error) { +func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) { a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, t: t}) if a.next == nil { return 0, nil diff --git a/scrape/manager.go b/scrape/manager.go index a4a7e5a50..35d47a86b 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -27,7 +27,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/osutil" ) diff --git a/scrape/manager_test.go b/scrape/manager_test.go index 00228932a..6c5091bb6 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -25,8 +25,8 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/pkg/labels" - 
"github.com/prometheus/prometheus/pkg/relabel" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" ) func TestPopulateLabels(t *testing.T) { @@ -335,7 +335,7 @@ func TestPopulateLabels(t *testing.T) { for _, c := range cases { in := c.in.Copy() - res, orig, err := populateLabels(c.in, c.cfg) + res, orig, err := PopulateLabels(c.in, c.cfg) if c.err != "" { require.EqualError(t, err, c.err) } else { diff --git a/scrape/scrape.go b/scrape/scrape.go index a44ad0c85..71246672c 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -40,15 +40,15 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/pool" - "github.com/prometheus/prometheus/pkg/relabel" - "github.com/prometheus/prometheus/pkg/textparse" - "github.com/prometheus/prometheus/pkg/timestamp" - "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/pool" ) // ScrapeTimestampTolerance is the tolerance for scrape appends timestamps @@ -265,7 +265,7 @@ const maxAheadTime = 10 * time.Minute type labelsMutator func(labels.Labels) labels.Labels -func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, reportScrapeTimeout bool) (*scrapePool, error) { +func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, reportExtraMetrics bool) (*scrapePool, error) { 
targetScrapePools.Inc() if logger == nil { logger = log.NewNopLogger() @@ -314,7 +314,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed opts.labelLimits, opts.interval, opts.timeout, - reportScrapeTimeout, + reportExtraMetrics, ) } @@ -473,7 +473,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { var all []*Target sp.droppedTargets = []*Target{} for _, tg := range tgs { - targets, failures := targetsFromGroup(tg, sp.config) + targets, failures := TargetsFromGroup(tg, sp.config) for _, err := range failures { level.Error(sp.logger).Log("msg", "Creating target failed", "err", err) } @@ -830,7 +830,7 @@ type loop interface { } type cacheEntry struct { - ref uint64 + ref storage.SeriesRef lastIter uint64 hash uint64 lset labels.Labels @@ -862,7 +862,7 @@ type scrapeLoop struct { disabledEndOfRunStalenessMarkers bool - reportScrapeTimeout bool + reportExtraMetrics bool } // scrapeCache tracks mappings of exposed metric strings to label sets and @@ -977,7 +977,7 @@ func (c *scrapeCache) get(met string) (*cacheEntry, bool) { return e, true } -func (c *scrapeCache) addRef(met string, ref uint64, lset labels.Labels, hash uint64) { +func (c *scrapeCache) addRef(met string, ref storage.SeriesRef, lset labels.Labels, hash uint64) { if ref == 0 { return } @@ -1123,7 +1123,7 @@ func newScrapeLoop(ctx context.Context, labelLimits *labelLimits, interval time.Duration, timeout time.Duration, - reportScrapeTimeout bool, + reportExtraMetrics bool, ) *scrapeLoop { if l == nil { l = log.NewNopLogger() @@ -1150,7 +1150,7 @@ func newScrapeLoop(ctx context.Context, labelLimits: labelLimits, interval: interval, timeout: timeout, - reportScrapeTimeout: reportScrapeTimeout, + reportExtraMetrics: reportExtraMetrics, } sl.ctx, sl.cancel = context.WithCancel(ctx) @@ -1201,7 +1201,7 @@ mainLoop: } } - last = sl.scrapeAndReport(sl.interval, sl.timeout, last, scrapeTime, errc) + last = sl.scrapeAndReport(last, scrapeTime, errc) select { case 
<-sl.parentCtx.Done(): @@ -1225,12 +1225,12 @@ mainLoop: // In the happy scenario, a single appender is used. // This function uses sl.parentCtx instead of sl.ctx on purpose. A scrape should // only be cancelled on shutdown, not on reloads. -func (sl *scrapeLoop) scrapeAndReport(interval, timeout time.Duration, last, appendTime time.Time, errc chan<- error) time.Time { +func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- error) time.Time { start := time.Now() // Only record after the first scrape. if !last.IsZero() { - targetIntervalLength.WithLabelValues(interval.String()).Observe( + targetIntervalLength.WithLabelValues(sl.interval.String()).Observe( time.Since(last).Seconds(), ) } @@ -1239,7 +1239,7 @@ func (sl *scrapeLoop) scrapeAndReport(interval, timeout time.Duration, last, app defer sl.buffers.Put(b) buf := bytes.NewBuffer(b) - var total, added, seriesAdded int + var total, added, seriesAdded, bytes int var err, appErr, scrapeErr error app := sl.appender(sl.parentCtx) @@ -1255,7 +1255,7 @@ func (sl *scrapeLoop) scrapeAndReport(interval, timeout time.Duration, last, app }() defer func() { - if err = sl.report(app, appendTime, timeout, time.Since(start), total, added, seriesAdded, scrapeErr); err != nil { + if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, bytes, scrapeErr); err != nil { level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err) } }() @@ -1276,7 +1276,7 @@ func (sl *scrapeLoop) scrapeAndReport(interval, timeout time.Duration, last, app } var contentType string - scrapeCtx, cancel := context.WithTimeout(sl.parentCtx, timeout) + scrapeCtx, cancel := context.WithTimeout(sl.parentCtx, sl.timeout) contentType, scrapeErr = sl.scraper.scrape(scrapeCtx, buf) cancel() @@ -1288,11 +1288,15 @@ func (sl *scrapeLoop) scrapeAndReport(interval, timeout time.Duration, last, app if len(b) > 0 { sl.lastScrapeSize = len(b) } + bytes = len(b) } else { level.Debug(sl.l).Log("msg", "Scrape 
failed", "err", scrapeErr) if errc != nil { errc <- scrapeErr } + if errors.Is(scrapeErr, errBodySizeLimit) { + bytes = -1 + } } // A failed scrape is the same as an empty scrape, @@ -1484,7 +1488,7 @@ loop: } ce, ok := sl.cache.get(yoloString(met)) var ( - ref uint64 + ref storage.SeriesRef lset labels.Labels mets string hash uint64 @@ -1653,16 +1657,17 @@ func (sl *scrapeLoop) checkAddExemplarError(err error, e exemplar.Exemplar, appE // The constants are suffixed with the invalid \xff unicode rune to avoid collisions // with scraped metrics in the cache. const ( - scrapeHealthMetricName = "up" + "\xff" - scrapeDurationMetricName = "scrape_duration_seconds" + "\xff" - scrapeSamplesMetricName = "scrape_samples_scraped" + "\xff" - samplesPostRelabelMetricName = "scrape_samples_post_metric_relabeling" + "\xff" - scrapeSeriesAddedMetricName = "scrape_series_added" + "\xff" - scrapeTimeoutMetricName = "scrape_timeout_seconds" + "\xff" - scrapeSampleLimitMetricName = "scrape_sample_limit" + "\xff" + scrapeHealthMetricName = "up" + "\xff" + scrapeDurationMetricName = "scrape_duration_seconds" + "\xff" + scrapeSamplesMetricName = "scrape_samples_scraped" + "\xff" + samplesPostRelabelMetricName = "scrape_samples_post_metric_relabeling" + "\xff" + scrapeSeriesAddedMetricName = "scrape_series_added" + "\xff" + scrapeTimeoutMetricName = "scrape_timeout_seconds" + "\xff" + scrapeSampleLimitMetricName = "scrape_sample_limit" + "\xff" + scrapeBodySizeBytesMetricName = "scrape_body_size_bytes" + "\xff" ) -func (sl *scrapeLoop) report(app storage.Appender, start time.Time, timeout, duration time.Duration, scraped, added, seriesAdded int, scrapeErr error) (err error) { +func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) { sl.scraper.Report(start, duration, scrapeErr) ts := timestamp.FromTime(start) @@ -1687,13 +1692,16 @@ func (sl *scrapeLoop) report(app storage.Appender, 
start time.Time, timeout, dur if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, float64(seriesAdded)); err != nil { return } - if sl.reportScrapeTimeout { - if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, timeout.Seconds()); err != nil { + if sl.reportExtraMetrics { + if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, sl.timeout.Seconds()); err != nil { return } if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, float64(sl.sampleLimit)); err != nil { return } + if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, float64(bytes)); err != nil { + return + } } return } @@ -1718,20 +1726,23 @@ func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err er if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, stale); err != nil { return } - if sl.reportScrapeTimeout { + if sl.reportExtraMetrics { if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, stale); err != nil { return } if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, stale); err != nil { return } + if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, stale); err != nil { + return + } } return } func (sl *scrapeLoop) addReportSample(app storage.Appender, s string, t int64, v float64) error { ce, ok := sl.cache.get(s) - var ref uint64 + var ref storage.SeriesRef var lset labels.Labels if ok { ref = ce.ref diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index d09a712e2..eca6bf714 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -38,12 +38,12 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/relabel" - "github.com/prometheus/prometheus/pkg/textparse" - "github.com/prometheus/prometheus/pkg/timestamp" - "github.com/prometheus/prometheus/pkg/value" + 
"github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" @@ -109,7 +109,6 @@ func TestDroppedTargetsList(t *testing.T) { // TestDiscoveredLabelsUpdate checks that DiscoveredLabels are updated // even when new labels don't affect the target `hash`. func TestDiscoveredLabelsUpdate(t *testing.T) { - sp := &scrapePool{} // These are used when syncing so need this to avoid a panic. sp.config = &config.ScrapeConfig{ @@ -350,7 +349,7 @@ func TestScrapePoolTargetLimit(t *testing.T) { client: http.DefaultClient, } - var tgs = []*targetgroup.Group{} + tgs := []*targetgroup.Group{} for i := 0; i < 50; i++ { tgs = append(tgs, &targetgroup.Group{ @@ -1000,6 +999,7 @@ func BenchmarkScrapeLoopAppend(b *testing.B) { _, _, _, _ = sl.append(slApp, metrics, "", ts) } } + func BenchmarkScrapeLoopAppendOM(b *testing.B) { ctx, sl := simpleTestScrapeLoop(b) @@ -1409,8 +1409,10 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) { "Two target labels collide with existing labels, both with and without prefix 'exported'": { targetLabels: []string{"foo", "3", "exported_foo", "4"}, exposedLabels: `metric{foo="1" exported_foo="2"} 0`, - expected: []string{"__name__", "metric", "exported_exported_foo", "1", "exported_exported_exported_foo", - "2", "exported_foo", "4", "foo", "3"}, + expected: []string{ + "__name__", "metric", "exported_exported_foo", "1", "exported_exported_exported_foo", + "2", "exported_foo", "4", "foo", "3", + }, }, "Extreme example": { targetLabels: []string{"foo", "0", "exported_exported_foo", "1", "exported_exported_exported_foo", "2"}, @@ -1473,7 +1475,7 @@ func 
TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { false, ) - fakeRef := uint64(1) + fakeRef := storage.SeriesRef(1) expValue := float64(1) metric := `metric{n="1"} 1` p := textparse.New([]byte(metric), "") @@ -1743,7 +1745,8 @@ func TestScrapeLoopAppendExemplar(t *testing.T) { exemplars: []exemplar.Exemplar{ {Labels: labels.FromStrings("a", "abc"), Value: 1}, }, - }, { + }, + { title: "Metric with exemplars and TS", scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0 10000\n# EOF", discoveryLabels: []string{"n", "2"}, @@ -1754,7 +1757,8 @@ func TestScrapeLoopAppendExemplar(t *testing.T) { exemplars: []exemplar.Exemplar{ {Labels: labels.FromStrings("a", "abc"), Value: 1, Ts: 10000000, HasTs: true}, }, - }, { + }, + { title: "Two metrics and exemplars", scrapeText: `metric_total{n="1"} 1 # {t="1"} 1.0 10000 metric_total{n="2"} 2 # {t="2"} 2.0 20000 @@ -1958,7 +1962,7 @@ type errorAppender struct { collectResultAppender } -func (app *errorAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) { +func (app *errorAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { switch lset.Get(model.MetricNameLabel) { case "out_of_order": return 0, storage.ErrOutOfOrderSample @@ -2040,7 +2044,6 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) { require.Equal(t, 1, total) require.Equal(t, 1, added) require.Equal(t, 0, seriesAdded) - } func TestTargetScraperScrapeOK(t *testing.T) { diff --git a/scrape/target.go b/scrape/target.go index ada1bcdc5..59d6c9403 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -27,10 +27,10 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/relabel" - "github.com/prometheus/prometheus/pkg/textparse" - "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/model/labels" + 
"github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" ) @@ -144,17 +144,8 @@ func (t *Target) SetMetadataStore(s MetricMetadataStore) { func (t *Target) hash() uint64 { h := fnv.New64a() - // We must build a label set without the scrape interval and timeout - // labels because those aren't defining attributes of a target - // and can be changed without qualifying its parent as a new target, - // therefore they should not effect its unique hash. - l := t.labels.Map() - delete(l, model.ScrapeIntervalLabel) - delete(l, model.ScrapeTimeoutLabel) - lset := labels.FromMap(l) - //nolint: errcheck - h.Write([]byte(fmt.Sprintf("%016d", lset.Hash()))) + h.Write([]byte(fmt.Sprintf("%016d", t.labels.Hash()))) //nolint: errcheck h.Write([]byte(t.URL().String())) @@ -325,7 +316,7 @@ type limitAppender struct { i int } -func (app *limitAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) { +func (app *limitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { if !value.IsStaleNaN(v) { app.i++ if app.i > app.limit { @@ -345,7 +336,7 @@ type timeLimitAppender struct { maxTime int64 } -func (app *timeLimitAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) { +func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { if t > app.maxTime { return 0, storage.ErrOutOfBounds } @@ -357,10 +348,10 @@ func (app *timeLimitAppender) Append(ref uint64, lset labels.Labels, t int64, v return ref, nil } -// populateLabels builds a label set from the given label set and scrape configuration. +// PopulateLabels builds a label set from the given label set and scrape configuration. // It returns a label set before relabeling was applied as the second return value. 
// Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. -func populateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) { +func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) { // Copy labels into the labelset for the target if they are not set already. scrapeLabels := []labels.Label{ {Name: model.JobLabel, Value: cfg.JobName}, @@ -478,8 +469,8 @@ func populateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab return res, preRelabelLabels, nil } -// targetsFromGroup builds targets based on the given TargetGroup and config. -func targetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Target, []error) { +// TargetsFromGroup builds targets based on the given TargetGroup and config. +func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Target, []error) { targets := make([]*Target, 0, len(tg.Targets)) failures := []error{} @@ -497,7 +488,7 @@ func targetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Targe lset := labels.New(lbls...) 
- lbls, origLabels, err := populateLabels(lset, cfg) + lbls, origLabels, err := PopulateLabels(lset, cfg) if err != nil { failures = append(failures, errors.Wrapf(err, "instance %d in group %s", i, tg)) } diff --git a/scrape/target_test.go b/scrape/target_test.go index a578d2760..32ab4669e 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -31,7 +31,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" ) const ( @@ -371,7 +371,7 @@ func TestNewClientWithBadTLSConfig(t *testing.T) { func TestTargetsFromGroup(t *testing.T) { expectedError := "instance 0 in group : no address" - targets, failures := targetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &config.ScrapeConfig{}) + targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &config.ScrapeConfig{}) if len(targets) != 1 { t.Fatalf("Expected 1 target, got %v", len(targets)) } @@ -382,29 +382,3 @@ func TestTargetsFromGroup(t *testing.T) { t.Fatalf("Expected error %s, got %s", expectedError, failures[0]) } } - -func TestTargetHash(t *testing.T) { - target1 := &Target{ - labels: labels.Labels{ - {Name: model.AddressLabel, Value: "localhost"}, - {Name: model.SchemeLabel, Value: "http"}, - {Name: model.MetricsPathLabel, Value: "/metrics"}, - {Name: model.ScrapeIntervalLabel, Value: "15s"}, - {Name: model.ScrapeTimeoutLabel, Value: "500ms"}, - }, - } - hash1 := target1.hash() - - target2 := &Target{ - labels: labels.Labels{ - {Name: model.AddressLabel, Value: "localhost"}, - {Name: model.SchemeLabel, Value: "http"}, - {Name: model.MetricsPathLabel, Value: "/metrics"}, - {Name: model.ScrapeIntervalLabel, Value: "14s"}, - {Name: model.ScrapeTimeoutLabel, Value: "600ms"}, - }, - } - hash2 := target2.hash() - - require.Equal(t, 
hash1, hash2, "Scrape interval and duration labels should not effect hash.") -} diff --git a/scripts/sync_codemirror.sh b/scripts/sync_codemirror.sh index 82362d2cf..83cdd349c 100755 --- a/scripts/sync_codemirror.sh +++ b/scripts/sync_codemirror.sh @@ -11,7 +11,7 @@ branch="repo_sync_codemirror" commit_msg="Update codemirror" pr_title="Synchronize codemirror from prometheus/prometheus" pr_msg="Propagating changes from prometheus/prometheus default branch." -target_repo="prometheus-community/codemirror-promql" +target_repo="prometheus/codemirror-promql" source_path="web/ui/module/codemirror-promql" color_red='\e[31m' diff --git a/storage/buffer.go b/storage/buffer.go index cad7e6653..767e204b3 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -326,7 +326,7 @@ func (r *sampleRing) nthLast(n int) (sample, bool) { func (r *sampleRing) samples() []sample { res := make([]sample, r.l) - var k = r.f + r.l + k := r.f + r.l var j int if k > len(r.buf) { k = len(r.buf) diff --git a/storage/buffer_test.go b/storage/buffer_test.go index 5cb8aeab8..a10874903 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -17,9 +17,10 @@ import ( "math/rand" "testing" + "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/stretchr/testify/require" ) func TestSampleRing(t *testing.T) { @@ -199,6 +200,7 @@ func (m *mockSeriesIterator) At() (int64, float64) { return m.at() } func (m *mockSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { return 0, nil } + func (m *mockSeriesIterator) ChunkEncoding() chunkenc.Encoding { return chunkenc.EncXOR } diff --git a/storage/fanout.go b/storage/fanout.go index 672756d96..46371c59e 100644 --- a/storage/fanout.go +++ b/storage/fanout.go @@ -20,9 +20,9 @@ import ( "github.com/go-kit/log/level" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/exemplar" 
"github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" ) @@ -145,7 +145,7 @@ type fanoutAppender struct { secondaries []Appender } -func (f *fanoutAppender) Append(ref uint64, l labels.Labels, t int64, v float64) (uint64, error) { +func (f *fanoutAppender) Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error) { ref, err := f.primary.Append(ref, l, t, v) if err != nil { return ref, err @@ -159,7 +159,7 @@ func (f *fanoutAppender) Append(ref uint64, l labels.Labels, t int64, v float64) return ref, nil } -func (f *fanoutAppender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) { +func (f *fanoutAppender) AppendExemplar(ref SeriesRef, l labels.Labels, e exemplar.Exemplar) (SeriesRef, error) { ref, err := f.primary.AppendExemplar(ref, l, e) if err != nil { return ref, err @@ -173,7 +173,7 @@ func (f *fanoutAppender) AppendExemplar(ref uint64, l labels.Labels, e exemplar. 
return ref, nil } -func (f *fanoutAppender) AppendHistogram(ref uint64, l labels.Labels, t int64, h *histogram.Histogram) (uint64, error) { +func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (SeriesRef, error) { ref, err := f.primary.AppendHistogram(ref, l, t, h) if err != nil { return ref, err diff --git a/storage/fanout_test.go b/storage/fanout_test.go index 725ffec49..cc228e666 100644 --- a/storage/fanout_test.go +++ b/storage/fanout_test.go @@ -21,7 +21,7 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/teststorage" ) diff --git a/storage/generic.go b/storage/generic.go index 817122bc1..6762f32a1 100644 --- a/storage/generic.go +++ b/storage/generic.go @@ -17,7 +17,7 @@ package storage import ( - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" ) type genericQuerier interface { diff --git a/storage/interface.go b/storage/interface.go index f15b1c00d..bb3db1085 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -18,9 +18,9 @@ import ( "errors" "fmt" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" ) @@ -37,6 +37,11 @@ var ( ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0") ) +// SeriesRef is a generic series reference. In prometheus it is either a +// HeadSeriesRef or BlockSeriesRef, though other implementations may have +// their own reference types. 
+type SeriesRef uint64 + // Appendable allows creating appenders. type Appendable interface { // Appender returns a new appender for the storage. The implementation @@ -146,6 +151,11 @@ type SelectHints struct { Grouping []string // List of label names used in aggregation. By bool // Indicate whether it is without or by. Range int64 // Range vector selector range in milliseconds. + + // DisableTrimming allows to disable trimming of matching series chunks based on query Start and End time. + // When disabled, the result may contain samples outside the queried time range but Select() performances + // may be improved. + DisableTrimming bool } // TODO(bwplotka): Move to promql/engine_test.go? @@ -164,14 +174,14 @@ func (f QueryableFunc) Querier(ctx context.Context, mint, maxt int64) (Querier, // Operations on the Appender interface are not goroutine-safe. type Appender interface { // Append adds a sample pair for the given series. - // An optional reference number can be provided to accelerate calls. - // A reference number is returned which can be used to add further - // samples in the same or later transactions. + // An optional series reference can be provided to accelerate calls. + // A series reference number is returned which can be used to add further + // samples to the given series in the same or later transactions. // Returned reference numbers are ephemeral and may be rejected in calls // to Append() at any point. Adding the sample via Append() returns a new // reference number. // If the reference is 0 it must not be used for caching. - Append(ref uint64, l labels.Labels, t int64, v float64) (uint64, error) + Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error) // Commit submits the collected samples and purges the batch. 
If Commit // returns a non-nil error, it also rolls back all modifications made in @@ -193,7 +203,7 @@ type GetRef interface { // Returns reference number that can be used to pass to Appender.Append(), // and a set of labels that will not cause another copy when passed to Appender.Append(). // 0 means the appender does not have a reference to this series. - GetRef(lset labels.Labels) (uint64, labels.Labels) + GetRef(lset labels.Labels) (SeriesRef, labels.Labels) } // ExemplarAppender provides an interface for adding samples to exemplar storage, which @@ -210,7 +220,7 @@ type ExemplarAppender interface { // Note that in our current implementation of Prometheus' exemplar storage // calls to Append should generate the reference numbers, AppendExemplar // generating a new reference number should be considered possible erroneous behaviour and be logged. - AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) + AppendExemplar(ref SeriesRef, l labels.Labels, e exemplar.Exemplar) (SeriesRef, error) } // HistogramAppender provides an interface for appending histograms to the storage. @@ -222,7 +232,7 @@ type HistogramAppender interface { // numbers are ephemeral and may be rejected in calls to Append() at any // point. Adding the sample via Append() returns a new reference number. // If the reference is 0 it must not be used for caching. - AppendHistogram(ref uint64, l labels.Labels, t int64, h *histogram.Histogram) (uint64, error) + AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (SeriesRef, error) } // SeriesSet contains a set of series. 
diff --git a/storage/merge.go b/storage/merge.go index daa12108a..464e77a17 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -23,7 +23,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -43,7 +43,7 @@ type mergeGenericQuerier struct { // See NewFanout commentary to learn more about primary vs secondary differences. // // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. -func NewMergeQuerier(primaries []Querier, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier { +func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier { queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) for _, q := range primaries { if _, ok := q.(noopQuerier); !ok && q != nil { @@ -72,7 +72,7 @@ func NewMergeQuerier(primaries []Querier, secondaries []Querier, mergeFn Vertica // // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. // TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. 
Split it: https://github.com/prometheus/tsdb/issues/670 -func NewMergeChunkQuerier(primaries []ChunkQuerier, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier { +func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier { queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) for _, q := range primaries { if _, ok := q.(noopChunkQuerier); !ok && q != nil { @@ -105,7 +105,7 @@ func (q *mergeGenericQuerier) Select(sortSeries bool, hints *SelectHints, matche return q.queriers[0].Select(sortSeries, hints, matchers...) } - var seriesSets = make([]genericSeriesSet, 0, len(q.queriers)) + seriesSets := make([]genericSeriesSet, 0, len(q.queriers)) if !q.concurrentSelect { for _, querier := range q.queriers { // We need to sort for merge to work. @@ -266,7 +266,6 @@ func NewMergeSeriesSet(sets []SeriesSet, mergeFunc VerticalSeriesMergeFunc) Seri genericSets := make([]genericSeriesSet, 0, len(sets)) for _, s := range sets { genericSets = append(genericSets, &genericSeriesSetAdapter{s}) - } return &seriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFunc}).Merge)} } @@ -282,7 +281,6 @@ func NewMergeChunkSeriesSet(sets []ChunkSeriesSet, mergeFunc VerticalChunkSeries genericSets := make([]genericSeriesSet, 0, len(sets)) for _, s := range sets { genericSets = append(genericSets, &genericChunkSeriesSetAdapter{s}) - } return &chunkSeriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFunc}).Merge)} } diff --git a/storage/merge_test.go b/storage/merge_test.go index 62aa4376e..61844a566 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -23,7 +23,7 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" 
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/tsdbutil" ) @@ -616,7 +616,8 @@ func TestChainSampleIterator(t *testing.T) { NewListSeriesIterator(samples{sample{2, 2, nil}, sample{5, 5, nil}}), }, expected: []tsdbutil.Sample{ - sample{0, 0, nil}, sample{1, 1, nil}, sample{2, 2, nil}, sample{3, 3, nil}, sample{4, 4, nil}, sample{5, 5, nil}}, + sample{0, 0, nil}, sample{1, 1, nil}, sample{2, 2, nil}, sample{3, 3, nil}, sample{4, 4, nil}, sample{5, 5, nil}, + }, }, // Overlap. { diff --git a/storage/noop.go b/storage/noop.go index c63353b92..83953ca43 100644 --- a/storage/noop.go +++ b/storage/noop.go @@ -14,7 +14,7 @@ package storage import ( - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" ) type noopQuerier struct{} diff --git a/storage/remote/codec.go b/storage/remote/codec.go index 666e847f8..393a03a13 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -26,10 +26,10 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/textparse" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 68cfe9899..b5949cd31 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -20,8 +20,8 @@ import ( "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/textparse" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" 
"github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" ) @@ -292,7 +292,7 @@ func TestMetricTypeToMetricTypeProto(t *testing.T) { } func TestDecodeWriteRequest(t *testing.T) { - buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil) + buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil) require.NoError(t, err) actual, err := DecodeWriteRequest(bytes.NewReader(buf)) diff --git a/storage/remote/metadata_watcher.go b/storage/remote/metadata_watcher.go index 1096e406a..eee36463b 100644 --- a/storage/remote/metadata_watcher.go +++ b/storage/remote/metadata_watcher.go @@ -21,6 +21,7 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/scrape" ) diff --git a/storage/remote/metadata_watcher_test.go b/storage/remote/metadata_watcher_test.go index c8114fbd6..3660bb9a7 100644 --- a/storage/remote/metadata_watcher_test.go +++ b/storage/remote/metadata_watcher_test.go @@ -20,9 +20,10 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/pkg/textparse" - "github.com/prometheus/prometheus/scrape" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/scrape" ) var ( diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 17e57ea85..e7aafaf3e 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -26,15 +26,16 @@ import ( "github.com/golang/snappy" "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" - "go.uber.org/atomic" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "go.uber.org/atomic" + "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/relabel" + 
"github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/scrape" + "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/wal" ) @@ -353,11 +354,11 @@ type QueueManager struct { storeClient WriteClient seriesMtx sync.Mutex // Covers seriesLabels and droppedSeries. - seriesLabels map[uint64]labels.Labels - droppedSeries map[uint64]struct{} + seriesLabels map[chunks.HeadSeriesRef]labels.Labels + droppedSeries map[chunks.HeadSeriesRef]struct{} seriesSegmentMtx sync.Mutex // Covers seriesSegmentIndexes - if you also lock seriesMtx, take seriesMtx first. - seriesSegmentIndexes map[uint64]int + seriesSegmentIndexes map[chunks.HeadSeriesRef]int shards *shards numShards int @@ -406,9 +407,9 @@ func NewQueueManager( storeClient: client, sendExemplars: enableExemplarRemoteWrite, - seriesLabels: make(map[uint64]labels.Labels), - seriesSegmentIndexes: make(map[uint64]int), - droppedSeries: make(map[uint64]struct{}), + seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels), + seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int), + droppedSeries: make(map[chunks.HeadSeriesRef]struct{}), numShards: cfg.MinShards, reshardChan: make(chan int), @@ -433,7 +434,7 @@ func NewQueueManager( return t } -// AppendMetadata sends metadata the remote storage. Metadata is sent all at once and is not parallelized. +// AppendMetadata sends metadata the remote storage. Metadata is sent in batches, but is not parallelized. 
func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.MetricMetadata) { mm := make([]prompb.MetricMetadata, 0, len(metadata)) for _, entry := range metadata { @@ -445,13 +446,14 @@ func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.Met }) } + pBuf := proto.NewBuffer(nil) numSends := int(math.Ceil(float64(len(metadata)) / float64(t.mcfg.MaxSamplesPerSend))) for i := 0; i < numSends; i++ { last := (i + 1) * t.mcfg.MaxSamplesPerSend if last > len(metadata) { last = len(metadata) } - err := t.sendMetadataWithBackoff(ctx, mm[i*t.mcfg.MaxSamplesPerSend:last]) + err := t.sendMetadataWithBackoff(ctx, mm[i*t.mcfg.MaxSamplesPerSend:last], pBuf) if err != nil { t.metrics.failedMetadataTotal.Add(float64(last - (i * t.mcfg.MaxSamplesPerSend))) level.Error(t.logger).Log("msg", "non-recoverable error while sending metadata", "count", last-(i*t.mcfg.MaxSamplesPerSend), "err", err) @@ -459,9 +461,9 @@ func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.Met } } -func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []prompb.MetricMetadata) error { +func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []prompb.MetricMetadata, pBuf *proto.Buffer) error { // Build the WriteRequest with no samples. - req, _, err := buildWriteRequest(nil, metadata, nil) + req, _, err := buildWriteRequest(nil, metadata, pBuf, nil) if err != nil { return err } @@ -727,7 +729,7 @@ func (t *QueueManager) releaseLabels(ls labels.Labels) { // processExternalLabels merges externalLabels into ls. If ls contains // a label in externalLabels, the value in ls wins. 
-func processExternalLabels(ls labels.Labels, externalLabels labels.Labels) labels.Labels { +func processExternalLabels(ls, externalLabels labels.Labels) labels.Labels { i, j, result := 0, 0, make(labels.Labels, 0, len(ls)+len(externalLabels)) for i < len(ls) && j < len(externalLabels) { if ls[i].Name < externalLabels[j].Name { @@ -996,7 +998,7 @@ func (s *shards) stop() { // enqueue data (sample or exemplar). If we are currently in the process of shutting down or resharding, // will return false; in this case, you should back off and retry. -func (s *shards) enqueue(ref uint64, data interface{}) bool { +func (s *shards) enqueue(ref chunks.HeadSeriesRef, data interface{}) bool { s.mtx.RLock() defer s.mtx.RUnlock() @@ -1040,13 +1042,14 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface max = s.qm.cfg.MaxSamplesPerSend nPending, nPendingSamples, nPendingExemplars = 0, 0, 0 - buf []byte + pBuf = proto.NewBuffer(nil) + buf []byte ) if s.qm.sendExemplars { max += int(float64(max) * 0.1) } - var pendingData = make([]prompb.TimeSeries, max) + pendingData := make([]prompb.TimeSeries, max) for i := range pendingData { pendingData[i].Samples = []prompb.Sample{{}} if s.qm.sendExemplars { @@ -1084,7 +1087,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface if !ok { if nPendingSamples > 0 || nPendingExemplars > 0 { level.Debug(s.qm.logger).Log("msg", "Flushing data to remote storage...", "samples", nPendingSamples, "exemplars", nPendingExemplars) - s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, &buf) + s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, pBuf, &buf) s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples)) s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars)) level.Debug(s.qm.logger).Log("msg", "Done flushing.") @@ -1114,7 +1117,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface } if nPending >= 
max { - s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, &buf) + s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, pBuf, &buf) s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples)) s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars)) nPendingSamples = 0 @@ -1128,7 +1131,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface case <-timer.C: if nPendingSamples > 0 || nPendingExemplars > 0 { level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, "exemplars", nPendingExemplars, "shard", shardNum) - s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, &buf) + s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, pBuf, &buf) s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples)) s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars)) nPendingSamples = 0 @@ -1140,9 +1143,9 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface } } -func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, buf *[]byte) { +func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) { begin := time.Now() - err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, buf) + err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, pBuf, buf) if err != nil { level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err) s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount)) @@ -1157,16 +1160,16 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s } // sendSamples to the remote storage with backoff for recoverable errors. 
-func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, buf *[]byte) error { +func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) error { // Build the WriteRequest with no metadata. - req, highest, err := buildWriteRequest(samples, nil, *buf) + req, highest, err := buildWriteRequest(samples, nil, pBuf, *buf) if err != nil { // Failing to build the write request is non-recoverable, since it will // only error if marshaling the proto to bytes fails. return err } - reqSize := len(*buf) + reqSize := len(req) *buf = req // An anonymous function allows us to defer the completion of our per-try spans @@ -1262,11 +1265,10 @@ func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l } try++ - continue } } -func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMetadata, buf []byte) ([]byte, int64, error) { +func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf []byte) ([]byte, int64, error) { var highest int64 for _, ts := range samples { // At the moment we only ever append a TimeSeries with a single sample or exemplar in it. @@ -1283,7 +1285,12 @@ func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMeta Metadata: metadata, } - data, err := proto.Marshal(req) + if pBuf == nil { + pBuf = proto.NewBuffer(nil) // For convenience in tests. Not efficient. 
+ } else { + pBuf.Reset() + } + err := pBuf.Marshal(req) if err != nil { return nil, highest, err } @@ -1293,6 +1300,6 @@ func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMeta if buf != nil { buf = buf[0:cap(buf)] } - compressed := snappy.Encode(buf, data) + compressed := snappy.Encode(buf, pBuf.Bytes()) return compressed, highest, nil } diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index ccca000df..64a3d6462 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -38,11 +38,12 @@ import ( "go.uber.org/atomic" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/textparse" - "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/scrape" + "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" ) @@ -60,7 +61,6 @@ func newHighestTimestampMetric() *maxTimestamp { } func TestSampleDelivery(t *testing.T) { - testcases := []struct { name string samples bool @@ -107,7 +107,6 @@ func TestSampleDelivery(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - var ( series []record.RefSeries samples []record.RefSample @@ -229,12 +228,12 @@ func TestSampleDeliveryOrder(t *testing.T) { for i := 0; i < n; i++ { name := fmt.Sprintf("test_metric_%d", i%ts) samples = append(samples, record.RefSample{ - Ref: uint64(i), + Ref: chunks.HeadSeriesRef(i), T: int64(i), V: float64(i), }) series = append(series, record.RefSeries{ - Ref: uint64(i), + Ref: chunks.HeadSeriesRef(i), Labels: labels.Labels{labels.Label{Name: "__name__", Value: name}}, }) } @@ -322,7 +321,7 @@ func TestSeriesReset(t *testing.T) { for i := 0; 
i < numSegments; i++ { series := []record.RefSeries{} for j := 0; j < numSeries; j++ { - series = append(series, record.RefSeries{Ref: uint64((i * 100) + j), Labels: labels.Labels{{Name: "a", Value: "a"}}}) + series = append(series, record.RefSeries{Ref: chunks.HeadSeriesRef((i * 100) + j), Labels: labels.Labels{{Name: "a", Value: "a"}}}) } m.StoreSeries(series, i) } @@ -408,11 +407,12 @@ func TestReleaseNoninternedString(t *testing.T) { c := NewTestWriteClient() m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.Start() + defer m.Stop() for i := 1; i < 1000; i++ { m.StoreSeries([]record.RefSeries{ { - Ref: uint64(i), + Ref: chunks.HeadSeriesRef(i), Labels: labels.Labels{ labels.Label{ Name: "asdf", @@ -481,13 +481,13 @@ func createTimeseries(numSamples, numSeries int, extraLabels ...labels.Label) ([ name := fmt.Sprintf("test_metric_%d", i) for j := 0; j < numSamples; j++ { samples = append(samples, record.RefSample{ - Ref: uint64(i), + Ref: chunks.HeadSeriesRef(i), T: int64(j), V: float64(i), }) } series = append(series, record.RefSeries{ - Ref: uint64(i), + Ref: chunks.HeadSeriesRef(i), Labels: append(labels.Labels{{Name: "__name__", Value: name}}, extraLabels...), }) } @@ -501,7 +501,7 @@ func createExemplars(numExemplars, numSeries int) ([]record.RefExemplar, []recor name := fmt.Sprintf("test_metric_%d", i) for j := 0; j < numExemplars; j++ { e := record.RefExemplar{ - Ref: uint64(i), + Ref: chunks.HeadSeriesRef(i), T: int64(j), V: float64(i), Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", i)), @@ -509,7 +509,7 @@ func createExemplars(numExemplars, numSeries int) ([]record.RefExemplar, []recor exemplars = append(exemplars, e) } series = append(series, record.RefSeries{ - Ref: uint64(i), + Ref: chunks.HeadSeriesRef(i), Labels: labels.Labels{{Name: "__name__", Value: name}}, }) } @@ -714,7 +714,7 @@ func 
BenchmarkSampleDelivery(b *testing.B) { const numSeries = 10000 // Extra labels to make a more realistic workload - taken from Kubernetes' embedded cAdvisor metrics. - var extraLabels = labels.Labels{ + extraLabels := labels.Labels{ {Name: "kubernetes_io_arch", Value: "amd64"}, {Name: "kubernetes_io_instance_type", Value: "c3.somesize"}, {Name: "kubernetes_io_os", Value: "linux"}, diff --git a/storage/remote/read.go b/storage/remote/read.go index 7f1d749e6..071763593 100644 --- a/storage/remote/read.go +++ b/storage/remote/read.go @@ -18,7 +18,7 @@ import ( "github.com/pkg/errors" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" ) diff --git a/storage/remote/read_handler.go b/storage/remote/read_handler.go index ceea4579a..e1f1df21c 100644 --- a/storage/remote/read_handler.go +++ b/storage/remote/read_handler.go @@ -20,13 +20,13 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/pkg/gate" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/gate" ) type readHandler struct { diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go index 8962a8ba3..86edbe0b9 100644 --- a/storage/remote/read_handler_test.go +++ b/storage/remote/read_handler_test.go @@ -26,7 +26,7 @@ import ( "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" diff --git a/storage/remote/read_test.go 
b/storage/remote/read_test.go index 646d00c46..bf3bff1de 100644 --- a/storage/remote/read_test.go +++ b/storage/remote/read_test.go @@ -27,7 +27,7 @@ import ( "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" ) @@ -506,7 +506,6 @@ func TestSampleAndChunkQueryableClient(t *testing.T) { } require.NoError(t, ss.Err()) require.Equal(t, tc.expectedSeries, got) - }) } } diff --git a/storage/remote/storage.go b/storage/remote/storage.go index 5716605a8..c82db1f9d 100644 --- a/storage/remote/storage.go +++ b/storage/remote/storage.go @@ -27,10 +27,10 @@ import ( "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/logging" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/logging" ) // String constants for instrumentation. @@ -51,7 +51,7 @@ type startTimeCallback func() (int64, error) // Storage represents all the remote read and write endpoints. It implements // storage.Storage. type Storage struct { - logger log.Logger + logger *logging.Deduper mtx sync.Mutex rws *WriteStorage @@ -66,9 +66,10 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal if l == nil { l = log.NewNopLogger() } + logger := logging.Dedupe(l, 1*time.Minute) s := &Storage{ - logger: logging.Dedupe(l, 1*time.Minute), + logger: logger, localStartTimeCallback: stCallback, } s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm) @@ -179,8 +180,14 @@ func (s *Storage) Appender(ctx context.Context) storage.Appender { return s.rws.Appender(ctx) } +// LowestSentTimestamp returns the lowest sent timestamp across all queues. 
+func (s *Storage) LowestSentTimestamp() int64 { + return s.rws.LowestSentTimestamp() +} + // Close the background processing of the storage queues. func (s *Storage) Close() error { + s.logger.Stop() s.mtx.Lock() defer s.mtx.Unlock() return s.rws.Close() diff --git a/storage/remote/write.go b/storage/remote/write.go index c47bc0e98..6dd3740c5 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -16,6 +16,7 @@ package remote import ( "context" "fmt" + "math" "sync" "time" @@ -24,9 +25,9 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/wal" ) @@ -61,6 +62,7 @@ type WriteStorage struct { flushDeadline time.Duration interner *pool scraper ReadyScrapeManager + quit chan struct{} // For timestampTracker. highestTimestamp *maxTimestamp @@ -82,6 +84,7 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, walDir string walDir: walDir, interner: newPool(), scraper: sm, + quit: make(chan struct{}), highestTimestamp: &maxTimestamp{ Gauge: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, @@ -101,8 +104,13 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, walDir string func (rws *WriteStorage) run() { ticker := time.NewTicker(shardUpdateDuration) defer ticker.Stop() - for range ticker.C { - rws.samplesIn.tick() + for { + select { + case <-ticker.C: + rws.samplesIn.tick() + case <-rws.quit: + return + } } } @@ -208,6 +216,26 @@ func (rws *WriteStorage) Appender(_ context.Context) storage.Appender { } } +// LowestSentTimestamp returns the lowest sent timestamp across all queues. 
+func (rws *WriteStorage) LowestSentTimestamp() int64 { + rws.mtx.Lock() + defer rws.mtx.Unlock() + + var lowestTs int64 = math.MaxInt64 + + for _, q := range rws.queues { + ts := int64(q.metrics.highestSentTimestamp.Get() * 1000) + if ts < lowestTs { + lowestTs = ts + } + } + if len(rws.queues) == 0 { + lowestTs = 0 + } + + return lowestTs +} + // Close closes the WriteStorage. func (rws *WriteStorage) Close() error { rws.mtx.Lock() @@ -215,6 +243,7 @@ func (rws *WriteStorage) Close() error { for _, q := range rws.queues { q.Stop() } + close(rws.quit) return nil } @@ -228,7 +257,7 @@ type timestampTracker struct { } // Append implements storage.Appender. -func (t *timestampTracker) Append(_ uint64, _ labels.Labels, ts int64, _ float64) (uint64, error) { +func (t *timestampTracker) Append(_ storage.SeriesRef, _ labels.Labels, ts int64, _ float64) (storage.SeriesRef, error) { t.samples++ if ts > t.highestTimestamp { t.highestTimestamp = ts @@ -236,12 +265,12 @@ func (t *timestampTracker) Append(_ uint64, _ labels.Labels, ts int64, _ float64 return 0, nil } -func (t *timestampTracker) AppendExemplar(_ uint64, _ labels.Labels, _ exemplar.Exemplar) (uint64, error) { +func (t *timestampTracker) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) { t.exemplars++ return 0, nil } -func (t *timestampTracker) AppendHistogram(_ uint64, _ labels.Labels, ts int64, _ *histogram.Histogram) (uint64, error) { +func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, ts int64, _ *histogram.Histogram) (storage.SeriesRef, error) { t.histograms++ if ts > t.highestTimestamp { t.highestTimestamp = ts diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 92637cf47..42e106038 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -22,7 +22,7 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" - "github.com/prometheus/prometheus/pkg/exemplar" + 
"github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" ) @@ -81,9 +81,7 @@ func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, } func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) { - var ( - outOfOrderExemplarErrs = 0 - ) + outOfOrderExemplarErrs := 0 app := h.appendable.Appender(ctx) defer func() { diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 2d91d4f41..52873e4d0 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -25,15 +25,15 @@ import ( "github.com/go-kit/log" "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" ) func TestRemoteWriteHandler(t *testing.T) { - buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil) + buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil) require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) @@ -69,7 +69,7 @@ func TestOutOfOrderSample(t *testing.T) { buf, _, err := buildWriteRequest([]prompb.TimeSeries{{ Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}}, Samples: []prompb.Sample{{Value: 1, Timestamp: 0}}, - }}, nil, nil) + }}, nil, nil, nil) require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) @@ -94,7 +94,7 @@ func TestOutOfOrderExemplar(t *testing.T) { buf, _, err := buildWriteRequest([]prompb.TimeSeries{{ Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}}, Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "foo", Value: "bar"}}, Value: 1, 
Timestamp: 0}}, - }}, nil, nil) + }}, nil, nil, nil) require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) @@ -114,7 +114,7 @@ func TestOutOfOrderExemplar(t *testing.T) { } func TestCommitErr(t *testing.T) { - buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil) + buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil) require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) @@ -160,7 +160,7 @@ func (m *mockAppendable) Appender(_ context.Context) storage.Appender { return m } -func (m *mockAppendable) Append(_ uint64, l labels.Labels, t int64, v float64) (uint64, error) { +func (m *mockAppendable) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { if t < m.latestSample { return 0, storage.ErrOutOfOrderSample } @@ -178,7 +178,7 @@ func (*mockAppendable) Rollback() error { return fmt.Errorf("not implemented") } -func (m *mockAppendable) AppendExemplar(_ uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) { +func (m *mockAppendable) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { if e.Ts < m.latestExemplar { return 0, storage.ErrOutOfOrderExemplar } @@ -188,7 +188,7 @@ func (m *mockAppendable) AppendExemplar(_ uint64, l labels.Labels, e exemplar.Ex return 0, nil } -func (*mockAppendable) AppendHistogram(ref uint64, l labels.Labels, t int64, h *histogram.Histogram) (uint64, error) { +func (*mockAppendable) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) { // TODO(beorn7): Noop until we implement sparse histograms over remote write. 
return 0, nil } diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go index 27c568e1c..f2545337b 100644 --- a/storage/remote/write_test.go +++ b/storage/remote/write_test.go @@ -26,19 +26,21 @@ import ( "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/relabel" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" ) -var cfg = config.RemoteWriteConfig{ - Name: "dev", - URL: &common_config.URL{ - URL: &url.URL{ - Scheme: "http", - Host: "localhost", +func testRemoteWriteConfig() *config.RemoteWriteConfig { + return &config.RemoteWriteConfig{ + Name: "dev", + URL: &common_config.URL{ + URL: &url.URL{ + Scheme: "http", + Host: "localhost", + }, }, - }, - QueueConfig: config.DefaultQueueConfig, + QueueConfig: config.DefaultQueueConfig, + } } func TestNoDuplicateWriteConfigs(t *testing.T) { @@ -136,14 +138,17 @@ func TestRestartOnNameChange(t *testing.T) { require.NoError(t, os.RemoveAll(dir)) }() + cfg := testRemoteWriteConfig() + hash, err := toHash(cfg) require.NoError(t, err) s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil) + conf := &config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: []*config.RemoteWriteConfig{ - &cfg, + cfg, }, } require.NoError(t, s.ApplyConfig(conf)) @@ -218,7 +223,7 @@ func TestWriteStorageLifecycle(t *testing.T) { &config.DefaultRemoteWriteConfig, }, } - s.ApplyConfig(conf) + require.NoError(t, s.ApplyConfig(conf)) require.Equal(t, 1, len(s.queues)) err = s.Close() @@ -238,19 +243,19 @@ func TestUpdateExternalLabels(t *testing.T) { conf := &config.Config{ GlobalConfig: config.GlobalConfig{}, RemoteWriteConfigs: []*config.RemoteWriteConfig{ - &cfg, + testRemoteWriteConfig(), }, } hash, err := toHash(conf.RemoteWriteConfigs[0]) require.NoError(t, err) - s.ApplyConfig(conf) + require.NoError(t, s.ApplyConfig(conf)) 
require.Equal(t, 1, len(s.queues)) require.Equal(t, labels.Labels(nil), s.queues[hash].externalLabels) conf.GlobalConfig.ExternalLabels = externalLabels hash, err = toHash(conf.RemoteWriteConfigs[0]) require.NoError(t, err) - s.ApplyConfig(conf) + require.NoError(t, s.ApplyConfig(conf)) require.Equal(t, 1, len(s.queues)) require.Equal(t, externalLabels, s.queues[hash].externalLabels) @@ -270,22 +275,27 @@ func TestWriteStorageApplyConfigsIdempotent(t *testing.T) { conf := &config.Config{ GlobalConfig: config.GlobalConfig{}, RemoteWriteConfigs: []*config.RemoteWriteConfig{ - &config.DefaultRemoteWriteConfig, - }, - } - // We need to set URL's so that metric creation doesn't panic. - conf.RemoteWriteConfigs[0].URL = &common_config.URL{ - URL: &url.URL{ - Host: "http://test-storage.com", + { + RemoteTimeout: config.DefaultRemoteWriteConfig.RemoteTimeout, + QueueConfig: config.DefaultRemoteWriteConfig.QueueConfig, + MetadataConfig: config.DefaultRemoteWriteConfig.MetadataConfig, + HTTPClientConfig: config.DefaultRemoteWriteConfig.HTTPClientConfig, + // We need to set URL's so that metric creation doesn't panic. 
+ URL: &common_config.URL{ + URL: &url.URL{ + Host: "http://test-storage.com", + }, + }, + }, }, } hash, err := toHash(conf.RemoteWriteConfigs[0]) require.NoError(t, err) - s.ApplyConfig(conf) + require.NoError(t, s.ApplyConfig(conf)) require.Equal(t, 1, len(s.queues)) - s.ApplyConfig(conf) + require.NoError(t, s.ApplyConfig(conf)) require.Equal(t, 1, len(s.queues)) _, hashExists := s.queues[hash] require.True(t, hashExists, "Queue pointer should have remained the same") @@ -391,7 +401,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) { GlobalConfig: config.GlobalConfig{}, RemoteWriteConfigs: []*config.RemoteWriteConfig{c1, c2}, } - s.ApplyConfig(conf) + require.NoError(t, s.ApplyConfig(conf)) require.Equal(t, 2, len(s.queues)) _, hashExists = s.queues[hashes[0]] diff --git a/storage/secondary.go b/storage/secondary.go index 64a83b5e7..d66a28617 100644 --- a/storage/secondary.go +++ b/storage/secondary.go @@ -16,7 +16,7 @@ package storage import ( "sync" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" ) // secondaryQuerier is a wrapper that allows a querier to be treated in a best effort manner. diff --git a/storage/series.go b/storage/series.go index cd3531efb..1c72df733 100644 --- a/storage/series.go +++ b/storage/series.go @@ -18,7 +18,7 @@ import ( "sort" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/tsdbutil" diff --git a/template/template.go b/template/template.go index dca5aa432..e79de5ee6 100644 --- a/template/template.go +++ b/template/template.go @@ -87,7 +87,7 @@ func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) (quer // promql.Vector is hard to work with in templates, so convert to // base data types. 
// TODO(fabxc): probably not true anymore after type rework. - var result = make(queryResult, len(vector)) + result := make(queryResult, len(vector)) for n, v := range vector { s := sample{ Value: v.V, @@ -104,6 +104,10 @@ func convertToFloat(i interface{}) (float64, error) { return v, nil case string: return strconv.ParseFloat(v, 64) + case int: + return float64(v), nil + case uint: + return float64(v), nil default: return 0, fmt.Errorf("can't convert %T to float", v) } @@ -244,10 +248,11 @@ func NewTemplateExpander( sign = "-" v = -v } - seconds := int64(v) % 60 - minutes := (int64(v) / 60) % 60 - hours := (int64(v) / 60 / 60) % 24 - days := int64(v) / 60 / 60 / 24 + duration := int64(v) + seconds := duration % 60 + minutes := (duration / 60) % 60 + hours := (duration / 60 / 60) % 24 + days := duration / 60 / 60 / 24 // For days to minutes, we display seconds as an integer. if days != 0 { return fmt.Sprintf("%s%dd %dh %dm %ds", sign, days, hours, minutes, seconds), nil @@ -286,7 +291,11 @@ func NewTemplateExpander( if math.IsNaN(v) || math.IsInf(v, 0) { return fmt.Sprintf("%.4g", v), nil } - t := model.TimeFromUnixNano(int64(v * 1e9)).Time().UTC() + timestamp := v * 1e9 + if timestamp > math.MaxInt64 || timestamp < math.MinInt64 { + return "", fmt.Errorf("%v cannot be represented as a nanoseconds timestamp since it overflows int64", v) + } + t := model.TimeFromUnixNano(int64(timestamp)).Time().UTC() return fmt.Sprint(t), nil }, "pathPrefix": func() string { @@ -295,13 +304,20 @@ func NewTemplateExpander( "externalURL": func() string { return externalURL.String() }, + "parseDuration": func(d string) (float64, error) { + v, err := model.ParseDuration(d) + if err != nil { + return 0, err + } + return float64(time.Duration(v)) / float64(time.Second), nil + }, }, options: options, } } // AlertTemplateData returns the interface to be used in expanding the template. 
-func AlertTemplateData(labels map[string]string, externalLabels map[string]string, externalURL string, value float64) interface{} { +func AlertTemplateData(labels, externalLabels map[string]string, externalURL string, value float64) interface{} { return struct { Labels map[string]string ExternalLabels map[string]string diff --git a/template/template_test.go b/template/template_test.go index 6c67789ad..f3af79712 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" ) @@ -87,7 +87,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "11", }, { @@ -98,7 +99,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "a", }, { @@ -108,7 +110,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "__value__", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "a", }, { @@ -118,7 +121,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "", }, { @@ -128,7 +132,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "", }, { @@ -137,7 +142,8 @@ func TestTemplateExpansion(t *testing.T) { { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "", html: true, }, @@ -151,7 +157,8 @@ func TestTemplateExpansion(t 
*testing.T) { }, { Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"), Point: promql.Point{T: 0, V: 11}, - }}, + }, + }, output: "a:11: b:21: ", }, { @@ -236,6 +243,18 @@ func TestTemplateExpansion(t *testing.T) { shouldFail: true, errorMsg: `error executing template test: template: test:1:3: executing "test" at : error calling humanize: strconv.ParseFloat: parsing "one": invalid syntax`, }, + { + // Humanize - int. + text: "{{ range . }}{{ humanize . }}:{{ end }}", + input: []int{0, -1, 1, 1234567, math.MaxInt64}, + output: "0:-1:1:1.235M:9.223E:", + }, + { + // Humanize - uint. + text: "{{ range . }}{{ humanize . }}:{{ end }}", + input: []uint{0, 1, 1234567, math.MaxUint64}, + output: "0:1:1.235M:18.45E:", + }, { // Humanize1024 - float64. text: "{{ range . }}{{ humanize1024 . }}:{{ end }}", @@ -254,6 +273,18 @@ func TestTemplateExpansion(t *testing.T) { shouldFail: true, errorMsg: `error executing template test: template: test:1:3: executing "test" at : error calling humanize1024: strconv.ParseFloat: parsing "one": invalid syntax`, }, + { + // Humanize1024 - int. + text: "{{ range . }}{{ humanize1024 . }}:{{ end }}", + input: []int{0, -1, 1, 1234567, math.MaxInt64}, + output: "0:-1:1:1.177Mi:8Ei:", + }, + { + // Humanize1024 - uint. + text: "{{ range . }}{{ humanize1024 . }}:{{ end }}", + input: []uint{0, 1, 1234567, math.MaxUint64}, + output: "0:1:1.177Mi:16Ei:", + }, { // HumanizeDuration - seconds - float64. text: "{{ range . }}{{ humanizeDuration . }}:{{ end }}", @@ -284,6 +315,18 @@ func TestTemplateExpansion(t *testing.T) { shouldFail: true, errorMsg: `error executing template test: template: test:1:3: executing "test" at : error calling humanizeDuration: strconv.ParseFloat: parsing "one": invalid syntax`, }, + { + // HumanizeDuration - int. + text: "{{ range . }}{{ humanizeDuration . 
}}:{{ end }}", + input: []int{0, -1, 1, 1234567, math.MaxInt64}, + output: "0s:-1s:1s:14d 6h 56m 7s:-106751991167300d -15h -30m -8s:", + }, + { + // HumanizeDuration - uint. + text: "{{ range . }}{{ humanizeDuration . }}:{{ end }}", + input: []uint{0, 1, 1234567, math.MaxUint64}, + output: "0s:1s:14d 6h 56m 7s:-106751991167300d -15h -30m -8s:", + }, { // Humanize* Inf and NaN - float64. text: "{{ range . }}{{ humanize . }}:{{ humanize1024 . }}:{{ humanizeDuration . }}:{{humanizeTimestamp .}}:{{ end }}", @@ -301,6 +344,18 @@ func TestTemplateExpansion(t *testing.T) { text: "{{ -0.22222 | humanizePercentage }}:{{ 0.0 | humanizePercentage }}:{{ 0.1234567 | humanizePercentage }}:{{ 1.23456 | humanizePercentage }}", output: "-22.22%:0%:12.35%:123.5%", }, + { + // HumanizePercentage - int. + text: "{{ range . }}{{ humanizePercentage . }}:{{ end }}", + input: []int{0, -1, 1, 1234567, math.MaxInt64}, + output: "0%:-100%:100%:1.235e+08%:9.223e+20%:", + }, + { + // HumanizePercentage - uint. + text: "{{ range . }}{{ humanizePercentage . }}:{{ end }}", + input: []uint{0, 1, 1234567, math.MaxUint64}, + output: "0%:100%:1.235e+08%:1.845e+21%:", + }, { // HumanizePercentage - model.SampleValue input - string. text: `{{ "-0.22222" | humanizePercentage }}:{{ "0.0" | humanizePercentage }}:{{ "0.1234567" | humanizePercentage }}:{{ "1.23456" | humanizePercentage }}`, @@ -312,6 +367,32 @@ func TestTemplateExpansion(t *testing.T) { shouldFail: true, errorMsg: `error executing template test: template: test:1:11: executing "test" at : error calling humanizePercentage: strconv.ParseFloat: parsing "one": invalid syntax`, }, + { + // HumanizeTimestamp - int. + text: "{{ range . }}{{ humanizeTimestamp . }}:{{ end }}", + input: []int{0, -1, 1, 1234567, 9223372036}, + output: "1970-01-01 00:00:00 +0000 UTC:1969-12-31 23:59:59 +0000 UTC:1970-01-01 00:00:01 +0000 UTC:1970-01-15 06:56:07 +0000 UTC:2262-04-11 23:47:16 +0000 UTC:", + }, + { + // HumanizeTimestamp - uint. + text: "{{ range . 
}}{{ humanizeTimestamp . }}:{{ end }}", + input: []uint{0, 1, 1234567, 9223372036}, + output: "1970-01-01 00:00:00 +0000 UTC:1970-01-01 00:00:01 +0000 UTC:1970-01-15 06:56:07 +0000 UTC:2262-04-11 23:47:16 +0000 UTC:", + }, + { + // HumanizeTimestamp - int with error. + text: "{{ range . }}{{ humanizeTimestamp . }}:{{ end }}", + input: []int{math.MinInt64, math.MaxInt64}, + shouldFail: true, + errorMsg: `error executing template test: template: test:1:16: executing "test" at : error calling humanizeTimestamp: -9.223372036854776e+18 cannot be represented as a nanoseconds timestamp since it overflows int64`, + }, + { + // HumanizeTimestamp - uint with error. + text: "{{ range . }}{{ humanizeTimestamp . }}:{{ end }}", + input: []uint{math.MaxUint64}, + shouldFail: true, + errorMsg: `error executing template test: template: test:1:16: executing "test" at : error calling humanizeTimestamp: 1.8446744073709552e+19 cannot be represented as a nanoseconds timestamp since it overflows int64`, + }, { // HumanizeTimestamp - model.SampleValue input - float64. text: "{{ 1435065584.128 | humanizeTimestamp }}", @@ -368,6 +449,11 @@ func TestTemplateExpansion(t *testing.T) { text: "{{ externalURL }}", output: "http://testhost:9090/path/prefix", }, + { + // parseDuration (using printf to ensure the return is a string). 
+ text: "{{ printf \"%0.2f\" (parseDuration \"1h2m10ms\") }}", + output: "3720.01", + }, } extURL, err := url.Parse("http://testhost:9090/path/prefix") diff --git a/tsdb/README.md b/tsdb/README.md index 59f800c7a..a8dfdc540 100644 --- a/tsdb/README.md +++ b/tsdb/README.md @@ -1,16 +1,25 @@ # TSDB -[![GoPkg](https://pkg.go.dev/badge/github.com/prometheus/prometheus/tsdb.svg)](https://pkg.go.dev/github.com/prometheus/prometheus/tsdb) +[![GoPkg](https://pkg.go.dev/badge/github.com/prometheus/prometheus/tsdb.svg)](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/tsdb) -This directory contains the Prometheus storage layer that is used in its 2.x releases. +This directory contains the Prometheus TSDB (Time Series DataBase) library, +which handles storage and querying of all Prometheus v2 data. -A writeup of its design can be found [here](https://fabxc.org/blog/2017-04-10-writing-a-tsdb/). +Due to an issue with versioning, the "latest" docs shown on Godoc are outdated. +Instead you may use [the docs for v2.31.1](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab) -Based on the Gorilla TSDB [white papers](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). +## Documentation -Video: [Storing 16 Bytes at Scale](https://youtu.be/b_pEevMAC3I) from [PromCon 2017](https://promcon.io/2017-munich/). +* [Data format](docs/format/README.md). +* [Usage](docs/usage.md). +* [Bstream details](docs/bstream.md). + +## External resources + +* A writeup of the original design can be found [here](https://fabxc.org/blog/2017-04-10-writing-a-tsdb/). +* Video: [Storing 16 Bytes at Scale](https://youtu.be/b_pEevMAC3I) from [PromCon 2017](https://promcon.io/2017-munich/). +* Compression is based on the Gorilla TSDB [white paper](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). -See also the [format documentation](docs/format/README.md). 
A series of blog posts explaining different components of TSDB: * [The Head Block](https://ganeshvernekar.com/blog/prometheus-tsdb-the-head-block/) diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go new file mode 100644 index 000000000..e4c30bfc7 --- /dev/null +++ b/tsdb/agent/db.go @@ -0,0 +1,786 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package agent + +import ( + "context" + "fmt" + "path/filepath" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "go.uber.org/atomic" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/storage/remote" + "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunks" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/prometheus/prometheus/tsdb/record" + "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/prometheus/prometheus/tsdb/wal" +) + +var ErrUnsupported = errors.New("unsupported operation with WAL-only storage") + +// Default values for options. 
+var ( + DefaultTruncateFrequency = 2 * time.Hour + DefaultMinWALTime = int64(5 * time.Minute / time.Millisecond) + DefaultMaxWALTime = int64(4 * time.Hour / time.Millisecond) +) + +// Options of the WAL storage. +type Options struct { + // Segments (wal files) max size. + // WALSegmentSize <= 0, segment size is default size. + // WALSegmentSize > 0, segment size is WALSegmentSize. + WALSegmentSize int + + // WALCompression will turn on Snappy compression for records on the WAL. + WALCompression bool + + // StripeSize is the size (power of 2) in entries of the series hash map. Reducing the size will save memory but impact performance. + StripeSize int + + // TruncateFrequency determines how frequently to truncate data from the WAL. + TruncateFrequency time.Duration + + // Shortest and longest amount of time data can exist in the WAL before being + // deleted. + MinWALTime, MaxWALTime int64 + + // NoLockfile disables creation and consideration of a lock file. + NoLockfile bool +} + +// DefaultOptions used for the WAL storage. They are sane for setups using +// millisecond-precision timestamps. 
+func DefaultOptions() *Options { + return &Options{ + WALSegmentSize: wal.DefaultSegmentSize, + WALCompression: false, + StripeSize: tsdb.DefaultStripeSize, + TruncateFrequency: DefaultTruncateFrequency, + MinWALTime: DefaultMinWALTime, + MaxWALTime: DefaultMaxWALTime, + NoLockfile: false, + } +} + +type dbMetrics struct { + r prometheus.Registerer + + numActiveSeries prometheus.Gauge + numWALSeriesPendingDeletion prometheus.Gauge + totalAppendedSamples prometheus.Counter + walTruncateDuration prometheus.Summary + walCorruptionsTotal prometheus.Counter + walTotalReplayDuration prometheus.Gauge + checkpointDeleteFail prometheus.Counter + checkpointDeleteTotal prometheus.Counter + checkpointCreationFail prometheus.Counter + checkpointCreationTotal prometheus.Counter +} + +func newDBMetrics(r prometheus.Registerer) *dbMetrics { + m := dbMetrics{r: r} + m.numActiveSeries = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "prometheus_agent_active_series", + Help: "Number of active series being tracked by the WAL storage", + }) + + m.numWALSeriesPendingDeletion = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "prometheus_agent_deleted_series", + Help: "Number of series pending deletion from the WAL", + }) + + m.totalAppendedSamples = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_agent_samples_appended_total", + Help: "Total number of samples appended to the storage", + }) + + m.walTruncateDuration = prometheus.NewSummary(prometheus.SummaryOpts{ + Name: "prometheus_agent_truncate_duration_seconds", + Help: "Duration of WAL truncation.", + }) + + m.walCorruptionsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_agent_corruptions_total", + Help: "Total number of WAL corruptions.", + }) + + m.walTotalReplayDuration = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "prometheus_agent_data_replay_duration_seconds", + Help: "Time taken to replay the data on disk.", + }) + + m.checkpointDeleteFail = 
prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_agent_checkpoint_deletions_failed_total", + Help: "Total number of checkpoint deletions that failed.", + }) + + m.checkpointDeleteTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_agent_checkpoint_deletions_total", + Help: "Total number of checkpoint deletions attempted.", + }) + + m.checkpointCreationFail = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_agent_checkpoint_creations_failed_total", + Help: "Total number of checkpoint creations that failed.", + }) + + m.checkpointCreationTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_agent_checkpoint_creations_total", + Help: "Total number of checkpoint creations attempted.", + }) + + if r != nil { + r.MustRegister( + m.numActiveSeries, + m.numWALSeriesPendingDeletion, + m.totalAppendedSamples, + m.walTruncateDuration, + m.walCorruptionsTotal, + m.walTotalReplayDuration, + m.checkpointDeleteFail, + m.checkpointDeleteTotal, + m.checkpointCreationFail, + m.checkpointCreationTotal, + ) + } + + return &m +} + +func (m *dbMetrics) Unregister() { + if m.r == nil { + return + } + cs := []prometheus.Collector{ + m.numActiveSeries, + m.numWALSeriesPendingDeletion, + m.totalAppendedSamples, + } + for _, c := range cs { + m.r.Unregister(c) + } +} + +// DB represents a WAL-only storage. It implements storage.DB. +type DB struct { + mtx sync.RWMutex + logger log.Logger + opts *Options + rs *remote.Storage + + wal *wal.WAL + locker *tsdbutil.DirLocker + + appenderPool sync.Pool + bufPool sync.Pool + + nextRef *atomic.Uint64 + series *stripeSeries + // deleted is a map of (ref IDs that should be deleted from WAL) to (the WAL segment they + // must be kept around to). + deleted map[chunks.HeadSeriesRef]int + + donec chan struct{} + stopc chan struct{} + + metrics *dbMetrics +} + +// Open returns a new agent.DB in the given directory. 
+func Open(l log.Logger, reg prometheus.Registerer, rs *remote.Storage, dir string, opts *Options) (*DB, error) { + opts = validateOptions(opts) + + locker, err := tsdbutil.NewDirLocker(dir, "agent", l, reg) + if err != nil { + return nil, err + } + if !opts.NoLockfile { + if err := locker.Lock(); err != nil { + return nil, err + } + } + + // remote_write expects WAL to be stored in a "wal" subdirectory of the main storage. + dir = filepath.Join(dir, "wal") + + w, err := wal.NewSize(l, reg, dir, opts.WALSegmentSize, opts.WALCompression) + if err != nil { + return nil, errors.Wrap(err, "creating WAL") + } + + db := &DB{ + logger: l, + opts: opts, + rs: rs, + + wal: w, + locker: locker, + + nextRef: atomic.NewUint64(0), + series: newStripeSeries(opts.StripeSize), + deleted: make(map[chunks.HeadSeriesRef]int), + + donec: make(chan struct{}), + stopc: make(chan struct{}), + + metrics: newDBMetrics(reg), + } + + db.bufPool.New = func() interface{} { + return make([]byte, 0, 1024) + } + + db.appenderPool.New = func() interface{} { + return &appender{ + DB: db, + pendingSeries: make([]record.RefSeries, 0, 100), + pendingSamples: make([]record.RefSample, 0, 100), + } + } + + if err := db.replayWAL(); err != nil { + level.Warn(db.logger).Log("msg", "encountered WAL read error, attempting repair", "err", err) + if err := w.Repair(err); err != nil { + return nil, errors.Wrap(err, "repair corrupted WAL") + } + } + + go db.run() + return db, nil +} + +func validateOptions(opts *Options) *Options { + if opts == nil { + opts = DefaultOptions() + } + if opts.WALSegmentSize <= 0 { + opts.WALSegmentSize = wal.DefaultSegmentSize + } + + // Revert Stripesize to DefaultStripsize if Stripsize is either 0 or not a power of 2. 
+ if opts.StripeSize <= 0 || ((opts.StripeSize & (opts.StripeSize - 1)) != 0) { + opts.StripeSize = tsdb.DefaultStripeSize + } + if opts.TruncateFrequency <= 0 { + opts.TruncateFrequency = DefaultTruncateFrequency + } + if opts.MinWALTime <= 0 { + opts.MinWALTime = 0 + } + if opts.MaxWALTime <= 0 { + opts.MaxWALTime = DefaultMaxWALTime + } + + if t := int64(opts.TruncateFrequency * time.Hour / time.Millisecond); opts.MaxWALTime < t { + opts.MaxWALTime = t + } + return opts +} + +func (db *DB) replayWAL() error { + level.Info(db.logger).Log("msg", "replaying WAL, this may take a while", "dir", db.wal.Dir()) + start := time.Now() + + dir, startFrom, err := wal.LastCheckpoint(db.wal.Dir()) + if err != nil && err != record.ErrNotFound { + return errors.Wrap(err, "find last checkpoint") + } + + multiRef := map[chunks.HeadSeriesRef]chunks.HeadSeriesRef{} + + if err == nil { + sr, err := wal.NewSegmentsReader(dir) + if err != nil { + return errors.Wrap(err, "open checkpoint") + } + defer func() { + if err := sr.Close(); err != nil { + level.Warn(db.logger).Log("msg", "error while closing the wal segments reader", "err", err) + } + }() + + // A corrupted checkpoint is a hard error for now and requires user + // intervention. There's likely little data that can be recovered anyway. + if err := db.loadWAL(wal.NewReader(sr), multiRef); err != nil { + return errors.Wrap(err, "backfill checkpoint") + } + startFrom++ + level.Info(db.logger).Log("msg", "WAL checkpoint loaded") + } + + // Find the last segment. + _, last, err := wal.Segments(db.wal.Dir()) + if err != nil { + return errors.Wrap(err, "finding WAL segments") + } + + // Backfil segments from the most recent checkpoint onwards. 
+ for i := startFrom; i <= last; i++ { + seg, err := wal.OpenReadSegment(wal.SegmentName(db.wal.Dir(), i)) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("open WAL segment: %d", i)) + } + + sr := wal.NewSegmentBufReader(seg) + err = db.loadWAL(wal.NewReader(sr), multiRef) + if err := sr.Close(); err != nil { + level.Warn(db.logger).Log("msg", "error while closing the wal segments reader", "err", err) + } + if err != nil { + return err + } + level.Info(db.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", last) + } + + walReplayDuration := time.Since(start) + db.metrics.walTotalReplayDuration.Set(walReplayDuration.Seconds()) + + return nil +} + +func (db *DB) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef) (err error) { + var ( + dec record.Decoder + lastRef chunks.HeadSeriesRef + + decoded = make(chan interface{}, 10) + errCh = make(chan error, 1) + seriesPool = sync.Pool{ + New: func() interface{} { + return []record.RefSeries{} + }, + } + samplesPool = sync.Pool{ + New: func() interface{} { + return []record.RefSample{} + }, + } + ) + + go func() { + defer close(decoded) + for r.Next() { + rec := r.Record() + switch dec.Type(rec) { + case record.Series: + series := seriesPool.Get().([]record.RefSeries)[:0] + series, err = dec.Series(rec, series) + if err != nil { + errCh <- &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode series"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- series + case record.Samples: + samples := samplesPool.Get().([]record.RefSample)[:0] + samples, err = dec.Samples(rec, samples) + if err != nil { + errCh <- &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode samples"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- samples + case record.Tombstones: + // We don't care about tombstones + continue + case record.Exemplars: + // We don't care about exemplars + continue + default: + errCh <- &wal.CorruptionErr{ + Err: 
errors.Errorf("invalid record type %v", dec.Type(rec)), + Segment: r.Segment(), + Offset: r.Offset(), + } + } + } + }() + + var nonExistentSeriesRefs atomic.Uint64 + + for d := range decoded { + switch v := d.(type) { + case []record.RefSeries: + for _, entry := range v { + // If this is a new series, create it in memory. If we never read in a + // sample for this series, its timestamp will remain at 0 and it will + // be deleted at the next GC. + if db.series.GetByID(entry.Ref) == nil { + series := &memSeries{ref: entry.Ref, lset: entry.Labels, lastTs: 0} + db.series.Set(entry.Labels.Hash(), series) + multiRef[entry.Ref] = series.ref + db.metrics.numActiveSeries.Inc() + if entry.Ref > lastRef { + lastRef = entry.Ref + } + } + } + + //nolint:staticcheck + seriesPool.Put(v) + case []record.RefSample: + for _, entry := range v { + // Update the lastTs for the series based + ref, ok := multiRef[entry.Ref] + if !ok { + nonExistentSeriesRefs.Inc() + continue + } + series := db.series.GetByID(ref) + if entry.T > series.lastTs { + series.lastTs = entry.T + } + } + + //nolint:staticcheck + samplesPool.Put(v) + default: + panic(fmt.Errorf("unexpected decoded type: %T", d)) + } + } + + if v := nonExistentSeriesRefs.Load(); v > 0 { + level.Warn(db.logger).Log("msg", "found sample referencing non-existing series", "skipped_series", v) + } + + db.nextRef.Store(uint64(lastRef)) + + select { + case err := <-errCh: + return err + default: + if r.Err() != nil { + return errors.Wrap(r.Err(), "read records") + } + return nil + } +} + +func (db *DB) run() { + defer close(db.donec) + +Loop: + for { + select { + case <-db.stopc: + break Loop + case <-time.After(db.opts.TruncateFrequency): + // The timestamp ts is used to determine which series are not receiving + // samples and may be deleted from the WAL. Their most recent append + // timestamp is compared to ts, and if that timestamp is older then ts, + // they are considered inactive and may be deleted. 
+ // + // Subtracting a duration from ts will add a buffer for when series are + // considered inactive and safe for deletion. + ts := db.rs.LowestSentTimestamp() - db.opts.MinWALTime + if ts < 0 { + ts = 0 + } + + // Network issues can prevent the result of getRemoteWriteTimestamp from + // changing. We don't want data in the WAL to grow forever, so we set a cap + // on the maximum age data can be. If our ts is older than this cutoff point, + // we'll shift it forward to start deleting very stale data. + if maxTS := timestamp.FromTime(time.Now()) - db.opts.MaxWALTime; ts < maxTS { + ts = maxTS + } + + level.Debug(db.logger).Log("msg", "truncating the WAL", "ts", ts) + if err := db.truncate(ts); err != nil { + level.Warn(db.logger).Log("msg", "failed to truncate WAL", "err", err) + } + } + } +} + +func (db *DB) truncate(mint int64) error { + db.mtx.RLock() + defer db.mtx.RUnlock() + + start := time.Now() + + db.gc(mint) + level.Info(db.logger).Log("msg", "series GC completed", "duration", time.Since(start)) + + first, last, err := wal.Segments(db.wal.Dir()) + if err != nil { + return errors.Wrap(err, "get segment range") + } + + // Start a new segment so low ingestion volume instances don't have more WAL + // than needed. + err = db.wal.NextSegment() + if err != nil { + return errors.Wrap(err, "next segment") + } + + last-- // Never consider most recent segment for checkpoint + if last < 0 { + return nil // no segments yet + } + + // The lower two-thirds of segments should contain mostly obsolete samples. + // If we have less than two segments, it's not worth checkpointing yet. 
+ last = first + (last-first)*2/3 + if last <= first { + return nil + } + + keep := func(id chunks.HeadSeriesRef) bool { + if db.series.GetByID(id) != nil { + return true + } + + seg, ok := db.deleted[id] + return ok && seg >= first + } + + db.metrics.checkpointCreationTotal.Inc() + + if _, err = wal.Checkpoint(db.logger, db.wal, first, last, keep, mint); err != nil { + db.metrics.checkpointCreationFail.Inc() + if _, ok := errors.Cause(err).(*wal.CorruptionErr); ok { + db.metrics.walCorruptionsTotal.Inc() + } + return errors.Wrap(err, "create checkpoint") + } + if err := db.wal.Truncate(last + 1); err != nil { + // If truncating fails, we'll just try it again at the next checkpoint. + // Leftover segments will still just be ignored in the future if there's a + // checkpoint that supersedes them. + level.Error(db.logger).Log("msg", "truncating segments failed", "err", err) + } + + // The checkpoint is written and segments before it are truncated, so we + // no longer need to track deleted series that were being kept around. + for ref, segment := range db.deleted { + if segment < first { + delete(db.deleted, ref) + } + } + db.metrics.checkpointDeleteTotal.Inc() + db.metrics.numWALSeriesPendingDeletion.Set(float64(len(db.deleted))) + + if err := wal.DeleteCheckpoints(db.wal.Dir(), last); err != nil { + // Leftover old checkpoints do not cause problems down the line beyond + // occupying disk space. They will just be ignored since a newer checkpoint + // exists. + level.Error(db.logger).Log("msg", "delete old checkpoints", "err", err) + db.metrics.checkpointDeleteFail.Inc() + } + + db.metrics.walTruncateDuration.Observe(time.Since(start).Seconds()) + + level.Info(db.logger).Log("msg", "WAL checkpoint complete", "first", first, "last", last, "duration", time.Since(start)) + return nil +} + +// gc marks ref IDs that have not received a sample since mint as deleted in +// s.deleted, along with the segment where they originally got deleted. 
+func (db *DB) gc(mint int64) { + deleted := db.series.GC(mint) + db.metrics.numActiveSeries.Sub(float64(len(deleted))) + + _, last, _ := wal.Segments(db.wal.Dir()) + + // We want to keep series records for any newly deleted series + // until we've passed the last recorded segment. This prevents + // the WAL having samples for series records that no longer exist. + for ref := range deleted { + db.deleted[ref] = last + } + + db.metrics.numWALSeriesPendingDeletion.Set(float64(len(db.deleted))) +} + +// StartTime implements the Storage interface. +func (db *DB) StartTime() (int64, error) { + return int64(model.Latest), nil +} + +// Querier implements the Storage interface. +func (db *DB) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return nil, ErrUnsupported +} + +// ChunkQuerier implements the Storage interface. +func (db *DB) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { + return nil, ErrUnsupported +} + +// ExemplarQuerier implements the Storage interface. +func (db *DB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) { + return nil, ErrUnsupported +} + +// Appender implements storage.Storage. +func (db *DB) Appender(_ context.Context) storage.Appender { + return db.appenderPool.Get().(storage.Appender) +} + +// Close implements the Storage interface. 
+func (db *DB) Close() error { + db.mtx.Lock() + defer db.mtx.Unlock() + + close(db.stopc) + <-db.donec + + db.metrics.Unregister() + + return tsdb_errors.NewMulti(db.locker.Release(), db.wal.Close()).Err() +} + +type appender struct { + *DB + + pendingSeries []record.RefSeries + pendingSamples []record.RefSample +} + +func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { + if ref == 0 { + r, err := a.Add(l, t, v) + return storage.SeriesRef(r), err + } + return ref, a.AddFast(chunks.HeadSeriesRef(ref), t, v) +} + +func (a *appender) Add(l labels.Labels, t int64, v float64) (chunks.HeadSeriesRef, error) { + hash := l.Hash() + series := a.series.GetByHash(hash, l) + if series != nil { + return series.ref, a.AddFast(series.ref, t, v) + } + + // Ensure no empty or duplicate labels have gotten through. This mirrors the + // equivalent validation code in the TSDB's headAppender. + l = l.WithoutEmpty() + if len(l) == 0 { + return 0, errors.Wrap(tsdb.ErrInvalidSample, "empty labelset") + } + + if lbl, dup := l.HasDuplicateLabelNames(); dup { + return 0, errors.Wrap(tsdb.ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, lbl)) + } + + ref := chunks.HeadSeriesRef(a.nextRef.Inc()) + series = &memSeries{ref: ref, lset: l, lastTs: t} + + a.pendingSeries = append(a.pendingSeries, record.RefSeries{ + Ref: ref, + Labels: l, + }) + a.pendingSamples = append(a.pendingSamples, record.RefSample{ + Ref: ref, + T: t, + V: v, + }) + + a.series.Set(hash, series) + + a.metrics.numActiveSeries.Inc() + a.metrics.totalAppendedSamples.Inc() + + return series.ref, nil +} + +func (a *appender) AddFast(ref chunks.HeadSeriesRef, t int64, v float64) error { + series := a.series.GetByID(ref) + if series == nil { + return storage.ErrNotFound + } + series.Lock() + defer series.Unlock() + + // Update last recorded timestamp. Used by Storage.gc to determine if a + // series is dead. 
+ series.lastTs = t + + a.pendingSamples = append(a.pendingSamples, record.RefSample{ + Ref: ref, + T: t, + V: v, + }) + + a.metrics.totalAppendedSamples.Inc() + return nil +} + +func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { + // remote_write doesn't support exemplars yet, so do nothing here. + return 0, nil +} + +func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) { + // remote_write doesn't support histograms yet, so do nothing here. + return 0, nil +} + +// Commit submits the collected samples and purges the batch. +func (a *appender) Commit() error { + a.mtx.RLock() + defer a.mtx.RUnlock() + + var encoder record.Encoder + buf := a.bufPool.Get().([]byte) + + if len(a.pendingSeries) > 0 { + buf = encoder.Series(a.pendingSeries, buf) + if err := a.wal.Log(buf); err != nil { + return err + } + buf = buf[:0] + } + + if len(a.pendingSamples) > 0 { + buf = encoder.Samples(a.pendingSamples, buf) + if err := a.wal.Log(buf); err != nil { + return err + } + buf = buf[:0] + } + + //nolint:staticcheck + a.bufPool.Put(buf) + return a.Rollback() +} + +func (a *appender) Rollback() error { + a.pendingSeries = a.pendingSeries[:0] + a.pendingSamples = a.pendingSamples[:0] + a.appenderPool.Put(a) + return nil +} diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go new file mode 100644 index 000000000..4a196180d --- /dev/null +++ b/tsdb/agent/db_test.go @@ -0,0 +1,479 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package agent + +import ( + "context" + "strconv" + "sync" + "testing" + "time" + + "github.com/go-kit/log" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage/remote" + "github.com/prometheus/prometheus/tsdb/record" + "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/prometheus/prometheus/tsdb/wal" + "github.com/prometheus/prometheus/util/testutil" +) + +func TestUnsupported(t *testing.T) { + promAgentDir := t.TempDir() + + opts := DefaultOptions() + logger := log.NewNopLogger() + + s, err := Open(logger, prometheus.NewRegistry(), nil, promAgentDir, opts) + require.NoError(t, err) + defer func() { + require.NoError(t, s.Close()) + }() + + t.Run("Querier", func(t *testing.T) { + _, err := s.Querier(context.TODO(), 0, 0) + require.Equal(t, err, ErrUnsupported) + }) + + t.Run("ChunkQuerier", func(t *testing.T) { + _, err := s.ChunkQuerier(context.TODO(), 0, 0) + require.Equal(t, err, ErrUnsupported) + }) + + t.Run("ExemplarQuerier", func(t *testing.T) { + _, err := s.ExemplarQuerier(context.TODO()) + require.Equal(t, err, ErrUnsupported) + }) +} + +func TestCommit(t *testing.T) { + const ( + numDatapoints = 1000 + numSeries = 8 + ) + + promAgentDir := t.TempDir() + + lbls := labelsForTest(t.Name(), numSeries) + opts := DefaultOptions() + logger := log.NewNopLogger() + reg := prometheus.NewRegistry() + remoteStorage := remote.NewStorage(log.With(logger, 
"component", "remote"), reg, startTime, promAgentDir, time.Second*30, nil) + defer func(rs *remote.Storage) { + require.NoError(t, rs.Close()) + }(remoteStorage) + + s, err := Open(logger, reg, remoteStorage, promAgentDir, opts) + require.NoError(t, err) + + a := s.Appender(context.TODO()) + + for _, l := range lbls { + lset := labels.New(l...) + + for i := 0; i < numDatapoints; i++ { + sample := tsdbutil.GenerateSamples(0, 1) + _, err := a.Append(0, lset, sample[0].T(), sample[0].V()) + require.NoError(t, err) + } + } + + require.NoError(t, a.Commit()) + require.NoError(t, s.Close()) + + // Read records from WAL and check for expected count of series and samples. + walSeriesCount := 0 + walSamplesCount := 0 + + reg = prometheus.NewRegistry() + remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), reg, startTime, promAgentDir, time.Second*30, nil) + defer func() { + require.NoError(t, remoteStorage.Close()) + }() + + s1, err := Open(logger, nil, remoteStorage, promAgentDir, opts) + require.NoError(t, err) + defer func() { + require.NoError(t, s1.Close()) + }() + + var dec record.Decoder + + if err == nil { + sr, err := wal.NewSegmentsReader(s1.wal.Dir()) + require.NoError(t, err) + defer func() { + require.NoError(t, sr.Close()) + }() + + r := wal.NewReader(sr) + seriesPool := sync.Pool{ + New: func() interface{} { + return []record.RefSeries{} + }, + } + samplesPool := sync.Pool{ + New: func() interface{} { + return []record.RefSample{} + }, + } + + for r.Next() { + rec := r.Record() + switch dec.Type(rec) { + case record.Series: + series := seriesPool.Get().([]record.RefSeries)[:0] + series, _ = dec.Series(rec, series) + walSeriesCount += len(series) + case record.Samples: + samples := samplesPool.Get().([]record.RefSample)[:0] + samples, _ = dec.Samples(rec, samples) + walSamplesCount += len(samples) + default: + } + } + } + + // Retrieved series count from WAL should match the count of series been added to the WAL. 
+ require.Equal(t, walSeriesCount, numSeries) + + // Retrieved samples count from WAL should match the count of samples been added to the WAL. + require.Equal(t, walSamplesCount, numSeries*numDatapoints) +} + +func TestRollback(t *testing.T) { + const ( + numDatapoints = 1000 + numSeries = 8 + ) + + promAgentDir := t.TempDir() + + lbls := labelsForTest(t.Name(), numSeries) + opts := DefaultOptions() + logger := log.NewNopLogger() + reg := prometheus.NewRegistry() + remoteStorage := remote.NewStorage(log.With(logger, "component", "remote"), reg, startTime, promAgentDir, time.Second*30, nil) + defer func(rs *remote.Storage) { + require.NoError(t, rs.Close()) + }(remoteStorage) + + s, err := Open(logger, reg, remoteStorage, promAgentDir, opts) + require.NoError(t, err) + + a := s.Appender(context.TODO()) + + for _, l := range lbls { + lset := labels.New(l...) + + for i := 0; i < numDatapoints; i++ { + sample := tsdbutil.GenerateSamples(0, 1) + _, err := a.Append(0, lset, sample[0].T(), sample[0].V()) + require.NoError(t, err) + } + } + + require.NoError(t, a.Rollback()) + require.NoError(t, s.Close()) + + // Read records from WAL and check for expected count of series and samples. 
+ walSeriesCount := 0 + walSamplesCount := 0 + + reg = prometheus.NewRegistry() + remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), reg, startTime, promAgentDir, time.Second*30, nil) + defer func() { + require.NoError(t, remoteStorage.Close()) + }() + + s1, err := Open(logger, nil, remoteStorage, promAgentDir, opts) + require.NoError(t, err) + defer func() { + require.NoError(t, s1.Close()) + }() + + var dec record.Decoder + + if err == nil { + sr, err := wal.NewSegmentsReader(s1.wal.Dir()) + require.NoError(t, err) + defer func() { + require.NoError(t, sr.Close()) + }() + + r := wal.NewReader(sr) + seriesPool := sync.Pool{ + New: func() interface{} { + return []record.RefSeries{} + }, + } + samplesPool := sync.Pool{ + New: func() interface{} { + return []record.RefSample{} + }, + } + + for r.Next() { + rec := r.Record() + switch dec.Type(rec) { + case record.Series: + series := seriesPool.Get().([]record.RefSeries)[:0] + series, _ = dec.Series(rec, series) + walSeriesCount += len(series) + case record.Samples: + samples := samplesPool.Get().([]record.RefSample)[:0] + samples, _ = dec.Samples(rec, samples) + walSamplesCount += len(samples) + default: + } + } + } + + // Retrieved series count from WAL should be zero. + require.Equal(t, walSeriesCount, 0) + + // Retrieved samples count from WAL should be zero. 
+ require.Equal(t, walSamplesCount, 0) +} + +func TestFullTruncateWAL(t *testing.T) { + const ( + numDatapoints = 1000 + numSeries = 800 + lastTs = 500 + ) + + promAgentDir := t.TempDir() + + lbls := labelsForTest(t.Name(), numSeries) + opts := DefaultOptions() + opts.TruncateFrequency = time.Minute * 2 + logger := log.NewNopLogger() + reg := prometheus.NewRegistry() + remoteStorage := remote.NewStorage(log.With(logger, "component", "remote"), reg, startTime, promAgentDir, time.Second*30, nil) + defer func() { + require.NoError(t, remoteStorage.Close()) + }() + + s, err := Open(logger, reg, remoteStorage, promAgentDir, opts) + require.NoError(t, err) + defer func() { + require.NoError(t, s.Close()) + }() + + a := s.Appender(context.TODO()) + + for _, l := range lbls { + lset := labels.New(l...) + + for i := 0; i < numDatapoints; i++ { + _, err := a.Append(0, lset, int64(lastTs), 0) + require.NoError(t, err) + } + require.NoError(t, a.Commit()) + } + + // Truncate WAL with mint to GC all the samples. + s.truncate(lastTs + 1) + + m := gatherFamily(t, reg, "prometheus_agent_deleted_series") + require.Equal(t, float64(numSeries), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count") +} + +func TestPartialTruncateWAL(t *testing.T) { + const ( + numDatapoints = 1000 + numSeries = 800 + ) + + promAgentDir := t.TempDir() + + opts := DefaultOptions() + opts.TruncateFrequency = time.Minute * 2 + logger := log.NewNopLogger() + reg := prometheus.NewRegistry() + remoteStorage := remote.NewStorage(log.With(logger, "component", "remote"), reg, startTime, promAgentDir, time.Second*30, nil) + defer func() { + require.NoError(t, remoteStorage.Close()) + }() + + s, err := Open(logger, reg, remoteStorage, promAgentDir, opts) + require.NoError(t, err) + defer func() { + require.NoError(t, s.Close()) + }() + + a := s.Appender(context.TODO()) + + var lastTs int64 + + // Create first batch of 800 series with 1000 data-points with a fixed lastTs as 500. 
+ lastTs = 500 + lbls := labelsForTest(t.Name()+"batch-1", numSeries) + for _, l := range lbls { + lset := labels.New(l...) + + for i := 0; i < numDatapoints; i++ { + _, err := a.Append(0, lset, lastTs, 0) + require.NoError(t, err) + } + require.NoError(t, a.Commit()) + } + + // Create second batch of 800 series with 1000 data-points with a fixed lastTs as 600. + lastTs = 600 + + lbls = labelsForTest(t.Name()+"batch-2", numSeries) + for _, l := range lbls { + lset := labels.New(l...) + + for i := 0; i < numDatapoints; i++ { + _, err := a.Append(0, lset, lastTs, 0) + require.NoError(t, err) + } + require.NoError(t, a.Commit()) + } + + // Truncate WAL with mint to GC only the first batch of 800 series and retaining 2nd batch of 800 series. + s.truncate(lastTs - 1) + + m := gatherFamily(t, reg, "prometheus_agent_deleted_series") + require.Equal(t, m.Metric[0].Gauge.GetValue(), float64(numSeries), "agent wal truncate mismatch of deleted series count") +} + +func TestWALReplay(t *testing.T) { + const ( + numDatapoints = 1000 + numSeries = 8 + lastTs = 500 + ) + + promAgentDir := t.TempDir() + + lbls := labelsForTest(t.Name(), numSeries) + opts := DefaultOptions() + + logger := log.NewNopLogger() + reg := prometheus.NewRegistry() + remoteStorage := remote.NewStorage(log.With(logger, "component", "remote"), reg, startTime, promAgentDir, time.Second*30, nil) + defer func() { + require.NoError(t, remoteStorage.Close()) + }() + + s, err := Open(logger, reg, remoteStorage, promAgentDir, opts) + require.NoError(t, err) + + a := s.Appender(context.TODO()) + + for _, l := range lbls { + lset := labels.New(l...) 
+ + for i := 0; i < numDatapoints; i++ { + _, err := a.Append(0, lset, lastTs, 0) + require.NoError(t, err) + } + } + + require.NoError(t, a.Commit()) + require.NoError(t, s.Close()) + + restartOpts := DefaultOptions() + restartLogger := log.NewNopLogger() + restartReg := prometheus.NewRegistry() + + // Open a new DB with the same WAL to check that series from the previous DB + // get replayed. + replayDB, err := Open(restartLogger, restartReg, nil, promAgentDir, restartOpts) + require.NoError(t, err) + defer func() { + require.NoError(t, replayDB.Close()) + }() + + // Check if all the series are retrieved back from the WAL. + m := gatherFamily(t, restartReg, "prometheus_agent_active_series") + require.Equal(t, float64(numSeries), m.Metric[0].Gauge.GetValue(), "agent wal replay mismatch of active series count") + + // Check if lastTs of the samples retrieved from the WAL is retained. + metrics := replayDB.series.series + for i := 0; i < len(metrics); i++ { + mp := metrics[i] + for _, v := range mp { + require.Equal(t, v.lastTs, int64(lastTs)) + } + } +} + +func TestLockfile(t *testing.T) { + tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) { + logger := log.NewNopLogger() + reg := prometheus.NewRegistry() + rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil) + t.Cleanup(func() { + require.NoError(t, rs.Close()) + }) + + opts := DefaultOptions() + opts.NoLockfile = !createLock + + // Create the DB. This should create lockfile and its metrics. + db, err := Open(logger, nil, rs, data, opts) + require.NoError(t, err) + + return db.locker, testutil.NewCallbackCloser(func() { + require.NoError(t, db.Close()) + }) + }) +} + +func startTime() (int64, error) { + return time.Now().Unix() * 1000, nil +} + +// Create series for tests. 
+func labelsForTest(lName string, seriesCount int) []labels.Labels { + var series []labels.Labels + + for i := 0; i < seriesCount; i++ { + lset := labels.Labels{ + {Name: "a", Value: lName}, + {Name: "job", Value: "prometheus"}, + {Name: "instance", Value: "localhost" + strconv.Itoa(i)}, + } + series = append(series, lset) + } + + return series +} + +func gatherFamily(t *testing.T, reg prometheus.Gatherer, familyName string) *dto.MetricFamily { + t.Helper() + + families, err := reg.Gather() + require.NoError(t, err, "failed to gather metrics") + + for _, f := range families { + if f.GetName() == familyName { + return f + } + } + + t.Fatalf("could not find family %s", familyName) + + return nil +} diff --git a/tsdb/agent/series.go b/tsdb/agent/series.go new file mode 100644 index 000000000..73fcb6009 --- /dev/null +++ b/tsdb/agent/series.go @@ -0,0 +1,177 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package agent + +import ( + "sync" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/tsdb/chunks" +) + +// memSeries is a chunkless version of tsdb.memSeries. +type memSeries struct { + sync.Mutex + + ref chunks.HeadSeriesRef + lset labels.Labels + lastTs int64 +} + +// seriesHashmap is a simple hashmap for memSeries by their label set. +// It is built on top of a regular hashmap and holds a slice of series to +// resolve hash collisions. 
Its methods require the hash to be submitted +// with the label set to avoid re-computing hash throughout the code. +type seriesHashmap map[uint64][]*memSeries + +func (m seriesHashmap) Get(hash uint64, lset labels.Labels) *memSeries { + for _, s := range m[hash] { + if labels.Equal(s.lset, lset) { + return s + } + } + return nil +} + +func (m seriesHashmap) Set(hash uint64, s *memSeries) { + seriesSet := m[hash] + for i, prev := range seriesSet { + if labels.Equal(prev.lset, s.lset) { + seriesSet[i] = s + return + } + } + m[hash] = append(seriesSet, s) +} + +func (m seriesHashmap) Delete(hash uint64, ref chunks.HeadSeriesRef) { + var rem []*memSeries + for _, s := range m[hash] { + if s.ref != ref { + rem = append(rem, s) + } + } + if len(rem) == 0 { + delete(m, hash) + } else { + m[hash] = rem + } +} + +// stripeSeries locks modulo ranges of IDs and hashes to reduce lock +// contention. The locks are padded to not be on the same cache line. +// Filling the padded space with the maps was profiled to be slower - +// likely due to the additional pointer dereferences. +type stripeSeries struct { + size int + series []map[chunks.HeadSeriesRef]*memSeries + hashes []seriesHashmap + locks []stripeLock +} + +type stripeLock struct { + sync.RWMutex + // Padding to avoid multiple locks being on the same cache line. + _ [40]byte +} + +func newStripeSeries(stripeSize int) *stripeSeries { + s := &stripeSeries{ + size: stripeSize, + series: make([]map[chunks.HeadSeriesRef]*memSeries, stripeSize), + hashes: make([]seriesHashmap, stripeSize), + locks: make([]stripeLock, stripeSize), + } + for i := range s.series { + s.series[i] = map[chunks.HeadSeriesRef]*memSeries{} + } + for i := range s.hashes { + s.hashes[i] = seriesHashmap{} + } + return s +} + +// GC garbage collects old series that have not received a sample after mint +// and will fully delete them. 
+func (s *stripeSeries) GC(mint int64) map[chunks.HeadSeriesRef]struct{} { + deleted := map[chunks.HeadSeriesRef]struct{}{} + + for hashLock := 0; hashLock < s.size; hashLock++ { + s.locks[hashLock].Lock() + + for hash, all := range s.hashes[hashLock] { + for _, series := range all { + series.Lock() + + // Any series that has received a write since mint is still alive. + if series.lastTs >= mint { + series.Unlock() + continue + } + + // The series is stale. We need to obtain a second lock for the + // ref if it's different than the hash lock. + refLock := int(series.ref) & (s.size - 1) + if hashLock != refLock { + s.locks[refLock].Lock() + } + + deleted[series.ref] = struct{}{} + delete(s.series[refLock], series.ref) + s.hashes[hashLock].Delete(hash, series.ref) + + if hashLock != refLock { + s.locks[refLock].Unlock() + } + series.Unlock() + } + } + + s.locks[hashLock].Unlock() + } + + return deleted +} + +func (s *stripeSeries) GetByID(id chunks.HeadSeriesRef) *memSeries { + refLock := uint64(id) & uint64(s.size-1) + s.locks[refLock].RLock() + defer s.locks[refLock].RUnlock() + return s.series[refLock][id] +} + +func (s *stripeSeries) GetByHash(hash uint64, lset labels.Labels) *memSeries { + hashLock := hash & uint64(s.size-1) + + s.locks[hashLock].RLock() + defer s.locks[hashLock].RUnlock() + return s.hashes[hashLock].Get(hash, lset) +} + +func (s *stripeSeries) Set(hash uint64, series *memSeries) { + var ( + hashLock = hash & uint64(s.size-1) + refLock = uint64(series.ref) & uint64(s.size-1) + ) + s.locks[hashLock].Lock() + defer s.locks[hashLock].Unlock() + + if hashLock != refLock { + s.locks[refLock].Lock() + defer s.locks[refLock].Unlock() + } + + s.hashes[hashLock].Set(hash, series) + s.series[refLock][series.ref] = series +} diff --git a/tsdb/block.go b/tsdb/block.go index 42a91ff59..e4db21f85 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -28,7 +28,8 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" - 
"github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -49,7 +50,7 @@ type IndexWriter interface { // Implementations may require series to be insert in strictly increasing order by // their labels. The reference numbers are used to resolve entries in postings lists // that are added later. - AddSeries(ref uint64, l labels.Labels, chunks ...chunks.Meta) error + AddSeries(ref storage.SeriesRef, l labels.Labels, chunks ...chunks.Meta) error // Close writes any finalization and closes the resources associated with // the underlying writer. @@ -82,7 +83,7 @@ type IndexReader interface { // Series populates the given labels and chunk metas for the series identified // by the reference. // Returns storage.ErrNotFound if the ref does not resolve to a known series. - Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error + Series(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error // LabelNames returns all the unique label names present in the index in sorted order. LabelNames(matchers ...*labels.Matcher) ([]string, error) @@ -90,11 +91,11 @@ type IndexReader interface { // LabelValueFor returns label value for the given label name in the series referred to by ID. // If the series couldn't be found or the series doesn't have the requested label a // storage.ErrNotFound is returned as error. - LabelValueFor(id uint64, label string) (string, error) + LabelValueFor(id storage.SeriesRef, label string) (string, error) // LabelNamesFor returns all the label names for the series referred to by IDs. // The names returned are sorted. - LabelNamesFor(ids ...uint64) ([]string, error) + LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) // Close releases the underlying resources of the reader. 
Close() error @@ -116,7 +117,7 @@ type ChunkWriter interface { // ChunkReader provides reading access of serialized time series data. type ChunkReader interface { // Chunk returns the series data chunk with the given reference. - Chunk(ref uint64) (chunkenc.Chunk, error) + Chunk(ref chunks.ChunkRef) (chunkenc.Chunk, error) // Close releases all underlying resources of the reader. Close() error @@ -191,9 +192,11 @@ type BlockMetaCompaction struct { Failed bool `json:"failed,omitempty"` } -const indexFilename = "index" -const metaFilename = "meta.json" -const metaVersion1 = 1 +const ( + indexFilename = "index" + metaFilename = "meta.json" + metaVersion1 = 1 +) func chunkDir(dir string) string { return filepath.Join(dir, "chunks") } @@ -470,7 +473,7 @@ func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings { return r.ir.SortedPostings(p) } -func (r blockIndexReader) Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error { +func (r blockIndexReader) Series(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error { if err := r.ir.Series(ref, lset, chks); err != nil { return errors.Wrapf(err, "block: %s", r.b.Meta().ULID) } @@ -483,13 +486,13 @@ func (r blockIndexReader) Close() error { } // LabelValueFor returns label value for the given label name in the series referred to by ID. -func (r blockIndexReader) LabelValueFor(id uint64, label string) (string, error) { +func (r blockIndexReader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { return r.ir.LabelValueFor(id, label) } // LabelNamesFor returns all the label names for the series referred to by IDs. // The names returned are sorted. -func (r blockIndexReader) LabelNamesFor(ids ...uint64) ([]string, error) { +func (r blockIndexReader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { return r.ir.LabelNamesFor(ids...) 
} @@ -556,7 +559,7 @@ Outer: return p.Err() } - err = pb.tombstones.Iter(func(id uint64, ivs tombstones.Intervals) error { + err = pb.tombstones.Iter(func(id storage.SeriesRef, ivs tombstones.Intervals) error { for _, iv := range ivs { stones.AddInterval(id, iv) } @@ -588,7 +591,7 @@ Outer: func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, error) { numStones := 0 - if err := pb.tombstones.Iter(func(id uint64, ivs tombstones.Intervals) error { + if err := pb.tombstones.Iter(func(id storage.SeriesRef, ivs tombstones.Intervals) error { numStones += len(ivs) return nil }); err != nil { @@ -611,12 +614,12 @@ func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, er // Snapshot creates snapshot of the block into dir. func (pb *Block) Snapshot(dir string) error { blockDir := filepath.Join(dir, pb.meta.ULID.String()) - if err := os.MkdirAll(blockDir, 0777); err != nil { + if err := os.MkdirAll(blockDir, 0o777); err != nil { return errors.Wrap(err, "create snapshot block dir") } chunksDir := chunkDir(blockDir) - if err := os.MkdirAll(chunksDir, 0777); err != nil { + if err := os.MkdirAll(chunksDir, 0o777); err != nil { return errors.Wrap(err, "create snapshot chunk dir") } diff --git a/tsdb/block_test.go b/tsdb/block_test.go index 31ba76ea6..e677ce2a3 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -30,7 +30,7 @@ import ( "github.com/go-kit/log" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/fileutil" @@ -185,7 +185,7 @@ func TestCorruptedChunk(t *testing.T) { require.NoError(t, err) require.Greater(t, len(files), 0, "No chunk created.") - f, err := os.OpenFile(files[0], os.O_RDWR, 0666) + f, err := os.OpenFile(files[0], os.O_RDWR, 0o666) require.NoError(t, err) // Apply corruption function. 
@@ -505,7 +505,7 @@ func createBlockFromHead(tb testing.TB, dir string, head *Head) string { compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, nil) require.NoError(tb, err) - require.NoError(tb, os.MkdirAll(dir, 0777)) + require.NoError(tb, os.MkdirAll(dir, 0o777)) // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Because of this block intervals are always +1 than the total samples it includes. @@ -522,7 +522,7 @@ func createHead(tb testing.TB, w *wal.WAL, series []storage.Series, chunkDir str app := head.Appender(context.Background()) for _, s := range series { - ref := uint64(0) + ref := storage.SeriesRef(0) it := s.Iterator() lset := s.Labels() for it.Next() { diff --git a/tsdb/blockwriter.go b/tsdb/blockwriter.go index a5a5d7474..064cd01c7 100644 --- a/tsdb/blockwriter.go +++ b/tsdb/blockwriter.go @@ -24,7 +24,7 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" - "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" ) diff --git a/tsdb/blockwriter_test.go b/tsdb/blockwriter_test.go index 07b500d7a..a47f3983e 100644 --- a/tsdb/blockwriter_test.go +++ b/tsdb/blockwriter_test.go @@ -24,7 +24,7 @@ import ( "github.com/go-kit/log" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/tsdbutil" ) diff --git a/tsdb/chunkenc/bstream.go b/tsdb/chunkenc/bstream.go index c8efeab1d..833c9794b 100644 --- a/tsdb/chunkenc/bstream.go +++ b/tsdb/chunkenc/bstream.go @@ -48,8 +48,8 @@ import ( // bstream is a stream of bits. type bstream struct { - stream []byte // the data stream - count uint8 // how many bits are valid in current byte + stream []byte // The data stream. 
+ count uint8 // How many right-most bits are available for writing in the current byte (the last byte of the stream). } func (b *bstream) bytes() []byte { @@ -86,14 +86,17 @@ func (b *bstream) writeByte(byt byte) { i := len(b.stream) - 1 - // fill up b.b with b.count bits from byt + // Complete the last byte with the leftmost b.count bits from byt. b.stream[i] |= byt >> (8 - b.count) b.stream = append(b.stream, 0) i++ + // Write the remainder, if any. b.stream[i] = byt << b.count } +// writeBits writes the nbits right-most bits of u to the stream +// in left-to-right order. func (b *bstream) writeBits(u uint64, nbits int) { u <<= 64 - uint(nbits) for nbits >= 8 { @@ -115,7 +118,7 @@ type bstreamReader struct { streamOffset int // The offset from which read the next byte from the stream. buffer uint64 // The current buffer, filled from the stream, containing up to 8 bytes from which read bits. - valid uint8 // The number of bits valid to read (from left) in the current buffer. + valid uint8 // The number of right-most bits valid to read (from left) in the current 8 byte buffer. } func newBReader(b []byte) bstreamReader { @@ -148,6 +151,8 @@ func (b *bstreamReader) readBitFast() (bit, error) { return (b.buffer & bitmask) != 0, nil } +// readBits constructs a uint64 with the nbits right-most bits +// read from the stream, and any other bits 0. 
func (b *bstreamReader) readBits(nbits uint8) (uint64, error) { if b.valid == 0 { if !b.loadNextBuffer(nbits) { diff --git a/tsdb/chunkenc/chunk.go b/tsdb/chunkenc/chunk.go index 5fe8c08f7..1eff42841 100644 --- a/tsdb/chunkenc/chunk.go +++ b/tsdb/chunkenc/chunk.go @@ -18,6 +18,7 @@ import ( "sync" "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/histogram" ) diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go index c7f94795c..aca1ad693 100644 --- a/tsdb/chunkenc/histogram.go +++ b/tsdb/chunkenc/histogram.go @@ -18,11 +18,9 @@ import ( "math" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/model/value" ) -const () - // HistogramChunk holds encoded sample data for a sparse, high-resolution // histogram. // @@ -247,7 +245,7 @@ func (a *HistogramAppender) Append(int64, float64) { // append. If counterReset is true, okToAppend is always false. func (a *HistogramAppender) Appendable(h *histogram.Histogram) ( positiveInterjections, negativeInterjections []Interjection, - okToAppend bool, counterReset bool, + okToAppend, counterReset bool, ) { if value.IsStaleNaN(h.Sum) { // This is a stale sample whose buckets and spans don't matter. 
diff --git a/tsdb/chunkenc/histogram_meta_test.go b/tsdb/chunkenc/histogram_meta_test.go index 2b9de71ec..e3ae4149b 100644 --- a/tsdb/chunkenc/histogram_meta_test.go +++ b/tsdb/chunkenc/histogram_meta_test.go @@ -22,8 +22,9 @@ import ( "math" "testing" - "github.com/prometheus/prometheus/model/histogram" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/histogram" ) // Example of a span layout and resulting bucket indices (_idx_ is used in this @@ -291,7 +292,6 @@ func TestInterjection(t *testing.T) { gotBuckets := make([]int64, len(s.bucketsOut)) interject(s.bucketsIn, gotBuckets, interjections) require.Equal(t, s.bucketsOut, gotBuckets) - }) } } diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go index e4a9cbe64..ede26cf0e 100644 --- a/tsdb/chunkenc/histogram_test.go +++ b/tsdb/chunkenc/histogram_test.go @@ -16,8 +16,9 @@ package chunkenc import ( "testing" - "github.com/prometheus/prometheus/model/histogram" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/histogram" ) func TestHistogramChunkSameBuckets(t *testing.T) { @@ -83,9 +84,9 @@ func TestHistogramChunkSameBuckets(t *testing.T) { require.Equal(t, exp, act) // 2. Expand second iterator while reusing first one. - //it2 := c.Iterator(it1) - //var res2 []pair - //for it2.Next() { + // it2 := c.Iterator(it1) + // var res2 []pair + // for it2.Next() { // ts, v := it2.At() // res2 = append(res2, pair{t: ts, v: v}) // } diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go index 62cf4396a..fed09a6b6 100644 --- a/tsdb/chunkenc/xor.go +++ b/tsdb/chunkenc/xor.go @@ -213,6 +213,8 @@ func (a *xorAppender) Append(t int64, v float64) { a.tDelta = tDelta } +// bitRange returns whether the given integer can be represented by nbits. +// See docs/bstream.md. 
func bitRange(x int64, nbits uint8) bool { return -((1<<(nbits-1))-1) <= x && x <= 1<<(nbits-1) } @@ -362,9 +364,11 @@ func (it *xorIterator) Next() bool { it.err = err return false } + + // Account for negative numbers, which come back as high unsigned numbers. + // See docs/bstream.md. if bits > (1 << (sz - 1)) { - // or something - bits = bits - (1 << sz) + bits -= 1 << sz } dod = int64(bits) } diff --git a/tsdb/chunks/chunks.go b/tsdb/chunks/chunks.go index 11417c38c..2f565fcb1 100644 --- a/tsdb/chunks/chunks.go +++ b/tsdb/chunks/chunks.go @@ -54,13 +54,69 @@ const ( ChunkEncodingSize = 1 ) +// ChunkRef is a generic reference for reading chunk data. In prometheus it +// is either a HeadChunkRef or BlockChunkRef, though other implementations +// may have their own reference types. +type ChunkRef uint64 + +// HeadSeriesRef refers to in-memory series. +type HeadSeriesRef uint64 + +// HeadChunkRef packs a HeadSeriesRef and a ChunkID into a global 8 Byte ID. +// The HeadSeriesRef and ChunkID may not exceed 5 and 3 bytes respectively. +type HeadChunkRef uint64 + +func NewHeadChunkRef(hsr HeadSeriesRef, chunkID HeadChunkID) HeadChunkRef { + if hsr > (1<<40)-1 { + panic("series ID exceeds 5 bytes") + } + if chunkID > (1<<24)-1 { + panic("chunk ID exceeds 3 bytes") + } + return HeadChunkRef(uint64(hsr<<24) | uint64(chunkID)) +} + +func (p HeadChunkRef) Unpack() (HeadSeriesRef, HeadChunkID) { + return HeadSeriesRef(p >> 24), HeadChunkID(p<<40) >> 40 +} + +// HeadChunkID refers to a specific chunk in a series (memSeries) in the Head. +// Each memSeries has its own monotonically increasing number to refer to its chunks. +// If the HeadChunkID value is... +// * memSeries.firstChunkID+len(memSeries.mmappedChunks), it's the head chunk. +// * less than the above, but >= memSeries.firstChunkID, then it's +// memSeries.mmappedChunks[i] where i = HeadChunkID - memSeries.firstChunkID. +// Example: +// assume a memSeries.firstChunkID=7 and memSeries.mmappedChunks=[p5,p6,p7,p8,p9].
+// | HeadChunkID value | refers to ... | +// |-------------------|----------------------------------------------------------------------------------------| +// | 0-6 | chunks that have been compacted to blocks, these won't return data for queries in Head | +// | 7-11 | memSeries.mmappedChunks[i] where i is 0 to 4. | +// | 12 | memSeries.headChunk | +type HeadChunkID uint64 + +// BlockChunkRef refers to a chunk within a persisted block. +// The upper 4 bytes are for the segment index and +// the lower 4 bytes are for the segment offset where the data starts for this chunk. +type BlockChunkRef uint64 + +// NewBlockChunkRef packs the file index and byte offset into a BlockChunkRef. +func NewBlockChunkRef(fileIndex, fileOffset uint64) BlockChunkRef { + return BlockChunkRef(fileIndex<<32 | fileOffset) +} + +func (b BlockChunkRef) Unpack() (int, int) { + sgmIndex := int(b >> 32) + chkStart := int((b << 32) >> 32) + return sgmIndex, chkStart +} + // Meta holds information about a chunk of data. type Meta struct { // Ref and Chunk hold either a reference that can be used to retrieve // chunk data or the data itself. - // When it is a reference it is the segment offset at which the chunk bytes start. - // Generally, only one of them is set. - Ref uint64 + // If Chunk is nil, call ChunkReader.Chunk(Meta.Ref) to get the chunk and assign it to the Chunk field + Ref ChunkRef Chunk chunkenc.Chunk // Time range the data covers. @@ -68,7 +124,7 @@ type Meta struct { MinTime, MaxTime int64 } -// Iterator iterates over the chunk of a time series. +// Iterator iterates over the chunks of a single time series. type Iterator interface { // At returns the current meta. // It depends on implementation if the chunk is populated or not. 
@@ -97,9 +153,7 @@ func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool { return cm.MinTime <= maxt && mint <= cm.MaxTime } -var ( - errInvalidSize = fmt.Errorf("invalid size") -) +var errInvalidSize = fmt.Errorf("invalid size") var castagnoliTable *crc32.Table @@ -148,7 +202,7 @@ func newWriter(dir string, segmentSize int64) (*Writer, error) { segmentSize = DefaultChunkSegmentSize } - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } dirFile, err := fileutil.OpenDir(dir) @@ -224,7 +278,7 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all return 0, nil, 0, errors.Wrap(err, "next sequence file") } ptmp := p + ".tmp" - f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0666) + f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0o666) if err != nil { return 0, nil, 0, errors.Wrap(err, "open temp file") } @@ -266,7 +320,7 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all return 0, nil, 0, errors.Wrap(err, "replace file") } - f, err = os.OpenFile(p, os.O_WRONLY, 0666) + f, err = os.OpenFile(p, os.O_WRONLY, 0o666) if err != nil { return 0, nil, 0, errors.Wrap(err, "open final file") } @@ -355,16 +409,11 @@ func (w *Writer) writeChunks(chks []Meta) error { return nil } - var seq = uint64(w.seq()) << 32 + seq := uint64(w.seq()) for i := range chks { chk := &chks[i] - // The reference is set to the segment index and the offset where - // the data starts for this chunk. - // - // The upper 4 bytes are for the segment index and - // the lower 4 bytes are for the segment offset where to start reading this chunk. - chk.Ref = seq | uint64(w.n) + chk.Ref = ChunkRef(NewBlockChunkRef(seq, uint64(w.n))) n := binary.PutUvarint(w.buf[:], uint64(len(chk.Chunk.Bytes()))) @@ -497,16 +546,9 @@ func (s *Reader) Size() int64 { } // Chunk returns a chunk from a given reference. 
-func (s *Reader) Chunk(ref uint64) (chunkenc.Chunk, error) { - var ( - // Get the upper 4 bytes. - // These contain the segment index. - sgmIndex = int(ref >> 32) - // Get the lower 4 bytes. - // These contain the segment offset where the data for this chunk starts. - chkStart = int((ref << 32) >> 32) - chkCRC32 = newCRC32() - ) +func (s *Reader) Chunk(ref ChunkRef) (chunkenc.Chunk, error) { + sgmIndex, chkStart := BlockChunkRef(ref).Unpack() + chkCRC32 := newCRC32() if sgmIndex >= len(s.bs) { return nil, errors.Errorf("segment index %d out of range", sgmIndex) diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go index f4ac61fcc..51e6e3232 100644 --- a/tsdb/chunks/head_chunks.go +++ b/tsdb/chunks/head_chunks.go @@ -42,18 +42,16 @@ const ( headChunksFormatV1 = 1 ) -var ( - // ErrChunkDiskMapperClosed returned by any method indicates - // that the ChunkDiskMapper was closed. - ErrChunkDiskMapperClosed = errors.New("ChunkDiskMapper closed") -) +// ErrChunkDiskMapperClosed returned by any method indicates +// that the ChunkDiskMapper was closed. +var ErrChunkDiskMapperClosed = errors.New("ChunkDiskMapper closed") const ( // MintMaxtSize is the size of the mint/maxt for head chunk file and chunks. MintMaxtSize = 8 // SeriesRefSize is the size of series reference on disk. SeriesRefSize = 8 - // HeadChunkFileHeaderSize is the total size of the header for the head chunk file. + // HeadChunkFileHeaderSize is the total size of the header for a head chunk file. HeadChunkFileHeaderSize = SegmentHeaderSize // MaxHeadChunkFileSize is the max size of a head chunk file. MaxHeadChunkFileSize = 128 * 1024 * 1024 // 128 MiB. @@ -83,7 +81,6 @@ func (ref ChunkDiskMapperRef) Unpack() (sgmIndex, chkStart int) { sgmIndex = int(ref >> 32) chkStart = int((ref << 32) >> 32) return sgmIndex, chkStart - } // CorruptionErr is an error that's returned when corruption is encountered. 
@@ -127,32 +124,33 @@ type ChunkDiskMapper struct { // from which chunks are served till they are flushed and are ready for m-mapping. chunkBuffer *chunkBuffer - // If 'true', it indicated that the maxt of all the on-disk files were set - // after iterating through all the chunks in those files. + // Whether the maxt field is set for all mmapped chunk files tracked within the mmappedChunkFiles map. + // This is done after iterating through all the chunks in those files using the IterateAllChunks method. fileMaxtSet bool closed bool } +// mmappedChunkFile provides mmap access to an entire head chunks file that holds many chunks. type mmappedChunkFile struct { byteSlice ByteSlice - maxt int64 + maxt int64 // Max timestamp among all of this file's chunks. } -// NewChunkDiskMapper returns a new writer against the given directory +// NewChunkDiskMapper returns a new ChunkDiskMapper against the given directory // using the default head chunk file duration. // NOTE: 'IterateAllChunks' method needs to be called at least once after creating ChunkDiskMapper // to set the maxt of all the file. func NewChunkDiskMapper(dir string, pool chunkenc.Pool, writeBufferSize int) (*ChunkDiskMapper, error) { // Validate write buffer size.
if writeBufferSize < MinWriteBufferSize || writeBufferSize > MaxWriteBufferSize { - return nil, errors.Errorf("ChunkDiskMapper write buffer size should be between %d and %d (actual: %d)", MinWriteBufferSize, MaxHeadChunkFileSize, writeBufferSize) + return nil, errors.Errorf("ChunkDiskMapper write buffer size should be between %d and %d (actual: %d)", MinWriteBufferSize, MaxWriteBufferSize, writeBufferSize) } if writeBufferSize%1024 != 0 { return nil, errors.Errorf("ChunkDiskMapper write buffer size should be a multiple of 1024 (actual: %d)", writeBufferSize) } - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } dirFile, err := fileutil.OpenDir(dir) @@ -175,6 +173,7 @@ func NewChunkDiskMapper(dir string, pool chunkenc.Pool, writeBufferSize int) (*C return m, m.openMMapFiles() } +// openMMapFiles opens all files within dir for mmapping. func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) { cdm.mmappedChunkFiles = map[int]*mmappedChunkFile{} cdm.closers = map[int]io.Closer{} @@ -257,7 +256,7 @@ func listChunkFiles(dir string) (map[int]string, error) { } // repairLastChunkFile deletes the last file if it's empty. -// Because we don't fsync when creating these file, we could end +// Because we don't fsync when creating these files, we could end // up with an empty file at the end during an abrupt shutdown. func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr error) { lastFile := -1 @@ -288,7 +287,7 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro // WriteChunk writes the chunk to the disk. // The returned chunk ref is the reference from where the chunk encoding starts for the chunk. 
-func (cdm *ChunkDiskMapper) WriteChunk(seriesRef uint64, mint, maxt int64, chk chunkenc.Chunk) (chkRef ChunkDiskMapperRef, err error) { +func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk) (chkRef ChunkDiskMapperRef, err error) { cdm.writePathMtx.Lock() defer cdm.writePathMtx.Unlock() @@ -315,7 +314,7 @@ func (cdm *ChunkDiskMapper) WriteChunk(seriesRef uint64, mint, maxt int64, chk c chkRef = newChunkDiskMapperRef(uint64(cdm.curFileSequence), uint64(cdm.curFileSize())) - binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], seriesRef) + binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], uint64(seriesRef)) bytesWritten += SeriesRefSize binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], uint64(mint)) bytesWritten += MintMaxtSize @@ -353,7 +352,7 @@ func (cdm *ChunkDiskMapper) WriteChunk(seriesRef uint64, mint, maxt int64, chk c return chkRef, nil } -// shouldCutNewFile decides the cutting of a new file based on time and size retention. +// shouldCutNewFile returns whether a new file should be cut, based on time and size retention. // Size retention: because depending on the system architecture, there is a limit on how big of a file we can m-map. // Time retention: so that we can delete old chunks with some time guarantee in low load environments. func (cdm *ChunkDiskMapper) shouldCutNewFile(chunkSize int) bool { @@ -468,7 +467,7 @@ func (cdm *ChunkDiskMapper) flushBuffer() error { // Chunk returns a chunk from a given reference. func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error) { cdm.readPathMtx.RLock() - // We hold this read lock for the entire duration because if the Close() + // We hold this read lock for the entire duration because if Close() // is called, the data in the byte slice will get corrupted as the mmapped // file will be closed. 
defer cdm.readPathMtx.RUnlock() @@ -578,11 +577,11 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error return chk, nil } -// IterateAllChunks iterates on all the chunks in its byte slices in the order of the head chunk file sequence -// and runs the provided function on each chunk. It returns on the first error encountered. +// IterateAllChunks iterates all mmappedChunkFiles (in order of head chunk file name/number) and all the chunks within it +// and runs the provided function with information about each chunk. It returns on the first error encountered. // NOTE: This method needs to be called at least once after creating ChunkDiskMapper // to set the maxt of all the file. -func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef uint64, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16) error) (err error) { +func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16) error) (err error) { cdm.writePathMtx.Lock() defer cdm.writePathMtx.Unlock() @@ -630,7 +629,7 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef uint64, chunkRef C chunkRef := newChunkDiskMapperRef(uint64(segID), uint64(idx)) startIdx := idx - seriesRef := binary.BigEndian.Uint64(mmapFile.byteSlice.Range(idx, idx+SeriesRefSize)) + seriesRef := HeadSeriesRef(binary.BigEndian.Uint64(mmapFile.byteSlice.Range(idx, idx+SeriesRefSize))) idx += SeriesRefSize mint := int64(binary.BigEndian.Uint64(mmapFile.byteSlice.Range(idx, idx+MintMaxtSize))) idx += MintMaxtSize @@ -828,7 +827,7 @@ func closeAllFromMap(cs map[int]io.Closer) error { const inBufferShards = 128 // 128 is a randomly chosen number. -// chunkBuffer is a thread safe buffer for chunks. +// chunkBuffer is a thread safe lookup table for chunks by their ref. 
type chunkBuffer struct { inBufferChunks [inBufferShards]map[ChunkDiskMapperRef]chunkenc.Chunk inBufferChunksMtxs [inBufferShards]sync.RWMutex diff --git a/tsdb/chunks/head_chunks_other.go b/tsdb/chunks/head_chunks_other.go index 8b37dd8c2..3d00fddc3 100644 --- a/tsdb/chunks/head_chunks_other.go +++ b/tsdb/chunks/head_chunks_other.go @@ -16,8 +16,7 @@ package chunks -var ( - // HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut. - // Windows needs pre-allocations while the other OS does not. - HeadChunkFilePreallocationSize int64 -) +// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut. +// Windows needs pre-allocations while the other OS does not. But we observed that a 0 pre-allocation causes unit tests to flake. +// This small allocation for non-Windows OSes removes the flake. +var HeadChunkFilePreallocationSize int64 = MinWriteBufferSize * 2 diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go index f1aa13cec..58c0f8ada 100644 --- a/tsdb/chunks/head_chunks_test.go +++ b/tsdb/chunks/head_chunks_test.go @@ -38,7 +38,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) { chkCRC32 := newCRC32() type expectedDataType struct { - seriesRef uint64 + seriesRef HeadSeriesRef chunkRef ChunkDiskMapperRef mint, maxt int64 numSamples uint16 @@ -75,7 +75,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) { bytesWritten := 0 chkCRC32.Reset() - binary.BigEndian.PutUint64(buf[bytesWritten:], seriesRef) + binary.BigEndian.PutUint64(buf[bytesWritten:], uint64(seriesRef)) bytesWritten += SeriesRefSize binary.BigEndian.PutUint64(buf[bytesWritten:], uint64(mint)) bytesWritten += MintMaxtSize @@ -133,7 +133,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) { require.NoError(t, err) idx := 0 - require.NoError(t, hrw.IterateAllChunks(func(seriesRef uint64, chunkRef 
ChunkDiskMapperRef, mint, maxt int64, numSamples uint16) error { + require.NoError(t, hrw.IterateAllChunks(func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16) error { t.Helper() expData := expectedData[idx] @@ -221,7 +221,7 @@ func TestChunkDiskMapper_Truncate(t *testing.T) { require.NoError(t, err) require.False(t, hrw.fileMaxtSet) - require.NoError(t, hrw.IterateAllChunks(func(_ uint64, _ ChunkDiskMapperRef, _, _ int64, _ uint16) error { return nil })) + require.NoError(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16) error { return nil })) require.True(t, hrw.fileMaxtSet) verifyFiles([]int{3, 4, 5, 6, 7, 8}) @@ -335,7 +335,7 @@ func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) { require.NoError(t, err) // Forcefully failing IterateAllChunks. - require.Error(t, hrw.IterateAllChunks(func(_ uint64, _ ChunkDiskMapperRef, _, _ int64, _ uint16) error { + require.Error(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16) error { return errors.New("random error") })) @@ -379,7 +379,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) { // Write an empty last file mimicking an abrupt shutdown on file creation. 
emptyFileName := segmentFile(dir, lastFile+1) - f, err := os.OpenFile(emptyFileName, os.O_WRONLY|os.O_CREATE, 0666) + f, err := os.OpenFile(emptyFileName, os.O_WRONLY|os.O_CREATE, 0o666) require.NoError(t, err) require.NoError(t, f.Sync()) stat, err := f.Stat() @@ -391,7 +391,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) { hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool(), DefaultWriteBufferSize) require.NoError(t, err) require.False(t, hrw.fileMaxtSet) - require.NoError(t, hrw.IterateAllChunks(func(_ uint64, _ ChunkDiskMapperRef, _, _ int64, _ uint16) error { return nil })) + require.NoError(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16) error { return nil })) require.True(t, hrw.fileMaxtSet) // Removed from memory. @@ -409,7 +409,6 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) { require.NoError(t, err) require.LessOrEqual(t, seq, uint64(lastFile), "file index on disk is bigger than previous last file") } - } func testChunkDiskMapper(t *testing.T) *ChunkDiskMapper { @@ -422,7 +421,7 @@ func testChunkDiskMapper(t *testing.T) *ChunkDiskMapper { hrw, err := NewChunkDiskMapper(tmpdir, chunkenc.NewPool(), DefaultWriteBufferSize) require.NoError(t, err) require.False(t, hrw.fileMaxtSet) - require.NoError(t, hrw.IterateAllChunks(func(_ uint64, _ ChunkDiskMapperRef, _, _ int64, _ uint16) error { return nil })) + require.NoError(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16) error { return nil })) require.True(t, hrw.fileMaxtSet) return hrw } @@ -438,9 +437,9 @@ func randomChunk(t *testing.T) chunkenc.Chunk { return chunk } -func createChunk(t *testing.T, idx int, hrw *ChunkDiskMapper) (seriesRef uint64, chunkRef ChunkDiskMapperRef, mint, maxt int64, chunk chunkenc.Chunk) { +func createChunk(t *testing.T, idx int, hrw *ChunkDiskMapper) (seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, chunk chunkenc.Chunk) { var err 
error - seriesRef = uint64(rand.Int63()) + seriesRef = HeadSeriesRef(rand.Int63()) mint = int64((idx)*1000 + 1) maxt = int64((idx + 1) * 1000) chunk = randomChunk(t) diff --git a/tsdb/chunks/head_chunks_windows.go b/tsdb/chunks/head_chunks_windows.go index b772b64b4..214ee42f5 100644 --- a/tsdb/chunks/head_chunks_windows.go +++ b/tsdb/chunks/head_chunks_windows.go @@ -13,8 +13,6 @@ package chunks -var ( - // HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut. - // Windows needs pre-allocation to m-map the file. - HeadChunkFilePreallocationSize int64 = MaxHeadChunkFileSize -) +// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut. +// Windows needs pre-allocation to m-map the file. +var HeadChunkFilePreallocationSize int64 = MaxHeadChunkFileSize diff --git a/tsdb/compact.go b/tsdb/compact.go index b2ae7e4ea..7c12d1729 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -564,7 +564,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe return err } - if err = os.MkdirAll(tmp, 0777); err != nil { + if err = os.MkdirAll(tmp, 0o777); err != nil { return err } @@ -726,7 +726,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, } all = indexr.SortedPostings(all) // Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp. 
- sets = append(sets, newBlockChunkSeriesSet(indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1)) + sets = append(sets, newBlockChunkSeriesSet(indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false)) syms := indexr.Symbols() if i == 0 { symbols = syms @@ -745,7 +745,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, } var ( - ref = uint64(0) + ref = storage.SeriesRef(0) chks []chunks.Meta ) diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index dd9e426a5..2c34355b3 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -33,7 +33,7 @@ import ( "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -1488,7 +1488,7 @@ func TestSparseHistogramSpaceSavings(t *testing.T) { // Ingest sparse histograms. for _, ah := range allSparseSeries { var ( - ref uint64 + ref storage.SeriesRef err error ) for i := 0; i < numHistograms; i++ { @@ -1515,7 +1515,7 @@ func TestSparseHistogramSpaceSavings(t *testing.T) { // Ingest histograms the old way. 
for _, ah := range allSparseSeries { - refs := make([]uint64, c.numBuckets+((c.numSpans-1)*c.gapBetweenSpans)) + refs := make([]storage.SeriesRef, c.numBuckets+((c.numSpans-1)*c.gapBetweenSpans)) for i := 0; i < numHistograms; i++ { ts := int64(i) * timeStep diff --git a/tsdb/db.go b/tsdb/db.go index c69f6f464..5a04e1977 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -36,13 +36,14 @@ import ( "golang.org/x/sync/errgroup" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" _ "github.com/prometheus/prometheus/tsdb/goversion" // Load the package into main to make sure minium Go version is met. + "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/wal" ) @@ -59,16 +60,10 @@ const ( tmpForCreationBlockDirSuffix = ".tmp-for-creation" // Pre-2.21 tmp dir suffix, used in clean-up functions. tmpLegacy = ".tmp" - - lockfileDisabled = -1 - lockfileReplaced = 0 - lockfileCreatedCleanly = 1 ) -var ( - // ErrNotReady is returned if the underlying storage is not ready yet. - ErrNotReady = errors.New("TSDB not ready") -) +// ErrNotReady is returned if the underlying storage is not ready yet. +var ErrNotReady = errors.New("TSDB not ready") // DefaultOptions used for the DB. They are sane for setups using // millisecond precision timestamps. @@ -164,9 +159,8 @@ type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{} // DB handles reads and writes of time series falling into // a hashed partition of a seriedb. 
type DB struct { - dir string - lockf fileutil.Releaser - lockfPath string + dir string + locker *tsdbutil.DirLocker logger log.Logger metrics *dbMetrics @@ -198,20 +192,19 @@ type DB struct { } type dbMetrics struct { - loadedBlocks prometheus.GaugeFunc - symbolTableSize prometheus.GaugeFunc - reloads prometheus.Counter - reloadsFailed prometheus.Counter - compactionsFailed prometheus.Counter - compactionsTriggered prometheus.Counter - compactionsSkipped prometheus.Counter - sizeRetentionCount prometheus.Counter - timeRetentionCount prometheus.Counter - startTime prometheus.GaugeFunc - tombCleanTimer prometheus.Histogram - blocksBytes prometheus.Gauge - maxBytes prometheus.Gauge - lockfileCreatedCleanly prometheus.Gauge + loadedBlocks prometheus.GaugeFunc + symbolTableSize prometheus.GaugeFunc + reloads prometheus.Counter + reloadsFailed prometheus.Counter + compactionsFailed prometheus.Counter + compactionsTriggered prometheus.Counter + compactionsSkipped prometheus.Counter + sizeRetentionCount prometheus.Counter + timeRetentionCount prometheus.Counter + startTime prometheus.GaugeFunc + tombCleanTimer prometheus.Histogram + blocksBytes prometheus.Gauge + maxBytes prometheus.Gauge } func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { @@ -289,10 +282,6 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { Name: "prometheus_tsdb_size_retentions_total", Help: "The number of times that blocks were deleted because the maximum number of bytes was exceeded.", }) - m.lockfileCreatedCleanly = prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "prometheus_tsdb_clean_start", - Help: "-1: lockfile is disabled. 0: a lockfile from a previous execution was replaced. 
1: lockfile creation was clean", - }) if r != nil { r.MustRegister( @@ -309,7 +298,6 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { m.tombCleanTimer, m.blocksBytes, m.maxBytes, - m.lockfileCreatedCleanly, ) } return m @@ -608,8 +596,11 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) { return opts, rngs } +// open returns a new DB in the given directory. +// It initializes the lockfile, WAL, compactor, and Head (by replaying the WAL), and runs the database. +// It is not safe to open more than one DB in the same directory. func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs []int64, stats *DBStats) (_ *DB, returnedErr error) { - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } if l == nil { @@ -671,29 +662,17 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs db.blocksToDelete = DefaultBlocksToDelete(db) } - lockfileCreationStatus := lockfileDisabled + var err error + db.locker, err = tsdbutil.NewDirLocker(dir, "tsdb", db.logger, r) + if err != nil { + return nil, err + } if !opts.NoLockfile { - absdir, err := filepath.Abs(dir) - if err != nil { + if err := db.locker.Lock(); err != nil { return nil, err } - db.lockfPath = filepath.Join(absdir, "lock") - - if _, err := os.Stat(db.lockfPath); err == nil { - level.Warn(db.logger).Log("msg", "A TSDB lockfile from a previous execution already existed. 
It was replaced", "file", db.lockfPath) - lockfileCreationStatus = lockfileReplaced - } else { - lockfileCreationStatus = lockfileCreatedCleanly - } - - lockf, _, err := fileutil.Flock(db.lockfPath) - if err != nil { - return nil, errors.Wrap(err, "lock DB directory") - } - db.lockf = lockf } - var err error ctx, cancel := context.WithCancel(context.Background()) db.compactor, err = NewLeveledCompactorWithChunkSize(ctx, r, l, rngs, db.chunkPool, opts.MaxBlockChunkSegmentSize, nil) if err != nil { @@ -737,7 +716,6 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs if maxBytes < 0 { maxBytes = 0 } - db.metrics.lockfileCreatedCleanly.Set(float64(lockfileCreationStatus)) db.metrics.maxBytes.Set(float64(maxBytes)) if err := db.reload(); err != nil { @@ -860,7 +838,7 @@ type dbAppender struct { var _ storage.GetRef = dbAppender{} -func (a dbAppender) GetRef(lset labels.Labels) (uint64, labels.Labels) { +func (a dbAppender) GetRef(lset labels.Labels) (storage.SeriesRef, labels.Labels) { if g, ok := a.Appender.(storage.GetRef); ok { return g.GetRef(lset) } @@ -1450,11 +1428,7 @@ func (db *DB) Close() error { g.Go(pb.Close) } - errs := tsdb_errors.NewMulti(g.Wait()) - if db.lockf != nil { - errs.Add(db.lockf.Release()) - errs.Add(os.Remove(db.lockfPath)) - } + errs := tsdb_errors.NewMulti(g.Wait(), db.locker.Release()) if db.head != nil { errs.Add(db.head.Close()) } @@ -1642,7 +1616,7 @@ func (db *DB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, err return db.head.exemplars.ExemplarQuerier(ctx) } -func rangeForTimestamp(t int64, width int64) (maxt int64) { +func rangeForTimestamp(t, width int64) (maxt int64) { return (t/width)*width + width } diff --git a/tsdb/db_test.go b/tsdb/db_test.go index baa8dd4ea..1773579f9 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -40,7 +40,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/goleak" - "github.com/prometheus/prometheus/pkg/labels" + 
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -228,7 +228,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) { { walFiles, err := ioutil.ReadDir(path.Join(db.Dir(), "wal")) require.NoError(t, err) - f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0666) + f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0o666) require.NoError(t, err) r := wal.NewReader(bufio.NewReader(f)) require.True(t, r.Next(), "reading the series record") @@ -663,6 +663,7 @@ func TestDB_SnapshotWithDelete(t *testing.T) { numSamples := int64(10) db := openTestDB(t, nil, nil) + defer func() { require.NoError(t, db.Close()) }() ctx := context.Background() app := db.Appender(ctx) @@ -700,15 +701,14 @@ Outer: require.NoError(t, os.RemoveAll(snap)) }() require.NoError(t, db.Snapshot(snap, true)) - require.NoError(t, db.Close()) // reopen DB from snapshot - db, err = Open(snap, nil, nil, nil, nil) + newDB, err := Open(snap, nil, nil, nil, nil) require.NoError(t, err) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, newDB.Close()) }() // Compare the result. - q, err := db.Querier(context.TODO(), 0, numSamples) + q, err := newDB.Querier(context.TODO(), 0, numSamples) require.NoError(t, err) defer func() { require.NoError(t, q.Close()) }() @@ -1202,6 +1202,10 @@ func TestTombstoneCleanFail(t *testing.T) { // and retention limit policies, when triggered at the same time, // won't race against each other. 
func TestTombstoneCleanRetentionLimitsRace(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + opts := DefaultOptions() var wg sync.WaitGroup @@ -1245,7 +1249,6 @@ func TestTombstoneCleanRetentionLimitsRace(t *testing.T) { require.NoError(t, db.Close()) } - } func intersection(oldBlocks, actualBlocks []string) (intersection []string) { @@ -1272,6 +1275,7 @@ type mockCompactorFailing struct { func (*mockCompactorFailing) Plan(dir string) ([]string, error) { return nil, nil } + func (c *mockCompactorFailing) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) { if len(c.blocks) >= c.max { return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail") @@ -1388,7 +1392,7 @@ func TestSizeRetention(t *testing.T) { // Create a WAL checkpoint, and compare sizes. first, last, err := wal.Segments(db.Head().wal.Dir()) require.NoError(t, err) - _, err = wal.Checkpoint(log.NewNopLogger(), db.Head().wal, first, last-1, func(x uint64) bool { return false }, 0) + _, err = wal.Checkpoint(log.NewNopLogger(), db.Head().wal, first, last-1, func(x chunks.HeadSeriesRef) bool { return false }, 0) require.NoError(t, err) blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics. walSize, err = db.Head().wal.Size() @@ -1559,7 +1563,7 @@ func expandSeriesSet(ss storage.SeriesSet) ([]labels.Labels, map[string][]sample func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) { // Create 10 blocks that does not overlap (0-10, 10-20, ..., 100-110) but in reverse order to ensure our algorithm // will handle that. 
- var metas = make([]BlockMeta, 11) + metas := make([]BlockMeta, 11) for i := 10; i >= 0; i-- { metas[i] = BlockMeta{MinTime: int64(i * 10), MaxTime: int64((i + 1) * 10)} } @@ -1781,7 +1785,7 @@ func TestInitializeHeadTimestamp(t *testing.T) { require.NoError(t, os.RemoveAll(dir)) }() - require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0777)) + require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777)) w, err := wal.New(nil, nil, path.Join(dir, "wal"), false) require.NoError(t, err) @@ -1831,7 +1835,7 @@ func TestInitializeHeadTimestamp(t *testing.T) { createBlock(t, dir, genSeries(1, 1, 1000, 6000)) - require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0777)) + require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777)) w, err := wal.New(nil, nil, path.Join(dir, "wal"), false) require.NoError(t, err) @@ -2663,7 +2667,6 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) { for i, test := range tests { t.Run(strconv.Itoa(i), func(t *testing.T) { - tempDir, err := ioutil.TempDir("", "test_chunk_writer") require.NoError(t, err) defer func() { require.NoError(t, os.RemoveAll(tempDir)) }() @@ -2899,7 +2902,7 @@ func TestOpen_VariousBlockStates(t *testing.T) { expectedLoadedDirs[outDir] = struct{}{} // Touch chunks dir in block. - require.NoError(t, os.MkdirAll(filepath.Join(dbDir, "chunks"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dbDir, "chunks"), 0o777)) defer func() { require.NoError(t, os.RemoveAll(filepath.Join(dbDir, "chunks"))) }() @@ -3113,80 +3116,30 @@ func TestNoPanicOnTSDBOpenError(t *testing.T) { require.NoError(t, os.RemoveAll(tmpdir)) }) - absdir, err := filepath.Abs(tmpdir) - require.NoError(t, err) - // Taking the file lock will cause TSDB startup error. - lockf, _, err := fileutil.Flock(filepath.Join(absdir, "lock")) + // Taking the lock will cause a TSDB startup error. 
+ l, err := tsdbutil.NewDirLocker(tmpdir, "tsdb", log.NewNopLogger(), nil) require.NoError(t, err) + require.NoError(t, l.Lock()) _, err = Open(tmpdir, nil, nil, DefaultOptions(), nil) require.Error(t, err) - require.NoError(t, lockf.Release()) + require.NoError(t, l.Release()) } -func TestLockfileMetric(t *testing.T) { - cases := []struct { - fileAlreadyExists bool - lockFileDisabled bool - expectedValue int - }{ - { - fileAlreadyExists: false, - lockFileDisabled: false, - expectedValue: lockfileCreatedCleanly, - }, - { - fileAlreadyExists: true, - lockFileDisabled: false, - expectedValue: lockfileReplaced, - }, - { - fileAlreadyExists: true, - lockFileDisabled: true, - expectedValue: lockfileDisabled, - }, - { - fileAlreadyExists: false, - lockFileDisabled: true, - expectedValue: lockfileDisabled, - }, - } +func TestLockfile(t *testing.T) { + tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) { + opts := DefaultOptions() + opts.NoLockfile = !createLock - for _, c := range cases { - t.Run(fmt.Sprintf("%+v", c), func(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "test") - require.NoError(t, err) - t.Cleanup(func() { - require.NoError(t, os.RemoveAll(tmpdir)) - }) - absdir, err := filepath.Abs(tmpdir) - require.NoError(t, err) + // Create the DB. This should create lockfile and its metrics. 
+ db, err := Open(data, nil, nil, opts, nil) + require.NoError(t, err) - // Test preconditions (file already exists + lockfile option) - lockfilePath := filepath.Join(absdir, "lock") - if c.fileAlreadyExists { - err = ioutil.WriteFile(lockfilePath, []byte{}, 0644) - require.NoError(t, err) - } - opts := DefaultOptions() - opts.NoLockfile = c.lockFileDisabled - - // Create the DB, this should create a lockfile and the metrics - db, err := Open(tmpdir, nil, nil, opts, nil) - require.NoError(t, err) - require.Equal(t, float64(c.expectedValue), prom_testutil.ToFloat64(db.metrics.lockfileCreatedCleanly)) - - // Close the DB, this should delete the lockfile + return db.locker, testutil.NewCallbackCloser(func() { require.NoError(t, db.Close()) - - // Check that the lockfile is always deleted - if !c.lockFileDisabled { - _, err = os.Stat(lockfilePath) - require.Error(t, err, "lockfile was not deleted") - } }) - } + }) } func TestQuerier_ShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t *testing.T) { diff --git a/tsdb/docs/bstream.md b/tsdb/docs/bstream.md new file mode 100644 index 000000000..91dec1b14 --- /dev/null +++ b/tsdb/docs/bstream.md @@ -0,0 +1,62 @@ +# bstream details + +This doc describes details of the bstream (bitstream) and how we use it for encoding and decoding. +This doc is incomplete. For more background, see the Gorilla TSDB [white paper](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf) +or the original [go-tsz](https://github.com/dgryski/go-tsz) implementation, which this code is based on. + +## Delta-of-delta encoding for timestamps + +We need to be able to encode and decode dod's for timestamps, which can be positive, zero, or negative. +Note that int64's are implemented as [2's complement](https://en.wikipedia.org/wiki/Two%27s_complement) + +and look like: + +``` +0111...111 = maxint64 + ... 
+0000...111 = 7 +0000...110 = 6 +0000...101 = 5 +0000...100 = 4 +0000...011 = 3 +0000...010 = 2 +0000...001 = 1 +0000...000 = 0 +1111...111 = -1 +1111...110 = -2 +1111...101 = -3 +1111...100 = -4 +1111...011 = -5 +1111...010 = -6 +1111...001 = -7 +1111...000 = -8 + ... +1000...001 = minint64+1 +1000...000 = minint64 +``` + +All numbers have a prefix (of zeroes for positive numbers, of ones for negative numbers), followed by a number of significant digits at the end. +In all cases, the smaller the absolute value of the number, the fewer the amount of significant digits. + +To encode these numbers, we use: +* A prefix which declares the amount of bits that follow (we use a predefined list of options in order of increasing number of significant bits). +* A number of bits which is one more than the number of significant bits. The extra bit is needed because we deal with unsigned integers, although + it isn't exactly a sign bit. (See below for details). + +The `bitRange` function determines whether a given integer can be represented by a number of bits. +For a given number of bits `nbits` we can distinguish (and thus encode) any set of `2^nbits` numbers. +E.g. for `nbits = 3`, we can encode 8 distinct numbers, and we have a choice of choosing our boundaries. For example -4 to 3, +-3 to 4, 0 to 7 or even -2 to 5 (always inclusive). (Observe in the list above that this is always true.) +Because we need to support positive and negative numbers equally, we choose boundaries that grow symmetrically. Following the same example, +we choose -3 to 4. + +When decoding the number, the most interesting part is how to recognize whether a number is negative or positive, and thus which prefix to set. +Note that the bstream library doesn't interpret integers to a specific type, but rather returns them as uint64's (which are really just a container for 64 bits). 
+Within the ranges we choose, if looked at as unsigned integers, the higher portion of the range represent the negative numbers. +Continuing the same example, the numbers 001, 010, 011 and 100 are returned as unsigned integers 1,2,3,4 and mean the same thing when casted to int64's. +But the others, 101, 110 and 111 are returned as unsigned integers 5,6,7 but actually represent -3, -2 and -1 (see list above), +The cutoff value is the value set by the `nbit`'th bit, and needs a value subtracted that is represented by the `nbit+1`th bit. +In our example, the 3rd bit sets the number 4, and the 4th sets the number 8. So if we see an unsigned integer exceeding 4 (5,6,7) we subtract 8. This gives us our desired values (-3, -2 and -1). + +Careful observers may note that, if we shift our boundaries down by one, the first bit would always indicate the sign (and imply the needed prefix). +In our example of `nbits = 3`, that would mean the range from -4 to 3. But what we have now works just fine too. diff --git a/tsdb/docs/refs.md b/tsdb/docs/refs.md new file mode 100644 index 000000000..b070b5c7a --- /dev/null +++ b/tsdb/docs/refs.md @@ -0,0 +1,91 @@ +# An overview of different Series and Chunk reference types + +## Used by callers of TSDB + +| Location | Series access | Chunk access | +|--------------------|--------------------------------|--------------------------------------------------------------------| +| Global interfaces | `SeriesRef` (in postings list) | `chunks.ChunkRef` (`ChunkReader` interface, `Meta.Ref`) | +| Head | `HeadSeriesRef` (autoinc) | `HeadChunkRef` (could be head chunk or mmapped chunk. 5/3B split) | +| blocks | `BlockSeriesRef` (16B aligned) | `BlockChunkRef` (4/4B split) | + +### `SeriesRef` + +Note: we cover the implementations as used in Prometheus. Other projects may use different implementations. + +#### `HeadSeriesRef` + +`HeadSeriesRef` is simply a 64bit counter that increments when a new series comes in. 
+Due to series churn, the set of actively used `HeadSeriesRef`s may be well above zero (e.g. 0-10M may not be used, and 10M-11M is active) + +Usage: +* [`stripeSeries`](https://github.com/prometheus/prometheus/blob/fdbc40a9efcc8197a94f23f0e479b0b56e52d424/tsdb/head.go#L1292-L1298) (note: when you don't know a `HeadSeriesRef` for a series, you can also access it by a hash of the series' labels) +* WAL +* `HeadChunkRef`s include them for addressing head chunks, as those are owned by the `memSeries`. + +Notes: +1) M-mapped Head chunks, while they use `HeadSeriesRef`s, don't contain an index and depend on the series listing in memory. +Once mmapped, chunks have `HeadSeriesRef`s inside them, allowing you to recreate the index from reading chunks +(Along with WAL which has the labels for those `HeadSeriesRef`s. It also has all those samples, but by using m-mapped chunks we can save cpu/time and not replay all of WAL on startup) + +2) During querying, `HeadSeriesRef` are limited to 2^40 (see `HeadChunkRef`) + +3) The last `HeadSeriesRef` is always replayed from the WAL and is continued from there. + +#### `BlockSeriesRef` + +Persistent blocks are independent entities and the format/structure is completely different from head block. + +In blocks, series are lexicographically ordered by labels and the byte offset in the index file (divided by 16 because they're all aligned on 16 bytes) becomes the `BlockSeriesRef`. + +They are not sequential because index entries may be multiples of 16 bytes. And they don't start from 0 because the byte offset is absolute and includes the magic number, symbols table, etc. + +`BlockSeriesRef` are only 32 bits for now, because 64 bits would slow down the postings lists disk access. 
(note: this limits the index size to 2^32 * 16 = 64 GB) + + +See also: +* https://ganeshvernekar.com/blog/prometheus-tsdb-persistent-block-and-its-index/#3-index +* https://ganeshvernekar.com/blog/prometheus-tsdb-persistent-block-and-its-index/#c-series + +### `ChunkRef` + +Chunk references are used to load chunk data during query execution. +Note: we cover the implementations as used in Prometheus. Other projects may use different implementations. + +#### `HeadChunkRef` + +A `HeadChunkRef` is an 8 byte integer that packs together: + +* 5 Bytes for `HeadSeriesRef`. +* 3 Bytes for `HeadChunkID` (uint64) (see below). + +There are two implications here: + +* While `HeadSeriesRef`s can during ingestion go higher, during querying they are limited to 2^40. Querying too high numbers will lead to query failures (but not impact ingestion). +* `ChunkID` keeps growing as we enter new chunks until Prometheus restarts. If Prometheus runs too long, we might hit 2^24. + ([957 years](https://www.wolframalpha.com/input/?i=2%5E24+*+120+*+15+seconds+in+years) at 1 sample per 15 seconds). If `ChunkID=len(mmappedchunks)` then it's the head chunk. + +#### `BlockChunkRef` + +A `BlockChunkRef` is an 8 byte integer. Unlike `HeadChunkRef`, it is static and independent of factors such as Prometheus restarting. + +It packs together: + +* 4 Bytes for chunk file index in the block. This number just increments. Filenames [start at 1](https://ganeshvernekar.com/blog/prometheus-tsdb-persistent-block-and-its-index/#contents-of-a-block) +but the `BlockChunkRef` start at 0. +* 4 Bytes for the byte offset within the file. + +#### Why does `HeadChunkRef` contain a series reference and `BlockChunkRef` does not? + +The `ChunkRef` types allow retrieving the chunk data as efficiently as possible. +* In the Head block the chunks are in the series struct. So we need to reach the series before we can access the chunk from it. + Hence we need to pack the `HeadSeriesRef` to get to the series. 
+* In persistent blocks, the chunk files are separated from the index and static. Hence you only need the co-ordinates within the `chunks` directory + to get to the chunk. Hence no need of `BlockSeriesRef`. + +## Used internally in TSDB + +* [`HeadChunkID`](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/tsdb/chunks#HeadChunkID) references a chunk of a `memSeries` (either an `mmappedChunk` or `headChunk`). + If a caller has, for whatever reason, an "old" `HeadChunkID` that refers to a chunk that has been compacted into a block, querying the memSeries for it will not return any data. +* [`ChunkDiskMapperRef`](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/tsdb/chunks#ChunkDiskMapperRef) is an 8 Byte integer. + 4 Bytes are used to refer to a chunks file number and 4 bytes serve as byte offset (similar to `BlockChunkRef`). `mmappedChunk` provide this value such that callers can load the mmapped chunk from disk. + diff --git a/tsdb/docs/usage.md b/tsdb/docs/usage.md new file mode 100644 index 000000000..600973089 --- /dev/null +++ b/tsdb/docs/usage.md @@ -0,0 +1,71 @@ +# Usage + +TSDB can be - and is - used by other applications such as [Cortex](https://cortexmetrics.io/) and [Thanos](https://thanos.io/). +This directory contains documentation for any developers who wish to work on or with TSDB. + +For a full example of instantiating a database, adding and querying data, see the [tsdb example in the docs](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/tsdb). +`tsdb/db_test.go` also demonstrates various specific usages of the TSDB library. + +## Instantiating a database + +Callers should use [`tsdb.Open`](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/tsdb#Open) to open a TSDB +(the directory may be new or pre-existing). 
+This returns a [`*tsdb.DB`](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/tsdb#DB) which is the actual database. + +A `DB` has the following main components: + +* Compactor: a [leveled compactor](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/tsdb#LeveledCompactor). Note: it is currently the only compactor implementation. It runs automatically. +* [`Head`](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/tsdb#DB.Head) +* [Blocks (persistent blocks)](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/tsdb#DB.Blocks) + +The `Head` is responsible for a lot. Here are its main components: + +* [WAL](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/tsdb/wal#WAL) (Write Ahead Log). +* [`stripeSeries`](https://github.com/prometheus/prometheus/blob/411021ada9ab41095923b8d2df9365b632fd40c3/tsdb/head.go#L1292): + this holds all the active series by linking to [`memSeries`](https://github.com/prometheus/prometheus/blob/411021ada9ab41095923b8d2df9365b632fd40c3/tsdb/head.go#L1462) + by an ID (aka "ref") and by labels hash. +* Postings list (reverse index): For any label-value pair, holds all the corresponding series refs. Used for queries. +* Tombstones. + +## Adding data + +Use [`db.Appender()`](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/tsdb#DB.Appender) to obtain an "appender". +The [golang docs](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/storage#Appender) speak mostly for themselves. + +Remember: + +* Use `Commit()` to add the samples to the DB and update the WAL. +* Create a new appender each time you commit. +* Appenders are not concurrency safe, but scrapes run concurrently and as such, leverage multiple appenders concurrently. 
+ This reduces contention, although Commit() contend the same critical section (writing to the WAL is serialized), and may + inflate append tail latency if multiple appenders try to commit at the same time. + +Append may reject data due to these conditions: + +1) `timestamp < minValidTime` where `minValidTime` is the highest of: + * the maxTime of the last block (i.e. the last truncation time of Head) - updated via [`Head.Truncate()`](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/tsdb#Head.Truncate) and [`DB.compactHead()`](https://github.com/prometheus/prometheus/blob/411021ada9ab41095923b8d2df9365b632fd40c3/tsdb/db.go#L968) + * `tsdb.min-block-duration/2` older than the max time in the Head block. Note that while technically `storage.tsdb.min-block-duration` is configurable, it's a hidden option and changing it is discouraged. So We can assume this value to be 2h. + + Breaching this condition results in "out of bounds" errors. + The first condition assures the block that will be generated doesn't overlap with the previous one (which simplifies querying) + The second condition assures the sample won't go into the so called "compaction window", that is the section of the data that might be in process of being saved into a persistent block on disk. (because that logic runs concurrently with ingestion without a lock) +2) The labels don't validate. (if the set is empty or contains duplicate label names) +3) If the sample, for the respective series (based on all the labels) is out of order or has a different value for the last (highest) timestamp seen. (results in `storage.ErrOutOfOrderSample` and `storage.ErrDuplicateSampleForTimestamp` respectively) + +`Commit()` may also refuse data that is out of order with respect to samples that were added via a different appender. 
+ +## Querying data + +Use [`db.Querier()`](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/tsdb#DB.Querier) to obtain a "querier". +The [golang docs](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/storage#Querier) speak mostly for themselves. + +Remember: + +* A querier can only see data that was committed when it was created. This limits the lifetime of a querier. +* A querier should be closed when you're done with it. +* Use mint/maxt to avoid loading unneeded data. + + +## Example code + +Find the example code for ingesting samples and querying them in [`tsdb/example_test.go`](../example_test.go) diff --git a/tsdb/encoding/encoding.go b/tsdb/encoding/encoding.go index b326ab475..11a2bae83 100644 --- a/tsdb/encoding/encoding.go +++ b/tsdb/encoding/encoding.go @@ -133,7 +133,6 @@ func NewDecbufAt(bs ByteSlice, off int, castagnoliTable *crc32.Table) Decbuf { dec := Decbuf{B: b[:len(b)-4]} if castagnoliTable != nil { - if exp := binary.BigEndian.Uint32(b[len(b)-4:]); dec.Crc32(castagnoliTable) != exp { return Decbuf{E: ErrInvalidChecksum} } diff --git a/tsdb/errors/errors.go b/tsdb/errors/errors.go index aeac4d277..607a7782a 100644 --- a/tsdb/errors/errors.go +++ b/tsdb/errors/errors.go @@ -24,7 +24,7 @@ import ( type multiError []error // NewMulti returns multiError with provided errors added if not nil. -func NewMulti(errs ...error) multiError { // nolint:golint +func NewMulti(errs ...error) multiError { // nolint:revive m := multiError{} m.Add(errs...) return m diff --git a/tsdb/example_test.go b/tsdb/example_test.go new file mode 100644 index 000000000..afe73b64a --- /dev/null +++ b/tsdb/example_test.go @@ -0,0 +1,112 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tsdb + +import ( + "context" + "fmt" + "math" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/labels" +) + +func TestExample(t *testing.T) { + // Create a random dir to work in. Open() doesn't require a pre-existing dir, but + // we want to make sure not to make a mess where we shouldn't. + dir := t.TempDir() + + // Open a TSDB for reading and/or writing. + db, err := Open(dir, nil, nil, DefaultOptions(), nil) + require.NoError(t, err) + + // Open an appender for writing. + app := db.Appender(context.Background()) + + lbls := labels.FromStrings("foo", "bar") + var appendedSamples []sample + + // Ref is 0 for the first append since we don't know the reference for the series. + ts, v := time.Now().Unix(), 123.0 + ref, err := app.Append(0, lbls, ts, v) + require.NoError(t, err) + appendedSamples = append(appendedSamples, sample{ts, v, nil}) + + // Another append for a second later. + // Re-using the ref from above since it's the same series, makes append faster. + time.Sleep(time.Second) + ts, v = time.Now().Unix(), 124 + _, err = app.Append(ref, lbls, ts, v) + require.NoError(t, err) + appendedSamples = append(appendedSamples, sample{ts, v, nil}) + + // Commit to storage. + err = app.Commit() + require.NoError(t, err) + + // In case you want to do more appends after app.Commit(), + // you need a new appender. + // app = db.Appender(context.Background()) + // + // ... adding more samples. + // + // Commit to storage. 
+ // err = app.Commit() + // require.NoError(t, err) + + // Open a querier for reading. + querier, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64) + require.NoError(t, err) + + ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) + var queriedSamples []sample + for ss.Next() { + series := ss.At() + fmt.Println("series:", series.Labels().String()) + + it := series.Iterator() + for it.Next() { + ts, v := it.At() + fmt.Println("sample", ts, v) + queriedSamples = append(queriedSamples, sample{ts, v, nil}) + } + + require.NoError(t, it.Err()) + fmt.Println("it.Err():", it.Err()) + } + require.NoError(t, ss.Err()) + fmt.Println("ss.Err():", ss.Err()) + ws := ss.Warnings() + if len(ws) > 0 { + fmt.Println("warnings:", ws) + } + err = querier.Close() + require.NoError(t, err) + + // Clean up any last resources when done. + err = db.Close() + require.NoError(t, err) + + require.Equal(t, appendedSamples, queriedSamples) + + // Output: + // series: {foo="bar"} + // sample 123 + // sample 124 + // it.Err(): + // ss.Err(): +} diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 1e09da21e..516b538e1 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -20,9 +20,10 @@ import ( "unicode/utf8" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" ) @@ -284,7 +285,7 @@ func (ce *CircularExemplarStorage) Resize(l int64) int { // This math is essentially looking at nextIndex, where we would write the next exemplar to, // and find the index in the old exemplar buffer that we should start migrating exemplars from. // This way we don't migrate exemplars that would just be overwritten when migrating later exemplars. 
- var startIndex int64 = (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer)) + startIndex := (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer)) for i := int64(0); i < count; i++ { idx := (startIndex + i) % int64(len(oldBuffer)) diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go index eb95daa34..1418dcca9 100644 --- a/tsdb/exemplar_test.go +++ b/tsdb/exemplar_test.go @@ -22,11 +22,11 @@ import ( "strings" "testing" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" ) @@ -448,7 +448,6 @@ func TestResize(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - exs, err := NewCircularExemplarStorage(tc.startSize, eMetrics) require.NoError(t, err) es := exs.(*CircularExemplarStorage) @@ -456,7 +455,8 @@ func TestResize(t *testing.T) { for i := 0; int64(i) < tc.startSize; i++ { err = es.AddExemplar(labels.FromStrings("service", strconv.Itoa(i)), exemplar.Exemplar{ Value: float64(i), - Ts: int64(i)}) + Ts: int64(i), + }) require.NoError(t, err) } diff --git a/tsdb/fileutil/fileutil.go b/tsdb/fileutil/fileutil.go index 927ebe004..8ab8ce3dd 100644 --- a/tsdb/fileutil/fileutil.go +++ b/tsdb/fileutil/fileutil.go @@ -27,7 +27,7 @@ import ( // CopyDirs copies all directories, subdirectories and files recursively including the empty folders. // Source and destination must be full paths. func CopyDirs(src, dest string) error { - if err := os.MkdirAll(dest, 0777); err != nil { + if err := os.MkdirAll(dest, 0o777); err != nil { return err } files, err := readDirs(src) @@ -46,7 +46,7 @@ func CopyDirs(src, dest string) error { // Empty directories are also created. 
if stat.IsDir() { - if err := os.MkdirAll(dp, 0777); err != nil { + if err := os.MkdirAll(dp, 0o777); err != nil { return err } continue @@ -65,7 +65,7 @@ func copyFile(src, dest string) error { return err } - err = ioutil.WriteFile(dest, data, 0666) + err = ioutil.WriteFile(dest, data, 0o666) if err != nil { return err } diff --git a/tsdb/fileutil/flock.go b/tsdb/fileutil/flock.go index d5eaa7ca2..e0082e2f2 100644 --- a/tsdb/fileutil/flock.go +++ b/tsdb/fileutil/flock.go @@ -29,7 +29,7 @@ type Releaser interface { // locking has failed. Neither this function nor the returned Releaser is // goroutine-safe. func Flock(fileName string) (r Releaser, existed bool, err error) { - if err = os.MkdirAll(filepath.Dir(fileName), 0755); err != nil { + if err = os.MkdirAll(filepath.Dir(fileName), 0o755); err != nil { return nil, false, err } diff --git a/tsdb/fileutil/flock_plan9.go b/tsdb/fileutil/flock_plan9.go index 71ed67e8c..3b9550e7f 100644 --- a/tsdb/fileutil/flock_plan9.go +++ b/tsdb/fileutil/flock_plan9.go @@ -24,7 +24,7 @@ func (l *plan9Lock) Release() error { } func newLock(fileName string) (Releaser, error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0666) + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0o666) if err != nil { return nil, err } diff --git a/tsdb/fileutil/flock_solaris.go b/tsdb/fileutil/flock_solaris.go index f19c184a4..21be384d3 100644 --- a/tsdb/fileutil/flock_solaris.go +++ b/tsdb/fileutil/flock_solaris.go @@ -46,7 +46,7 @@ func (l *unixLock) set(lock bool) error { } func newLock(fileName string) (Releaser, error) { - f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0666) + f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666) if err != nil { return nil, err } diff --git a/tsdb/fileutil/flock_unix.go b/tsdb/fileutil/flock_unix.go index c0aeb6948..9637f073b 100644 --- a/tsdb/fileutil/flock_unix.go +++ b/tsdb/fileutil/flock_unix.go @@ -41,7 +41,7 @@ func (l *unixLock) set(lock bool) 
error { } func newLock(fileName string) (Releaser, error) { - f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0666) + f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666) if err != nil { return nil, err } diff --git a/documentation/prometheus-mixin/tools.go b/tsdb/fileutil/mmap_arm64.go similarity index 54% rename from documentation/prometheus-mixin/tools.go rename to tsdb/fileutil/mmap_arm64.go index 1115bb953..4c9534e93 100644 --- a/documentation/prometheus-mixin/tools.go +++ b/tsdb/fileutil/mmap_arm64.go @@ -1,10 +1,9 @@ -// Copyright 2020 The prometheus Authors -// +// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,15 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build tools -// +build tools +//go:build windows +// +build windows -// Package tools tracks dependencies for tools that used in the build process. 
-// See https://github.com/golang/go/issues/25922 -package tools +package fileutil -import ( - _ "github.com/google/go-jsonnet/cmd/jsonnet" - _ "github.com/google/go-jsonnet/cmd/jsonnetfmt" - _ "github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb" -) +const maxMapSize = 0xFFFFFFFFFFFF // 256TB diff --git a/tsdb/head.go b/tsdb/head.go index 7e95b4bf3..033a7ce26 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -29,9 +29,9 @@ import ( "go.uber.org/atomic" "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -81,7 +81,7 @@ type Head struct { series *stripeSeries deletedMtx sync.Mutex - deleted map[uint64]int // Deleted series, and what WAL segment they must be kept until. + deleted map[chunks.HeadSeriesRef]int // Deleted series, and what WAL segment they must be kept until. postings *index.MemPostings // Postings lists for terms. 
@@ -237,7 +237,7 @@ func (h *Head) resetInMemoryState() error { h.postings = index.NewUnorderedMemPostings() h.tombstones = tombstones.NewMemTombstones() h.iso = newIsolation() - h.deleted = map[uint64]int{} + h.deleted = map[chunks.HeadSeriesRef]int{} h.chunkRange.Store(h.opts.ChunkRange) h.minTime.Store(math.MaxInt64) h.maxTime.Store(math.MinInt64) @@ -496,7 +496,7 @@ func (h *Head) Init(minValidTime int64) error { start := time.Now() snapIdx, snapOffset := -1, 0 - refSeries := make(map[uint64]*memSeries) + refSeries := make(map[chunks.HeadSeriesRef]*memSeries) if h.opts.EnableMemorySnapshotOnShutdown { level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot") @@ -549,7 +549,7 @@ func (h *Head) Init(minValidTime int64) error { h.startWALReplayStatus(startFrom, endAt) - multiRef := map[uint64]uint64{} + multiRef := map[chunks.HeadSeriesRef]chunks.HeadSeriesRef{} if err == nil && startFrom >= snapIdx { sr, err := wal.NewSegmentsReader(dir) if err != nil { @@ -634,9 +634,9 @@ func (h *Head) Init(minValidTime int64) error { return nil } -func (h *Head) loadMmappedChunks(refSeries map[uint64]*memSeries) (map[uint64][]*mmappedChunk, error) { - mmappedChunks := map[uint64][]*mmappedChunk{} - if err := h.chunkDiskMapper.IterateAllChunks(func(seriesRef uint64, chunkRef chunks.ChunkDiskMapperRef, mint, maxt int64, numSamples uint16) error { +func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) (map[chunks.HeadSeriesRef][]*mmappedChunk, error) { + mmappedChunks := map[chunks.HeadSeriesRef][]*mmappedChunk{} + if err := h.chunkDiskMapper.IterateAllChunks(func(seriesRef chunks.HeadSeriesRef, chunkRef chunks.ChunkDiskMapperRef, mint, maxt int64, numSamples uint16) error { if maxt < h.minValidTime.Load() { return nil } @@ -686,19 +686,19 @@ func (h *Head) loadMmappedChunks(refSeries map[uint64]*memSeries) (map[uint64][] // removeCorruptedMmappedChunks attempts to delete the corrupted mmapped chunks and if it fails, it 
clears all the previously // loaded mmapped chunks. -func (h *Head) removeCorruptedMmappedChunks(err error, refSeries map[uint64]*memSeries) map[uint64][]*mmappedChunk { +func (h *Head) removeCorruptedMmappedChunks(err error, refSeries map[chunks.HeadSeriesRef]*memSeries) map[chunks.HeadSeriesRef][]*mmappedChunk { level.Info(h.logger).Log("msg", "Deleting mmapped chunk files") if err := h.chunkDiskMapper.DeleteCorrupted(err); err != nil { level.Info(h.logger).Log("msg", "Deletion of mmap chunk files failed, discarding chunk files completely", "err", err) - return map[uint64][]*mmappedChunk{} + return map[chunks.HeadSeriesRef][]*mmappedChunk{} } level.Info(h.logger).Log("msg", "Deletion of mmap chunk files successful, reattempting m-mapping the on-disk chunks") mmappedChunks, err := h.loadMmappedChunks(refSeries) if err != nil { level.Error(h.logger).Log("msg", "Loading on-disk chunks failed, discarding chunk files completely", "err", err) - mmappedChunks = map[uint64][]*mmappedChunk{} + mmappedChunks = map[chunks.HeadSeriesRef][]*mmappedChunk{} } return mmappedChunks @@ -881,7 +881,7 @@ func (h *Head) WaitForPendingReadersInTimeRange(mint, maxt int64) { // new range head and the new querier. This methods helps preventing races with the truncation of in-memory data. // // NOTE: The querier should already be taken before calling this. 
-func (h *Head) IsQuerierCollidingWithTruncation(querierMint, querierMaxt int64) (shouldClose bool, getNew bool, newMint int64) { +func (h *Head) IsQuerierCollidingWithTruncation(querierMint, querierMaxt int64) (shouldClose, getNew bool, newMint int64) { if !h.memTruncationInProcess.Load() { return false, false, 0 } @@ -956,7 +956,7 @@ func (h *Head) truncateWAL(mint int64) error { return nil } - keep := func(id uint64) bool { + keep := func(id chunks.HeadSeriesRef) bool { if h.series.getByID(id) != nil { return true } @@ -1023,12 +1023,15 @@ func (h *Head) Stats(statsByLabelName string) *Stats { } } +// RangeHead allows querying Head via an IndexReader, ChunkReader and tombstones.Reader +// but only within a restricted range. Used for queries and compactions. type RangeHead struct { head *Head mint, maxt int64 } // NewRangeHead returns a *RangeHead. +// There are no restrictions on mint/maxt. func NewRangeHead(head *Head, mint, maxt int64) *RangeHead { return &RangeHead{ head: head, @@ -1103,7 +1106,7 @@ func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error { var stones []tombstones.Stone for p.Next() { - series := h.series.getByID(p.At()) + series := h.series.getByID(chunks.HeadSeriesRef(p.At())) series.RLock() t0, t1 := series.minTime(), series.maxTime() @@ -1125,7 +1128,7 @@ func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error { } } for _, s := range stones { - h.tombstones.AddInterval(s.Ref, s.Intervals[0]) + h.tombstones.AddInterval(storage.SeriesRef(s.Ref), s.Intervals[0]) } return nil @@ -1165,7 +1168,7 @@ func (h *Head) gc() int64 { // that reads the WAL, wouldn't be able to use those // samples since we would have no labels for that ref ID. 
for ref := range deleted { - h.deleted[ref] = last + h.deleted[chunks.HeadSeriesRef(ref)] = last } h.deletedMtx.Unlock() } @@ -1233,7 +1236,6 @@ func (h *Head) Close() error { errs.Add(h.performChunkSnapshot()) } return errs.Err() - } // String returns an human readable representation of the TSDB head. It's important to @@ -1253,12 +1255,12 @@ func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, e } // Optimistically assume that we are the first one to create the series. - id := h.lastSeriesID.Inc() + id := chunks.HeadSeriesRef(h.lastSeriesID.Inc()) return h.getOrCreateWithID(id, hash, lset) } -func (h *Head) getOrCreateWithID(id, hash uint64, lset labels.Labels) (*memSeries, bool, error) { +func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels) (*memSeries, bool, error) { s, created, err := h.series.getOrSet(hash, lset, func() *memSeries { return newMemSeries(lset, id, h.chunkRange.Load(), &h.memChunkPool) }) @@ -1272,7 +1274,7 @@ func (h *Head) getOrCreateWithID(id, hash uint64, lset labels.Labels) (*memSerie h.metrics.seriesCreated.Inc() h.numSeries.Inc() - h.postings.Add(id, lset) + h.postings.Add(storage.SeriesRef(id), lset) return s, true, nil } @@ -1321,15 +1323,17 @@ const ( DefaultStripeSize = 1 << 14 ) -// stripeSeries locks modulo ranges of IDs and hashes to reduce lock contention. +// stripeSeries holds series by HeadSeriesRef ("ID") and also by hash of their labels. +// ID-based lookups via (getByID()) are preferred over getByHash() for performance reasons. +// It locks modulo ranges of IDs and hashes to reduce lock contention. // The locks are padded to not be on the same cache line. Filling the padded space // with the maps was profiled to be slower – likely due to the additional pointer // dereferences. type stripeSeries struct { size int - series []map[uint64]*memSeries - hashes []seriesHashmap - locks []stripeLock + series []map[chunks.HeadSeriesRef]*memSeries // Sharded by ref. 
A series ref is the value of `size` when the series was being newly added. + hashes []seriesHashmap // Sharded by label hash. + locks []stripeLock // Sharded by ref for series access, by label hash for hashes access. seriesLifecycleCallback SeriesLifecycleCallback } @@ -1342,14 +1346,14 @@ type stripeLock struct { func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *stripeSeries { s := &stripeSeries{ size: stripeSize, - series: make([]map[uint64]*memSeries, stripeSize), + series: make([]map[chunks.HeadSeriesRef]*memSeries, stripeSize), hashes: make([]seriesHashmap, stripeSize), locks: make([]stripeLock, stripeSize), seriesLifecycleCallback: seriesCallback, } for i := range s.series { - s.series[i] = map[uint64]*memSeries{} + s.series[i] = map[chunks.HeadSeriesRef]*memSeries{} } for i := range s.hashes { s.hashes[i] = seriesHashmap{} @@ -1359,9 +1363,12 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st // gc garbage collects old chunks that are strictly before mint and removes // series entirely that have no chunks left. -func (s *stripeSeries) gc(mint int64) (map[uint64]struct{}, int, int64, int) { +// note: returning map[chunks.HeadSeriesRef]struct{} would be more accurate, +// but the returned map goes into postings.Delete() which expects a map[storage.SeriesRef]struct +// and there's no easy way to cast maps. 
+func (s *stripeSeries) gc(mint int64) (map[storage.SeriesRef]struct{}, int, int64, int) { var ( - deleted = map[uint64]struct{}{} + deleted = map[storage.SeriesRef]struct{}{} deletedForCallback = []labels.Labels{} rmChunks = 0 actualMint int64 = math.MaxInt64 @@ -1400,7 +1407,7 @@ func (s *stripeSeries) gc(mint int64) (map[uint64]struct{}, int, int64, int) { if series.histogramSeries { sparseHistogramSeriesDeleted++ } - deleted[series.ref] = struct{}{} + deleted[storage.SeriesRef(series.ref)] = struct{}{} s.hashes[i].del(hash, series.lset) delete(s.series[j], series.ref) deletedForCallback = append(deletedForCallback, series.lset) @@ -1426,8 +1433,8 @@ func (s *stripeSeries) gc(mint int64) (map[uint64]struct{}, int, int64, int) { return deleted, rmChunks, actualMint, sparseHistogramSeriesDeleted } -func (s *stripeSeries) getByID(id uint64) *memSeries { - i := id & uint64(s.size-1) +func (s *stripeSeries) getByID(id chunks.HeadSeriesRef) *memSeries { + i := uint64(id) & uint64(s.size-1) s.locks[i].RLock() series := s.series[i][id] @@ -1479,7 +1486,7 @@ func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries fu // as any further calls to this methods would return that series. s.seriesLifecycleCallback.PostCreation(series.lset) - i = series.ref & uint64(s.size-1) + i = uint64(series.ref) & uint64(s.size-1) s.locks[i].Lock() s.series[i][series.ref] = series @@ -1510,20 +1517,38 @@ func (s sample) H() *histogram.Histogram { return s.h } type memSeries struct { sync.RWMutex - ref uint64 - lset labels.Labels - mmappedChunks []*mmappedChunk - mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay. - headChunk *memChunk - chunkRange int64 - firstChunkID int + ref chunks.HeadSeriesRef + lset labels.Labels + + // Immutable chunks on disk that have not yet gone into a block, in order of ascending time stamps. 
+ // When compaction runs, chunks get moved into a block and all pointers are shifted like so: + // + // /------- let's say these 2 chunks get stored into a block + // | | + // before compaction: mmappedChunks=[p5,p6,p7,p8,p9] firstChunkID=5 + // after compaction: mmappedChunks=[p7,p8,p9] firstChunkID=7 + // + // pN is the pointer to the mmappedChunk referered to by HeadChunkID=N + mmappedChunks []*mmappedChunk + + mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay. + headChunk *memChunk // Most recent chunk in memory that's still being built. + chunkRange int64 + firstChunkID chunks.HeadChunkID // HeadChunkID for mmappedChunks[0] + + nextAt int64 // Timestamp at which to cut the next chunk. + + // We keep the last 4 samples here (in addition to appending them to the chunk) so we don't need coordination between appender and querier. + // Even the most compact encoding of a sample takes 2 bits, so the last byte is not contended. + sampleBuf [4]sample + histogramBuf [4]histogramSample - nextAt int64 // Timestamp at which to cut the next chunk. - sampleBuf [4]sample - histogramBuf [4]histogramSample pendingCommit bool // Whether there are samples waiting to be committed to this series. - app chunkenc.Appender // Current appender for the chunk. + // Current appender for the head chunk. Set when a new head chunk is cut. + // It is nil only if headChunk is nil. E.g. if there was an appender that created a new series, but rolled back the commit + // (the first sample would create a headChunk, hence appender, but rollback skipped it while the Append() call would create a series). 
+ app chunkenc.Appender memChunkPool *sync.Pool @@ -1533,7 +1558,7 @@ type memSeries struct { histogramSeries bool } -func newMemSeries(lset labels.Labels, id uint64, chunkRange int64, memChunkPool *sync.Pool) *memSeries { +func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, chunkRange int64, memChunkPool *sync.Pool) *memSeries { s := &memSeries{ lset: lset, ref: id, @@ -1573,7 +1598,7 @@ func (s *memSeries) truncateChunksBefore(mint int64) (removed int) { if s.headChunk != nil && s.headChunk.maxTime < mint { // If head chunk is truncated, we can truncate all mmapped chunks. removed = 1 + len(s.mmappedChunks) - s.firstChunkID += removed + s.firstChunkID += chunks.HeadChunkID(removed) s.headChunk = nil s.mmappedChunks = nil return removed @@ -1586,7 +1611,7 @@ func (s *memSeries) truncateChunksBefore(mint int64) (removed int) { removed = i + 1 } s.mmappedChunks = append(s.mmappedChunks[:0], s.mmappedChunks[removed:]...) - s.firstChunkID += removed + s.firstChunkID += chunks.HeadChunkID(removed) } return removed } @@ -1615,7 +1640,7 @@ func overlapsClosedInterval(mint1, maxt1, mint2, maxt2 int64) bool { return mint1 <= maxt2 && mint2 <= maxt1 } -// mappedChunks describes chunk data on disk that can be mmapped +// mappedChunks describes a head chunk on disk that has been mmapped type mmappedChunk struct { ref chunks.ChunkDiskMapperRef numSamples uint16 diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 14a7db77d..d8834c09f 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -21,10 +21,10 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" 
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -40,7 +40,7 @@ type initAppender struct { var _ storage.GetRef = &initAppender{} -func (a *initAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) { +func (a *initAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { if a.app != nil { return a.app.Append(ref, lset, t, v) } @@ -50,7 +50,7 @@ func (a *initAppender) Append(ref uint64, lset labels.Labels, t int64, v float64 return a.app.Append(ref, lset, t, v) } -func (a *initAppender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) { +func (a *initAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { // Check if exemplar storage is enabled. if !a.head.opts.EnableExemplarStorage || a.head.opts.MaxExemplars.Load() <= 0 { return 0, nil @@ -67,7 +67,7 @@ func (a *initAppender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Ex return a.app.AppendExemplar(ref, l, e) } -func (a *initAppender) AppendHistogram(ref uint64, l labels.Labels, t int64, h *histogram.Histogram) (uint64, error) { +func (a *initAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) { if a.app != nil { return a.app.AppendHistogram(ref, l, t, h) } @@ -88,7 +88,7 @@ func (h *Head) initTime(t int64) { h.maxTime.CAS(math.MinInt64, t) } -func (a *initAppender) GetRef(lset labels.Labels) (uint64, labels.Labels) { +func (a *initAppender) GetRef(lset labels.Labels) (storage.SeriesRef, labels.Labels) { if g, ok := a.app.(storage.GetRef); ok { return g.GetRef(lset) } @@ -126,7 +126,7 @@ func (h *Head) Appender(_ context.Context) storage.Appender { } func (h *Head) appender() *headAppender { - appendID, cleanupAppendIDsBelow := h.iso.newAppendID() + appendID, cleanupAppendIDsBelow := h.iso.newAppendID() // 
Every appender gets an ID that is cleared upon commit/rollback. // Allocate the exemplars buffer only if exemplars are enabled. var exemplarsBuf []exemplarWithSeriesRef @@ -153,6 +153,16 @@ func (h *Head) appendableMinValidTime() int64 { return max(h.minValidTime.Load(), h.MaxTime()-h.chunkRange.Load()/2) } +// AppendableMinValidTime returns the minimum valid time for samples to be appended to the Head. +// Returns false if Head hasn't been initialized yet and the minimum time isn't known yet. +func (h *Head) AppendableMinValidTime() (int64, bool) { + if h.MinTime() == math.MaxInt64 { + return 0, false + } + + return h.appendableMinValidTime(), true +} + func max(a, b int64) int64 { if a > b { return a @@ -217,7 +227,7 @@ func (h *Head) putBytesBuffer(b []byte) { } type exemplarWithSeriesRef struct { - ref uint64 + ref storage.SeriesRef exemplar exemplar.Exemplar } @@ -226,24 +236,24 @@ type headAppender struct { minValidTime int64 // No samples below this timestamp are allowed. mint, maxt int64 - series []record.RefSeries - samples []record.RefSample - exemplars []exemplarWithSeriesRef - sampleSeries []*memSeries - histograms []record.RefHistogram - histogramSeries []*memSeries + series []record.RefSeries // New series held by this appender. + samples []record.RefSample // New float samples held by this appender. + exemplars []exemplarWithSeriesRef // New exemplars held by this appender. + sampleSeries []*memSeries // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once). + histograms []record.RefHistogram // New histogram samples held by this appender. + histogramSeries []*memSeries // Histogram series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once). 
appendID, cleanupAppendIDsBelow uint64 closed bool } -func (a *headAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) { +func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { if t < a.minValidTime { a.head.metrics.outOfBoundSamples.Inc() return 0, storage.ErrOutOfBounds } - s := a.head.series.getByID(ref) + s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) if s == nil { // Ensure no empty labels have gotten through. lset = lset.WithoutEmpty() @@ -297,7 +307,7 @@ func (a *headAppender) Append(ref uint64, lset labels.Labels, t int64, v float64 V: v, }) a.sampleSeries = append(a.sampleSeries, s) - return s.ref, nil + return storage.SeriesRef(s.ref), nil } // appendable checks whether the given sample is valid for appending to the series. @@ -345,14 +355,22 @@ func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error { // AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't // use getOrCreate or make any of the lset sanity checks that Append does. -func (a *headAppender) AppendExemplar(ref uint64, _ labels.Labels, e exemplar.Exemplar) (uint64, error) { +func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { // Check if exemplar storage is enabled. if !a.head.opts.EnableExemplarStorage || a.head.opts.MaxExemplars.Load() <= 0 { return 0, nil } - s := a.head.series.getByID(ref) + + // Get Series + s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) if s == nil { - return 0, fmt.Errorf("unknown series ref. when trying to add exemplar: %d", ref) + s = a.head.series.getByHash(lset.Hash(), lset) + if s != nil { + ref = storage.SeriesRef(s.ref) + } + } + if s == nil { + return 0, fmt.Errorf("unknown HeadSeriesRef when trying to add exemplar: %d", ref) } // Ensure no empty labels have gotten through. 
@@ -369,16 +387,16 @@ func (a *headAppender) AppendExemplar(ref uint64, _ labels.Labels, e exemplar.Ex a.exemplars = append(a.exemplars, exemplarWithSeriesRef{ref, e}) - return s.ref, nil + return storage.SeriesRef(s.ref), nil } -func (a *headAppender) AppendHistogram(ref uint64, lset labels.Labels, t int64, h *histogram.Histogram) (uint64, error) { +func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) { if t < a.minValidTime { a.head.metrics.outOfBoundSamples.Inc() return 0, storage.ErrOutOfBounds } - s := a.head.series.getByID(ref) + s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) if s == nil { // Ensure no empty labels have gotten through. lset = lset.WithoutEmpty() @@ -430,20 +448,21 @@ func (a *headAppender) AppendHistogram(ref uint64, lset labels.Labels, t int64, H: h, }) a.histogramSeries = append(a.histogramSeries, s) - return s.ref, nil + return storage.SeriesRef(s.ref), nil } var _ storage.GetRef = &headAppender{} -func (a *headAppender) GetRef(lset labels.Labels) (uint64, labels.Labels) { +func (a *headAppender) GetRef(lset labels.Labels) (storage.SeriesRef, labels.Labels) { s := a.head.series.getByHash(lset.Hash(), lset) if s == nil { return 0, nil } // returned labels must be suitable to pass to Append() - return s.ref, s.lset + return storage.SeriesRef(s.ref), s.lset } +// log writes all headAppender's data to the WAL. 
func (a *headAppender) log() error { if a.head.wal == nil { return nil @@ -493,7 +512,7 @@ func exemplarsForEncoding(es []exemplarWithSeriesRef) []record.RefExemplar { ret := make([]record.RefExemplar, 0, len(es)) for _, e := range es { ret = append(ret, record.RefExemplar{ - Ref: e.ref, + Ref: chunks.HeadSeriesRef(e.ref), T: e.exemplar.Ts, V: e.exemplar.Value, Labels: e.exemplar.Labels, @@ -502,6 +521,7 @@ func exemplarsForEncoding(es []exemplarWithSeriesRef) []record.RefExemplar { return ret } +// Commit writes to the WAL and adds the data to the Head. func (a *headAppender) Commit() (err error) { if a.closed { return ErrAppenderClosed @@ -515,7 +535,7 @@ func (a *headAppender) Commit() (err error) { // No errors logging to WAL, so pass the exemplars along to the in memory storage. for _, e := range a.exemplars { - s := a.head.series.getByID(e.ref) + s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref)) // We don't instrument exemplar appends here, all is instrumented by storage. if err := a.head.exemplars.AddExemplar(s.lset, e.exemplar); err != nil { if err == storage.ErrOutOfOrderExemplar { @@ -687,7 +707,7 @@ func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, chunkDiskMa if c == nil { if len(s.mmappedChunks) > 0 && s.mmappedChunks[len(s.mmappedChunks)-1].maxTime >= t { - // Out of order sample. Sample timestamp is already in the mmaped chunks, so ignore it. + // Out of order sample. Sample timestamp is already in the mmapped chunks, so ignore it. return c, false, false } // There is no chunk in this series yet, create the first chunk for the sample. @@ -785,6 +805,7 @@ func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper }) } +// Rollback removes the samples and exemplars from headAppender and writes any series to WAL. 
func (a *headAppender) Rollback() (err error) { if a.closed { return ErrAppenderClosed diff --git a/tsdb/head_bench_test.go b/tsdb/head_bench_test.go index 2f9100d13..a62429137 100644 --- a/tsdb/head_bench_test.go +++ b/tsdb/head_bench_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/atomic" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" ) func BenchmarkHeadStripeSeriesCreate(b *testing.B) { diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 48a294ade..340a49ddf 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -22,7 +22,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -116,7 +116,7 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { // Fetch all the series only once. for p.Next() { - s := h.head.series.getByID(p.At()) + s := h.head.series.getByID(chunks.HeadSeriesRef(p.At())) if s == nil { level.Debug(h.head.logger).Log("msg", "Looked up series not found") } else { @@ -132,16 +132,16 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { }) // Convert back to list. - ep := make([]uint64, 0, len(series)) + ep := make([]storage.SeriesRef, 0, len(series)) for _, p := range series { - ep = append(ep, p.ref) + ep = append(ep, storage.SeriesRef(p.ref)) } return index.NewListPostings(ep) } // Series returns the series for the given reference. 
-func (h *headIndexReader) Series(ref uint64, lbls *labels.Labels, chks *[]chunks.Meta) error { - s := h.head.series.getByID(ref) +func (h *headIndexReader) Series(ref storage.SeriesRef, lbls *labels.Labels, chks *[]chunks.Meta) error { + s := h.head.series.getByID(chunks.HeadSeriesRef(ref)) if s == nil { h.head.metrics.seriesNotFound.Inc() @@ -162,27 +162,28 @@ func (h *headIndexReader) Series(ref uint64, lbls *labels.Labels, chks *[]chunks *chks = append(*chks, chunks.Meta{ MinTime: c.minTime, MaxTime: c.maxTime, - Ref: packChunkID(s.ref, uint64(s.chunkID(i))), + Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(i))), }) } if s.headChunk != nil && s.headChunk.OverlapsClosedInterval(h.mint, h.maxt) { *chks = append(*chks, chunks.Meta{ MinTime: s.headChunk.minTime, MaxTime: math.MaxInt64, // Set the head chunks as open (being appended to). - Ref: packChunkID(s.ref, uint64(s.chunkID(len(s.mmappedChunks)))), + Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(len(s.mmappedChunks)))), }) } return nil } -func (s *memSeries) chunkID(pos int) int { - return pos + s.firstChunkID +// headChunkID returns the HeadChunkID corresponding to .mmappedChunks[pos] +func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID { + return chunks.HeadChunkID(pos) + s.firstChunkID } // LabelValueFor returns label value for the given label name in the series referred to by ID. -func (h *headIndexReader) LabelValueFor(id uint64, label string) (string, error) { - memSeries := h.head.series.getByID(id) +func (h *headIndexReader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { + memSeries := h.head.series.getByID(chunks.HeadSeriesRef(id)) if memSeries == nil { return "", storage.ErrNotFound } @@ -197,10 +198,10 @@ func (h *headIndexReader) LabelValueFor(id uint64, label string) (string, error) // LabelNamesFor returns all the label names for the series referred to by IDs. // The names returned are sorted. 
-func (h *headIndexReader) LabelNamesFor(ids ...uint64) ([]string, error) { +func (h *headIndexReader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { namesMap := make(map[string]struct{}) for _, id := range ids { - memSeries := h.head.series.getByID(id) + memSeries := h.head.series.getByID(chunks.HeadSeriesRef(id)) if memSeries == nil { return nil, storage.ErrNotFound } @@ -249,25 +250,9 @@ func (h *headChunkReader) Close() error { return nil } -// packChunkID packs a seriesID and a chunkID within it into a global 8 byte ID. -// It panicks if the seriesID exceeds 5 bytes or the chunk ID 3 bytes. -func packChunkID(seriesID, chunkID uint64) uint64 { - if seriesID > (1<<40)-1 { - panic("series ID exceeds 5 bytes") - } - if chunkID > (1<<24)-1 { - panic("chunk ID exceeds 3 bytes") - } - return (seriesID << 24) | chunkID -} - -func unpackChunkID(id uint64) (seriesID, chunkID uint64) { - return id >> 24, (id << 40) >> 40 -} - // Chunk returns the chunk for the reference number. -func (h *headChunkReader) Chunk(ref uint64) (chunkenc.Chunk, error) { - sid, cid := unpackChunkID(ref) +func (h *headChunkReader) Chunk(ref chunks.ChunkRef) (chunkenc.Chunk, error) { + sid, cid := chunks.HeadChunkRef(ref).Unpack() s := h.head.series.getByID(sid) // This means that the series has been garbage collected. @@ -276,7 +261,7 @@ func (h *headChunkReader) Chunk(ref uint64) (chunkenc.Chunk, error) { } s.Lock() - c, garbageCollect, err := s.chunk(int(cid), h.head.chunkDiskMapper) + c, garbageCollect, err := s.chunk(cid, h.head.chunkDiskMapper) if err != nil { s.Unlock() return nil, err @@ -299,21 +284,21 @@ func (h *headChunkReader) Chunk(ref uint64) (chunkenc.Chunk, error) { return &safeChunk{ Chunk: c.chunk, s: s, - cid: int(cid), + cid: cid, isoState: h.isoState, chunkDiskMapper: h.head.chunkDiskMapper, }, nil } -// chunk returns the chunk for the chunk id from memory or by m-mapping it from the disk. 
+// chunk returns the chunk for the HeadChunkID from memory or by m-mapping it from the disk. // If garbageCollect is true, it means that the returned *memChunk -// (and not the chunkenc.Chunk inside it) can be garbage collected after it's usage. -func (s *memSeries) chunk(id int, chunkDiskMapper *chunks.ChunkDiskMapper) (chunk *memChunk, garbageCollect bool, err error) { +// (and not the chunkenc.Chunk inside it) can be garbage collected after its usage. +func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper) (chunk *memChunk, garbageCollect bool, err error) { // ix represents the index of chunk in the s.mmappedChunks slice. The chunk id's are // incremented by 1 when new chunk is created, hence (id - firstChunkID) gives the slice index. // The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix // is len(s.mmappedChunks), it represents the next chunk, which is the head chunk. - ix := id - s.firstChunkID + ix := int(id) - int(s.firstChunkID) if ix < 0 || ix > len(s.mmappedChunks) { return nil, false, storage.ErrNotFound } @@ -340,7 +325,7 @@ func (s *memSeries) chunk(id int, chunkDiskMapper *chunks.ChunkDiskMapper) (chun type safeChunk struct { chunkenc.Chunk s *memSeries - cid int + cid chunks.HeadChunkID isoState *isolationState chunkDiskMapper *chunks.ChunkDiskMapper } @@ -352,9 +337,9 @@ func (c *safeChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator { return it } -// iterator returns a chunk iterator. +// iterator returns a chunk iterator for the requested chunkID, or a NopIterator if the requested ID is out of range. // It is unsafe to call this concurrently with s.append(...) without holding the series lock. 
-func (s *memSeries) iterator(id int, isoState *isolationState, chunkDiskMapper *chunks.ChunkDiskMapper, it chunkenc.Iterator) chunkenc.Iterator { +func (s *memSeries) iterator(id chunks.HeadChunkID, isoState *isolationState, chunkDiskMapper *chunks.ChunkDiskMapper, it chunkenc.Iterator) chunkenc.Iterator { c, garbageCollect, err := s.chunk(id, chunkDiskMapper) // TODO(fabxc): Work around! An error will be returns when a querier have retrieved a pointer to a // series's chunk, which got then garbage collected before it got @@ -372,7 +357,7 @@ func (s *memSeries) iterator(id int, isoState *isolationState, chunkDiskMapper * } }() - ix := id - s.firstChunkID + ix := int(id) - int(s.firstChunkID) numSamples := c.chunk.NumSamples() stopAfter := numSamples @@ -419,7 +404,7 @@ func (s *memSeries) iterator(id int, isoState *isolationState, chunkDiskMapper * return chunkenc.NewNopIterator() } - if id-s.firstChunkID < len(s.mmappedChunks) { + if int(id)-int(s.firstChunkID) < len(s.mmappedChunks) { if stopAfter == numSamples { return c.chunk.Iterator(it) } @@ -460,6 +445,8 @@ func (s *memSeries) iterator(id int, isoState *isolationState, chunkDiskMapper * } } +// memSafeIterator returns values from the wrapped stopIterator +// except the last 4, which come from buf. type memSafeIterator struct { stopIterator @@ -527,6 +514,8 @@ func (it *memSafeIterator) AtHistogram() (int64, *histogram.Histogram) { return s.t, s.h } +// stopIterator wraps an Iterator, but only returns the first +// stopAfter values, if initialized with i=-1. 
type stopIterator struct { chunkenc.Iterator diff --git a/tsdb/head_test.go b/tsdb/head_test.go index a8cb10dc4..2ef846556 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -37,10 +37,10 @@ import ( "go.uber.org/atomic" "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -65,7 +65,7 @@ func newTestHead(t testing.TB, chunkRange int64, compressWAL bool) (*Head, *wal. h, err := NewHead(nil, nil, wlog, opts, nil) require.NoError(t, err) - require.NoError(t, h.chunkDiskMapper.IterateAllChunks(func(_ uint64, _ chunks.ChunkDiskMapperRef, _, _ int64, _ uint16) error { return nil })) + require.NoError(t, h.chunkDiskMapper.IterateAllChunks(func(_ chunks.HeadSeriesRef, _ chunks.ChunkDiskMapperRef, _, _ int64, _ uint16) error { return nil })) t.Cleanup(func() { require.NoError(t, os.RemoveAll(dir)) @@ -202,7 +202,7 @@ func BenchmarkLoadWAL(b *testing.B) { for j := 1; len(lbls) < labelsPerSeries; j++ { lbls[defaultLabelName+strconv.Itoa(j)] = defaultLabelValue + strconv.Itoa(j) } - refSeries = append(refSeries, record.RefSeries{Ref: uint64(i) * 101, Labels: labels.FromMap(lbls)}) + refSeries = append(refSeries, record.RefSeries{Ref: chunks.HeadSeriesRef(i) * 101, Labels: labels.FromMap(lbls)}) } populateTestWAL(b, w, []interface{}{refSeries}) } @@ -214,7 +214,7 @@ func BenchmarkLoadWAL(b *testing.B) { refSamples = refSamples[:0] for k := j * c.seriesPerBatch; k < (j+1)*c.seriesPerBatch; k++ { refSamples = append(refSamples, record.RefSample{ - Ref: uint64(k) * 101, + Ref: chunks.HeadSeriesRef(k) * 
101, T: int64(i) * 10, V: float64(i) * 100, }) @@ -229,7 +229,7 @@ func BenchmarkLoadWAL(b *testing.B) { require.NoError(b, err) for k := 0; k < c.batches*c.seriesPerBatch; k++ { // Create one mmapped chunk per series, with one sample at the given time. - s := newMemSeries(labels.Labels{}, uint64(k)*101, c.mmappedChunkT, nil) + s := newMemSeries(labels.Labels{}, chunks.HeadSeriesRef(k)*101, c.mmappedChunkT, nil) s.append(c.mmappedChunkT, 42, 0, chunkDiskMapper) s.mmapCurrentHeadChunk(chunkDiskMapper) } @@ -243,7 +243,7 @@ func BenchmarkLoadWAL(b *testing.B) { refExemplars = refExemplars[:0] for k := j * c.seriesPerBatch; k < (j+1)*c.seriesPerBatch; k++ { refExemplars = append(refExemplars, record.RefExemplar{ - Ref: uint64(k) * 101, + Ref: chunks.HeadSeriesRef(k) * 101, T: int64(i) * 10, V: float64(i) * 100, Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", i)), @@ -499,10 +499,10 @@ func TestHead_Truncate(t *testing.T) { postingsC1, _ := index.ExpandPostings(h.postings.Get("c", "1")) postingsAll, _ := index.ExpandPostings(h.postings.Get("", "")) - require.Equal(t, []uint64{s1.ref}, postingsA1) - require.Equal(t, []uint64{s2.ref}, postingsA2) - require.Equal(t, []uint64{s1.ref, s2.ref}, postingsB1) - require.Equal(t, []uint64{s1.ref, s2.ref}, postingsAll) + require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s1.ref)}, postingsA1) + require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s2.ref)}, postingsA2) + require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s1.ref), storage.SeriesRef(s2.ref)}, postingsB1) + require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s1.ref), storage.SeriesRef(s2.ref)}, postingsAll) require.Nil(t, postingsB2) require.Nil(t, postingsC1) @@ -563,7 +563,7 @@ func TestMemSeries_truncateChunks(t *testing.T) { // Check that truncate removes half of the chunks and afterwards // that the ID of the last chunk still gives us the same chunk afterwards. countBefore := len(s.mmappedChunks) + 1 // +1 for the head chunk. 
- lastID := s.chunkID(countBefore - 1) + lastID := s.headChunkID(countBefore - 1) lastChunk, _, err := s.chunk(lastID, chunkDiskMapper) require.NoError(t, err) require.NotNil(t, lastChunk) @@ -584,11 +584,11 @@ func TestMemSeries_truncateChunks(t *testing.T) { // Validate that the series' sample buffer is applied correctly to the last chunk // after truncation. - it1 := s.iterator(s.chunkID(len(s.mmappedChunks)), nil, chunkDiskMapper, nil) + it1 := s.iterator(s.headChunkID(len(s.mmappedChunks)), nil, chunkDiskMapper, nil) _, ok := it1.(*memSafeIterator) require.True(t, ok) - it2 := s.iterator(s.chunkID(len(s.mmappedChunks)-1), nil, chunkDiskMapper, nil) + it2 := s.iterator(s.headChunkID(len(s.mmappedChunks)-1), nil, chunkDiskMapper, nil) _, ok = it2.(*memSafeIterator) require.False(t, ok, "non-last chunk incorrectly wrapped with sample buffer") } @@ -844,7 +844,6 @@ func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) { require.Equal(t, 1, series) require.Equal(t, 9999, samples) require.Equal(t, 1, stones) - } func TestDelete_e2e(t *testing.T) { @@ -1399,7 +1398,7 @@ func TestWalRepair_DecodingError(t *testing.T) { err = errors.Cause(initErr) // So that we can pick up errors even if wrapped. _, corrErr := err.(*wal.CorruptionErr) require.True(t, corrErr, "reading the wal didn't return corruption error") - require.NoError(t, w.Close()) + require.NoError(t, h.Close()) // Head will close the wal as well. } // Open the db to trigger a repair. @@ -1474,7 +1473,7 @@ func TestHeadReadWriterRepair(t *testing.T) { require.Equal(t, 7, len(files)) // Corrupt the 4th file by writing a random byte to series ref. 
- f, err := os.OpenFile(filepath.Join(mmappedChunksDir(dir), files[3].Name()), os.O_WRONLY, 0666) + f, err := os.OpenFile(filepath.Join(mmappedChunksDir(dir), files[3].Name()), os.O_WRONLY, 0o666) require.NoError(t, err) n, err := f.WriteAt([]byte{67, 88}, chunks.HeadChunkFileHeaderSize+2) require.NoError(t, err) @@ -1948,7 +1947,8 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) { lastSeriesTimestamp int64 = 300 ) var ( - seriesTimestamps = []int64{firstSeriesTimestamp, + seriesTimestamps = []int64{ + firstSeriesTimestamp, secondSeriesTimestamp, lastSeriesTimestamp, } @@ -1965,7 +1965,7 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) { require.Equal(t, head.MinTime(), firstSeriesTimestamp) require.Equal(t, head.MaxTime(), lastSeriesTimestamp) - var testCases = []struct { + testCases := []struct { name string mint int64 maxt int64 @@ -2009,7 +2009,7 @@ func TestHeadLabelValuesWithMatchers(t *testing.T) { } require.NoError(t, app.Commit()) - var testCases = []struct { + testCases := []struct { name string labelName string matchers []*labels.Matcher @@ -2264,7 +2264,7 @@ func TestMemSafeIteratorSeekIntoBuffer(t *testing.T) { require.True(t, ok, "sample append failed") } - it := s.iterator(s.chunkID(len(s.mmappedChunks)), nil, chunkDiskMapper, nil) + it := s.iterator(s.headChunkID(len(s.mmappedChunks)), nil, chunkDiskMapper, nil) _, ok := it.(*memSafeIterator) require.True(t, ok) @@ -2319,7 +2319,7 @@ func TestChunkNotFoundHeadGCRace(t *testing.T) { var ( app = db.Appender(context.Background()) - ref = uint64(0) + ref = storage.SeriesRef(0) mint, maxt = int64(0), int64(0) err error ) @@ -2384,7 +2384,7 @@ func TestDataMissingOnQueryDuringCompaction(t *testing.T) { var ( app = db.Appender(context.Background()) - ref = uint64(0) + ref = storage.SeriesRef(0) mint, maxt = int64(0), int64(0) err error ) @@ -2431,7 +2431,7 @@ func TestIsQuerierCollidingWithTruncation(t *testing.T) { var ( app = db.Appender(context.Background()) - ref = uint64(0) + 
ref = storage.SeriesRef(0) err error ) @@ -2478,7 +2478,7 @@ func TestWaitForPendingReadersInTimeRange(t *testing.T) { var ( app = db.Appender(context.Background()) - ref = uint64(0) + ref = storage.SeriesRef(0) err error ) @@ -2644,10 +2644,10 @@ func TestChunkSnapshot(t *testing.T) { numSeries := 10 expSeries := make(map[string][]tsdbutil.Sample) - expTombstones := make(map[uint64]tombstones.Intervals) + expTombstones := make(map[storage.SeriesRef]tombstones.Intervals) expExemplars := make([]ex, 0) - addExemplar := func(app storage.Appender, ref uint64, lbls labels.Labels, ts int64) { + addExemplar := func(app storage.Appender, ref storage.SeriesRef, lbls labels.Labels, ts int64) { e := ex{ seriesLabels: lbls, e: exemplar.Exemplar{ @@ -2670,8 +2670,8 @@ func TestChunkSnapshot(t *testing.T) { checkTombstones := func() { tr, err := head.Tombstones() require.NoError(t, err) - actTombstones := make(map[uint64]tombstones.Intervals) - require.NoError(t, tr.Iter(func(ref uint64, itvs tombstones.Intervals) error { + actTombstones := make(map[storage.SeriesRef]tombstones.Intervals) + require.NoError(t, tr.Iter(func(ref storage.SeriesRef, itvs tombstones.Intervals) error { for _, itv := range itvs { actTombstones[ref].Add(itv) } @@ -2745,7 +2745,7 @@ func TestChunkSnapshot(t *testing.T) { // Add some tombstones. var enc record.Encoder for i := 1; i <= numSeries; i++ { - ref := uint64(i) + ref := storage.SeriesRef(i) itvs := tombstones.Intervals{ {Mint: 1234, Maxt: 2345}, {Mint: 3456, Maxt: 4567}, @@ -2805,7 +2805,7 @@ func TestChunkSnapshot(t *testing.T) { // Add more tombstones. 
var enc record.Encoder for i := 1; i <= numSeries; i++ { - ref := uint64(i) + ref := storage.SeriesRef(i) itvs := tombstones.Intervals{ {Mint: 12345, Maxt: 23456}, {Mint: 34567, Maxt: 45678}, @@ -2868,7 +2868,6 @@ func TestChunkSnapshot(t *testing.T) { require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal)) } - } func TestSnapshotError(t *testing.T) { diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index e7e18fdb2..c2b0308a5 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -25,24 +25,24 @@ import ( "sync" "time" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/prometheus/prometheus/tsdb/encoding" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" - "github.com/prometheus/prometheus/tsdb/fileutil" - "github.com/go-kit/log/level" "github.com/pkg/errors" "go.uber.org/atomic" - "github.com/prometheus/prometheus/pkg/exemplar" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/encoding" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/wal" ) -func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks map[uint64][]*mmappedChunk) (err error) { +func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) { // Track number of samples that referenced a series we don't know about // for error reporting. 
var unknownRefs atomic.Uint64 @@ -269,8 +269,8 @@ Outer: break Outer } - if h.lastSeriesID.Load() < walSeries.Ref { - h.lastSeriesID.Store(walSeries.Ref) + if chunks.HeadSeriesRef(h.lastSeriesID.Load()) < walSeries.Ref { + h.lastSeriesID.Store(uint64(walSeries.Ref)) } mmc := mmappedChunks[walSeries.Ref] @@ -287,7 +287,7 @@ Outer: multiRef[walSeries.Ref] = mSeries.ref - idx := mSeries.ref % uint64(n) + idx := uint64(mSeries.ref) % uint64(n) // It is possible that some old sample is being processed in processWALSamples that // could cause race below. So we wait for the goroutine to empty input the buffer and finish // processing all old samples after emptying the buffer. @@ -359,7 +359,7 @@ Outer: if r, ok := multiRef[sam.Ref]; ok { sam.Ref = r } - mod := sam.Ref % uint64(n) + mod := uint64(sam.Ref) % uint64(n) shards[mod] = append(shards[mod], sam) } for i := 0; i < n; i++ { @@ -375,11 +375,11 @@ Outer: if itv.Maxt < h.minValidTime.Load() { continue } - if m := h.series.getByID(s.Ref); m == nil { + if m := h.series.getByID(chunks.HeadSeriesRef(s.Ref)); m == nil { unknownRefs.Inc() continue } - h.tombstones.AddInterval(s.Ref, itv) + h.tombstones.AddInterval(storage.SeriesRef(s.Ref), itv) } } //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. 
@@ -498,7 +498,7 @@ const ( ) type chunkSnapshotRecord struct { - ref uint64 + ref chunks.HeadSeriesRef lset labels.Labels chunkRange int64 mc *memChunk @@ -509,7 +509,7 @@ func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte { buf := encoding.Encbuf{B: b} buf.PutByte(chunkSnapshotRecordTypeSeries) - buf.PutBE64(s.ref) + buf.PutBE64(uint64(s.ref)) buf.PutUvarint(len(s.lset)) for _, l := range s.lset { buf.PutUvarintStr(l.Name) @@ -544,7 +544,7 @@ func decodeSeriesFromChunkSnapshot(b []byte) (csr chunkSnapshotRecord, err error return csr, errors.Errorf("invalid record type %x", flag) } - csr.ref = dec.Be64() + csr.ref = chunks.HeadSeriesRef(dec.Be64()) // The label set written to the disk is already sorted. csr.lset = make(labels.Labels, dec.Uvarint()) @@ -655,7 +655,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { cpdirtmp := cpdir + ".tmp" stats.Dir = cpdir - if err := os.MkdirAll(cpdirtmp, 0777); err != nil { + if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { return stats, errors.Wrap(err, "create chunk snapshot dir") } cp, err := wal.New(nil, nil, cpdirtmp, h.wal.CompressionEnabled()) @@ -886,7 +886,7 @@ func DeleteChunkSnapshots(dir string, maxIndex, maxOffset int) error { // loadChunkSnapshot replays the chunk snapshot and restores the Head state from it. If there was any error returned, // it is the responsibility of the caller to clear the contents of the Head. 
-func (h *Head) loadChunkSnapshot() (int, int, map[uint64]*memSeries, error) { +func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSeries, error) { dir, snapIdx, snapOffset, err := LastChunkSnapshot(h.opts.ChunkDirRoot) if err != nil { if err == record.ErrNotFound { @@ -912,9 +912,9 @@ func (h *Head) loadChunkSnapshot() (int, int, map[uint64]*memSeries, error) { n = runtime.GOMAXPROCS(0) wg sync.WaitGroup recordChan = make(chan chunkSnapshotRecord, 5*n) - shardedRefSeries = make([]map[uint64]*memSeries, n) + shardedRefSeries = make([]map[chunks.HeadSeriesRef]*memSeries, n) errChan = make(chan error, n) - refSeries map[uint64]*memSeries + refSeries map[chunks.HeadSeriesRef]*memSeries exemplarBuf []record.RefExemplar dec record.Decoder ) @@ -930,7 +930,7 @@ func (h *Head) loadChunkSnapshot() (int, int, map[uint64]*memSeries, error) { } }() - shardedRefSeries[idx] = make(map[uint64]*memSeries) + shardedRefSeries[idx] = make(map[chunks.HeadSeriesRef]*memSeries) localRefSeries := shardedRefSeries[idx] for csr := range rc { @@ -940,8 +940,8 @@ func (h *Head) loadChunkSnapshot() (int, int, map[uint64]*memSeries, error) { return } localRefSeries[csr.ref] = series - if h.lastSeriesID.Load() < series.ref { - h.lastSeriesID.Store(series.ref) + if chunks.HeadSeriesRef(h.lastSeriesID.Load()) < series.ref { + h.lastSeriesID.Store(uint64(series.ref)) } series.chunkRange = csr.chunkRange @@ -996,7 +996,7 @@ Outer: break Outer } - if err = tr.Iter(func(ref uint64, ivs tombstones.Intervals) error { + if err = tr.Iter(func(ref storage.SeriesRef, ivs tombstones.Intervals) error { h.tombstones.AddInterval(ref, ivs...) 
return nil }); err != nil { @@ -1010,7 +1010,7 @@ Outer: close(recordChan) wg.Wait() - refSeries = make(map[uint64]*memSeries, numSeries) + refSeries = make(map[chunks.HeadSeriesRef]*memSeries, numSeries) for _, shard := range shardedRefSeries { for k, v := range shard { refSeries[k] = v @@ -1076,7 +1076,7 @@ Outer: if len(refSeries) == 0 { // We had no exemplar record, so we have to build the map here. - refSeries = make(map[uint64]*memSeries, numSeries) + refSeries = make(map[chunks.HeadSeriesRef]*memSeries, numSeries) for _, shard := range shardedRefSeries { for k, v := range shard { refSeries[k] = v diff --git a/tsdb/index/index.go b/tsdb/index/index.go index d6f5d6b88..957e468f6 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -30,7 +30,7 @@ import ( "github.com/pkg/errors" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/encoding" @@ -143,7 +143,7 @@ type Writer struct { // Hold last series to validate that clients insert new series in order. lastSeries labels.Labels - lastRef uint64 + lastRef storage.SeriesRef crc32 hash.Hash @@ -262,7 +262,7 @@ type FileWriter struct { } func NewFileWriter(name string) (*FileWriter, error) { - f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0666) + f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0o666) if err != nil { return nil, err } @@ -414,7 +414,7 @@ func (w *Writer) writeMeta() error { } // AddSeries adds the series one at a time along with its chunks. 
-func (w *Writer) AddSeries(ref uint64, lset labels.Labels, chunks ...chunks.Meta) error { +func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ...chunks.Meta) error { if err := w.ensureStage(idxStageSeries); err != nil { return err } @@ -472,7 +472,7 @@ func (w *Writer) AddSeries(ref uint64, lset labels.Labels, chunks ...chunks.Meta c := chunks[0] w.buf2.PutVarint64(c.MinTime) w.buf2.PutUvarint64(uint64(c.MaxTime - c.MinTime)) - w.buf2.PutUvarint64(c.Ref) + w.buf2.PutUvarint64(uint64(c.Ref)) t0 := c.MaxTime ref0 := int64(c.Ref) @@ -525,7 +525,7 @@ func (w *Writer) AddSymbol(sym string) error { func (w *Writer) finishSymbols() error { symbolTableSize := w.f.pos - w.toc.Symbols - 4 // The symbol table's part is 4 bytes. So the total symbol table size must be less than or equal to 2^32-1 - if symbolTableSize > 4294967295 { + if symbolTableSize > math.MaxUint32 { return errors.Errorf("symbol table size exceeds 4 bytes: %d", symbolTableSize) } @@ -657,7 +657,11 @@ func (w *Writer) writeLabelIndex(name string, values []uint32) error { // Write out the length. w.buf1.Reset() - w.buf1.PutBE32int(int(w.f.pos - startPos - 4)) + l := w.f.pos - startPos - 4 + if l > math.MaxUint32 { + return errors.Errorf("label index size exceeds 4 bytes: %d", l) + } + w.buf1.PutBE32int(int(l)) if err := w.writeAt(w.buf1.Get(), startPos); err != nil { return err } @@ -697,7 +701,11 @@ func (w *Writer) writeLabelIndexesOffsetTable() error { } // Write out the length. w.buf1.Reset() - w.buf1.PutBE32int(int(w.f.pos - startPos - 4)) + l := w.f.pos - startPos - 4 + if l > math.MaxUint32 { + return errors.Errorf("label indexes offset table size exceeds 4 bytes: %d", l) + } + w.buf1.PutBE32int(int(l)) if err := w.writeAt(w.buf1.Get(), startPos); err != nil { return err } @@ -774,7 +782,11 @@ func (w *Writer) writePostingsOffsetTable() error { // Write out the length. 
w.buf1.Reset() - w.buf1.PutBE32int(int(w.f.pos - startPos - 4)) + l := w.f.pos - startPos - 4 + if l > math.MaxUint32 { + return errors.Errorf("postings offset table size exceeds 4 bytes: %d", l) + } + w.buf1.PutBE32int(int(l)) if err := w.writeAt(w.buf1.Get(), startPos); err != nil { return err } @@ -903,7 +915,6 @@ func (w *Writer) writePostingsToTmpFiles() error { values := make([]uint32, 0, len(postings[sid])) for v := range postings[sid] { values = append(values, v) - } // Symbol numbers are in order, so the strings will also be in order. sort.Sort(uint32slice(values)) @@ -955,7 +966,12 @@ func (w *Writer) writePosting(name, value string, offs []uint32) error { } w.buf2.Reset() - w.buf2.PutBE32int(w.buf1.Len()) + l := w.buf1.Len() + // We convert to uint to make code compile on 32-bit systems, as math.MaxUint32 doesn't fit into int there. + if uint(l) > math.MaxUint32 { + return errors.Errorf("posting size exceeds 4 bytes: %d", l) + } + w.buf2.PutBE32int(l) w.buf1.PutHash(w.crc32) return w.fP.Write(w.buf2.Get(), w.buf1.Get()) } @@ -1265,7 +1281,7 @@ type Symbols struct { const symbolFactor = 32 // NewSymbols returns a Symbols object for symbol lookups. -func NewSymbols(bs ByteSlice, version int, off int) (*Symbols, error) { +func NewSymbols(bs ByteSlice, version, off int) (*Symbols, error) { s := &Symbols{ bs: bs, version: version, @@ -1504,7 +1520,7 @@ func (r *Reader) LabelValues(name string, matchers ...*labels.Matcher) ([]string } else { d.Skip(skip) } - s := yoloString(d.UvarintBytes()) //Label value. + s := yoloString(d.UvarintBytes()) // Label value. values = append(values, s) if s == lastVal { break @@ -1519,7 +1535,7 @@ func (r *Reader) LabelValues(name string, matchers ...*labels.Matcher) ([]string // LabelNamesFor returns all the label names for the series referred to by IDs. // The names returned are sorted. 
-func (r *Reader) LabelNamesFor(ids ...uint64) ([]string, error) { +func (r *Reader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { // Gather offsetsMap the name offsetsMap in the symbol table first offsetsMap := make(map[uint32]struct{}) for _, id := range ids { @@ -1561,7 +1577,7 @@ func (r *Reader) LabelNamesFor(ids ...uint64) ([]string, error) { } // LabelValueFor returns label value for the given label name in the series referred to by ID. -func (r *Reader) LabelValueFor(id uint64, label string) (string, error) { +func (r *Reader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { offset := id // In version 2 series IDs are no longer exact references but series are 16-byte padded // and the ID is the multiple of 16 of the actual position. @@ -1587,7 +1603,7 @@ func (r *Reader) LabelValueFor(id uint64, label string) (string, error) { } // Series reads the series with the given ID and writes its labels and chunks into lbls and chks. -func (r *Reader) Series(id uint64, lbls *labels.Labels, chks *[]chunks.Meta) error { +func (r *Reader) Series(id storage.SeriesRef, lbls *labels.Labels, chks *[]chunks.Meta) error { offset := id // In version 2 series IDs are no longer exact references but series are 16-byte padded // and the ID is the multiple of 16 of the actual position. 
@@ -1860,7 +1876,7 @@ func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) e ref0 := int64(d.Uvarint64()) *chks = append(*chks, chunks.Meta{ - Ref: uint64(ref0), + Ref: chunks.ChunkRef(ref0), MinTime: t0, MaxTime: maxt, }) @@ -1878,7 +1894,7 @@ func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) e } *chks = append(*chks, chunks.Meta{ - Ref: uint64(ref0), + Ref: chunks.ChunkRef(ref0), MinTime: mint, MaxTime: maxt, }) diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index 0e75d3758..c859dbb47 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -28,7 +28,8 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/goleak" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/encoding" @@ -45,18 +46,18 @@ type series struct { } type mockIndex struct { - series map[uint64]series - postings map[labels.Label][]uint64 + series map[storage.SeriesRef]series + postings map[labels.Label][]storage.SeriesRef symbols map[string]struct{} } func newMockIndex() mockIndex { ix := mockIndex{ - series: make(map[uint64]series), - postings: make(map[labels.Label][]uint64), + series: make(map[storage.SeriesRef]series), + postings: make(map[labels.Label][]storage.SeriesRef), symbols: make(map[string]struct{}), } - ix.postings[allPostingsKey] = []uint64{} + ix.postings[allPostingsKey] = []storage.SeriesRef{} return ix } @@ -64,7 +65,7 @@ func (m mockIndex) Symbols() (map[string]struct{}, error) { return m.symbols, nil } -func (m mockIndex) AddSeries(ref uint64, l labels.Labels, chunks ...chunks.Meta) error { +func (m mockIndex) AddSeries(ref storage.SeriesRef, l labels.Labels, chunks ...chunks.Meta) error { if _, ok := m.series[ref]; ok { return errors.Errorf("series with 
reference %d already added", ref) } @@ -72,7 +73,7 @@ func (m mockIndex) AddSeries(ref uint64, l labels.Labels, chunks ...chunks.Meta) m.symbols[lbl.Name] = struct{}{} m.symbols[lbl.Value] = struct{}{} if _, ok := m.postings[lbl]; !ok { - m.postings[lbl] = []uint64{} + m.postings[lbl] = []storage.SeriesRef{} } m.postings[lbl] = append(m.postings[lbl], ref) } @@ -124,7 +125,7 @@ func (m mockIndex) SortedPostings(p Postings) Postings { return NewListPostings(ep) } -func (m mockIndex) Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error { +func (m mockIndex) Series(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error { s, ok := m.series[ref] if !ok { return errors.New("not found") @@ -154,7 +155,7 @@ func TestIndexRW_Create_Open(t *testing.T) { require.NoError(t, ir.Close()) // Modify magic header must cause open to fail. - f, err := os.OpenFile(fn, os.O_WRONLY, 0666) + f, err := os.OpenFile(fn, os.O_WRONLY, 0o666) require.NoError(t, err) _, err = f.WriteAt([]byte{0, 0}, 0) require.NoError(t, err) @@ -281,7 +282,7 @@ func TestPostingsMany(t *testing.T) { } for i, s := range series { - require.NoError(t, iw.AddSeries(uint64(i), s)) + require.NoError(t, iw.AddSeries(storage.SeriesRef(i), s)) } require.NoError(t, iw.Close()) @@ -340,7 +341,6 @@ func TestPostingsMany(t *testing.T) { } require.Equal(t, exp, got, fmt.Sprintf("input: %v", c.in)) } - } func TestPersistence_index_e2e(t *testing.T) { @@ -374,7 +374,7 @@ func TestPersistence_index_e2e(t *testing.T) { metas = append(metas, chunks.Meta{ MinTime: int64(j * 10000), MaxTime: int64((j + 1) * 10000), - Ref: rand.Uint64(), + Ref: chunks.ChunkRef(rand.Uint64()), Chunk: chunkenc.NewXORChunk(), }) } @@ -405,9 +405,9 @@ func TestPersistence_index_e2e(t *testing.T) { mi := newMockIndex() for i, s := range input { - err = iw.AddSeries(uint64(i), s.labels, s.chunks...) + err = iw.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...) 
require.NoError(t, err) - require.NoError(t, mi.AddSeries(uint64(i), s.labels, s.chunks...)) + require.NoError(t, mi.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...)) for _, l := range s.labels { valset, ok := values[l.Name] @@ -417,7 +417,7 @@ func TestPersistence_index_e2e(t *testing.T) { } valset[l.Value] = struct{}{} } - postings.Add(uint64(i), s.labels) + postings.Add(storage.SeriesRef(i), s.labels) } err = iw.Close() @@ -504,7 +504,7 @@ func TestNewFileReaderErrorNoOpenFiles(t *testing.T) { dir := testutil.NewTemporaryDirectory("block", t) idxName := filepath.Join(dir.Path(), "index") - err := ioutil.WriteFile(idxName, []byte("corrupted contents"), 0666) + err := ioutil.WriteFile(idxName, []byte("corrupted contents"), 0o666) require.NoError(t, err) _, err = NewFileReader(idxName) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 6c493be1d..10312d84d 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -20,7 +20,8 @@ import ( "sort" "sync" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" ) var allPostingsKey = labels.Label{} @@ -30,29 +31,39 @@ func AllPostingsKey() (name, value string) { return allPostingsKey.Name, allPostingsKey.Value } +// ensureOrderBatchSize is the max number of postings passed to a worker in a single batch in MemPostings.EnsureOrder(). +const ensureOrderBatchSize = 1024 + +// ensureOrderBatchPool is a pool used to recycle batches passed to workers in MemPostings.EnsureOrder(). +var ensureOrderBatchPool = sync.Pool{ + New: func() interface{} { + return make([][]storage.SeriesRef, 0, ensureOrderBatchSize) + }, +} + // MemPostings holds postings list for series ID per label pair. They may be written // to out of order. -// ensureOrder() must be called once before any reads are done. This allows for quick +// EnsureOrder() must be called once before any reads are done. 
This allows for quick // unordered batch fills on startup. type MemPostings struct { mtx sync.RWMutex - m map[string]map[string][]uint64 + m map[string]map[string][]storage.SeriesRef ordered bool } // NewMemPostings returns a memPostings that's ready for reads and writes. func NewMemPostings() *MemPostings { return &MemPostings{ - m: make(map[string]map[string][]uint64, 512), + m: make(map[string]map[string][]storage.SeriesRef, 512), ordered: true, } } // NewUnorderedMemPostings returns a memPostings that is not safe to be read from -// until ensureOrder was called once. +// until EnsureOrder() was called once. func NewUnorderedMemPostings() *MemPostings { return &MemPostings{ - m: make(map[string]map[string][]uint64, 512), + m: make(map[string]map[string][]storage.SeriesRef, 512), ordered: false, } } @@ -188,7 +199,7 @@ func (p *MemPostings) Stats(label string) *PostingsStats { // Get returns a postings list for the given label pair. func (p *MemPostings) Get(name, value string) Postings { - var lp []uint64 + var lp []storage.SeriesRef p.mtx.RLock() l := p.m[name] if l != nil { @@ -218,25 +229,42 @@ func (p *MemPostings) EnsureOrder() { } n := runtime.GOMAXPROCS(0) - workc := make(chan []uint64) + workc := make(chan [][]storage.SeriesRef) var wg sync.WaitGroup wg.Add(n) for i := 0; i < n; i++ { go func() { - for l := range workc { - sort.Slice(l, func(a, b int) bool { return l[a] < l[b] }) + for job := range workc { + for _, l := range job { + sort.Sort(seriesRefSlice(l)) + } + + job = job[:0] + ensureOrderBatchPool.Put(job) //nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty. 
} wg.Done() }() } + nextJob := ensureOrderBatchPool.Get().([][]storage.SeriesRef) for _, e := range p.m { for _, l := range e { - workc <- l + nextJob = append(nextJob, l) + + if len(nextJob) >= ensureOrderBatchSize { + workc <- nextJob + nextJob = ensureOrderBatchPool.Get().([][]storage.SeriesRef) + } } } + + // If the last job was partially filled, we need to push it to workers too. + if len(nextJob) > 0 { + workc <- nextJob + } + close(workc) wg.Wait() @@ -244,7 +272,7 @@ func (p *MemPostings) EnsureOrder() { } // Delete removes all ids in the given map from the postings lists. -func (p *MemPostings) Delete(deleted map[uint64]struct{}) { +func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}) { var keys, vals []string // Collect all keys relevant for deletion once. New keys added afterwards @@ -280,7 +308,7 @@ func (p *MemPostings) Delete(deleted map[uint64]struct{}) { p.mtx.Unlock() continue } - repl := make([]uint64, 0, len(p.m[n][l])) + repl := make([]storage.SeriesRef, 0, len(p.m[n][l])) for _, id := range p.m[n][l] { if _, ok := deleted[id]; !ok { @@ -318,7 +346,7 @@ func (p *MemPostings) Iter(f func(labels.Label, Postings) error) error { } // Add a label set to the postings index. -func (p *MemPostings) Add(id uint64, lset labels.Labels) { +func (p *MemPostings) Add(id storage.SeriesRef, lset labels.Labels) { p.mtx.Lock() for _, l := range lset { @@ -329,10 +357,10 @@ func (p *MemPostings) Add(id uint64, lset labels.Labels) { p.mtx.Unlock() } -func (p *MemPostings) addFor(id uint64, l labels.Label) { +func (p *MemPostings) addFor(id storage.SeriesRef, l labels.Label) { nm, ok := p.m[l.Name] if !ok { - nm = map[string][]uint64{} + nm = map[string][]storage.SeriesRef{} p.m[l.Name] = nm } list := append(nm[l.Value], id) @@ -354,7 +382,7 @@ func (p *MemPostings) addFor(id uint64, l labels.Label) { } // ExpandPostings returns the postings expanded as a slice. 
-func ExpandPostings(p Postings) (res []uint64, err error) { +func ExpandPostings(p Postings) (res []storage.SeriesRef, err error) { for p.Next() { res = append(res, p.At()) } @@ -368,10 +396,10 @@ type Postings interface { // Seek advances the iterator to value v or greater and returns // true if a value was found. - Seek(v uint64) bool + Seek(v storage.SeriesRef) bool // At returns the value at the current iterator position. - At() uint64 + At() storage.SeriesRef // Err returns the last error of the iterator. Err() error @@ -382,15 +410,15 @@ type errPostings struct { err error } -func (e errPostings) Next() bool { return false } -func (e errPostings) Seek(uint64) bool { return false } -func (e errPostings) At() uint64 { return 0 } -func (e errPostings) Err() error { return e.err } +func (e errPostings) Next() bool { return false } +func (e errPostings) Seek(storage.SeriesRef) bool { return false } +func (e errPostings) At() storage.SeriesRef { return 0 } +func (e errPostings) Err() error { return e.err } var emptyPostings = errPostings{} // EmptyPostings returns a postings list that's always empty. -// NOTE: Returning EmptyPostings sentinel when index.Postings struct has no postings is recommended. +// NOTE: Returning EmptyPostings sentinel when Postings struct has no postings is recommended. // It triggers optimized flow in other functions like Intersect, Without etc. 
func EmptyPostings() Postings { return emptyPostings @@ -421,14 +449,14 @@ func Intersect(its ...Postings) Postings { type intersectPostings struct { arr []Postings - cur uint64 + cur storage.SeriesRef } func newIntersectPostings(its ...Postings) *intersectPostings { return &intersectPostings{arr: its} } -func (it *intersectPostings) At() uint64 { +func (it *intersectPostings) At() storage.SeriesRef { return it.cur } @@ -460,7 +488,7 @@ func (it *intersectPostings) Next() bool { return it.doNext() } -func (it *intersectPostings) Seek(id uint64) bool { +func (it *intersectPostings) Seek(id storage.SeriesRef) bool { it.cur = id return it.doNext() } @@ -511,7 +539,7 @@ func (h *postingsHeap) Pop() interface{} { type mergedPostings struct { h postingsHeap initialized bool - cur uint64 + cur storage.SeriesRef err error } @@ -571,7 +599,7 @@ func (it *mergedPostings) Next() bool { } } -func (it *mergedPostings) Seek(id uint64) bool { +func (it *mergedPostings) Seek(id storage.SeriesRef) bool { if it.h.Len() == 0 || it.err != nil { return false } @@ -601,7 +629,7 @@ func (it *mergedPostings) Seek(id uint64) bool { return true } -func (it mergedPostings) At() uint64 { +func (it mergedPostings) At() storage.SeriesRef { return it.cur } @@ -625,7 +653,7 @@ func Without(full, drop Postings) Postings { type removedPostings struct { full, remove Postings - cur uint64 + cur storage.SeriesRef initialized bool fok, rok bool @@ -638,7 +666,7 @@ func newRemovedPostings(full, remove Postings) *removedPostings { } } -func (rp *removedPostings) At() uint64 { +func (rp *removedPostings) At() storage.SeriesRef { return rp.cur } @@ -675,7 +703,7 @@ func (rp *removedPostings) Next() bool { } } -func (rp *removedPostings) Seek(id uint64) bool { +func (rp *removedPostings) Seek(id storage.SeriesRef) bool { if rp.cur >= id { return true } @@ -697,19 +725,19 @@ func (rp *removedPostings) Err() error { // ListPostings implements the Postings interface over a plain list. 
type ListPostings struct { - list []uint64 - cur uint64 + list []storage.SeriesRef + cur storage.SeriesRef } -func NewListPostings(list []uint64) Postings { +func NewListPostings(list []storage.SeriesRef) Postings { return newListPostings(list...) } -func newListPostings(list ...uint64) *ListPostings { +func newListPostings(list ...storage.SeriesRef) *ListPostings { return &ListPostings{list: list} } -func (it *ListPostings) At() uint64 { +func (it *ListPostings) At() storage.SeriesRef { return it.cur } @@ -723,7 +751,7 @@ func (it *ListPostings) Next() bool { return false } -func (it *ListPostings) Seek(x uint64) bool { +func (it *ListPostings) Seek(x storage.SeriesRef) bool { // If the current value satisfies, then return. if it.cur >= x { return true @@ -760,8 +788,8 @@ func newBigEndianPostings(list []byte) *bigEndianPostings { return &bigEndianPostings{list: list} } -func (it *bigEndianPostings) At() uint64 { - return uint64(it.cur) +func (it *bigEndianPostings) At() storage.SeriesRef { + return storage.SeriesRef(it.cur) } func (it *bigEndianPostings) Next() bool { @@ -773,8 +801,8 @@ func (it *bigEndianPostings) Next() bool { return false } -func (it *bigEndianPostings) Seek(x uint64) bool { - if uint64(it.cur) >= x { +func (it *bigEndianPostings) Seek(x storage.SeriesRef) bool { + if storage.SeriesRef(it.cur) >= x { return true } @@ -796,3 +824,10 @@ func (it *bigEndianPostings) Seek(x uint64) bool { func (it *bigEndianPostings) Err() error { return nil } + +// seriesRefSlice attaches the methods of sort.Interface to []storage.SeriesRef, sorting in increasing order. 
+type seriesRefSlice []storage.SeriesRef + +func (x seriesRefSlice) Len() int { return len(x) } +func (x seriesRefSlice) Less(i, j int) bool { return x[i] < x[j] } +func (x seriesRefSlice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index bbf5332a5..8cb76b0ad 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -18,31 +18,33 @@ import ( "fmt" "math/rand" "sort" + "strconv" "testing" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" ) func TestMemPostings_addFor(t *testing.T) { p := NewMemPostings() - p.m[allPostingsKey.Name] = map[string][]uint64{} - p.m[allPostingsKey.Name][allPostingsKey.Value] = []uint64{1, 2, 3, 4, 6, 7, 8} + p.m[allPostingsKey.Name] = map[string][]storage.SeriesRef{} + p.m[allPostingsKey.Name][allPostingsKey.Value] = []storage.SeriesRef{1, 2, 3, 4, 6, 7, 8} p.addFor(5, allPostingsKey) - require.Equal(t, []uint64{1, 2, 3, 4, 5, 6, 7, 8}, p.m[allPostingsKey.Name][allPostingsKey.Value]) + require.Equal(t, []storage.SeriesRef{1, 2, 3, 4, 5, 6, 7, 8}, p.m[allPostingsKey.Name][allPostingsKey.Value]) } func TestMemPostings_ensureOrder(t *testing.T) { p := NewUnorderedMemPostings() - p.m["a"] = map[string][]uint64{} + p.m["a"] = map[string][]storage.SeriesRef{} for i := 0; i < 100; i++ { - l := make([]uint64, 100) + l := make([]storage.SeriesRef, 100) for j := range l { - l[j] = rand.Uint64() + l[j] = storage.SeriesRef(rand.Uint64()) } v := fmt.Sprintf("%d", i) @@ -63,11 +65,64 @@ func TestMemPostings_ensureOrder(t *testing.T) { } } +func BenchmarkMemPostings_ensureOrder(b *testing.B) { + tests := map[string]struct { + numLabels int + numValuesPerLabel int + numRefsPerValue int + }{ + "many values per label": { + numLabels: 100, + numValuesPerLabel: 10000, + numRefsPerValue: 100, + }, + "few values per label": { + numLabels: 
1000000, + numValuesPerLabel: 1, + numRefsPerValue: 100, + }, + "few refs per label value": { + numLabels: 1000, + numValuesPerLabel: 1000, + numRefsPerValue: 10, + }, + } + + for testName, testData := range tests { + b.Run(testName, func(b *testing.B) { + p := NewUnorderedMemPostings() + + // Generate postings. + for l := 0; l < testData.numLabels; l++ { + labelName := strconv.Itoa(l) + p.m[labelName] = map[string][]storage.SeriesRef{} + + for v := 0; v < testData.numValuesPerLabel; v++ { + refs := make([]storage.SeriesRef, testData.numRefsPerValue) + for j := range refs { + refs[j] = storage.SeriesRef(rand.Uint64()) + } + + labelValue := strconv.Itoa(v) + p.m[labelName][labelValue] = refs + } + } + + b.ResetTimer() + + for n := 0; n < b.N; n++ { + p.EnsureOrder() + p.ordered = false + } + }) + } +} + func TestIntersect(t *testing.T) { a := newListPostings(1, 2, 3) b := newListPostings(2, 3, 4) - var cases = []struct { + cases := []struct { in []Postings res Postings @@ -182,30 +237,30 @@ func TestIntersect(t *testing.T) { } func TestMultiIntersect(t *testing.T) { - var cases = []struct { - p [][]uint64 - res []uint64 + cases := []struct { + p [][]storage.SeriesRef + res []storage.SeriesRef }{ { - p: [][]uint64{ + p: [][]storage.SeriesRef{ {1, 2, 3, 4, 5, 6, 1000, 1001}, {2, 4, 5, 6, 7, 8, 999, 1001}, {1, 2, 5, 6, 7, 8, 1001, 1200}, }, - res: []uint64{2, 5, 6, 1001}, + res: []storage.SeriesRef{2, 5, 6, 1001}, }, // One of the reproducible cases for: // https://github.com/prometheus/prometheus/issues/2616 // The initialisation of intersectPostings was moving the iterator forward // prematurely making us miss some postings. 
{ - p: [][]uint64{ + p: [][]storage.SeriesRef{ {1, 2}, {1, 2}, {1, 2}, {2}, }, - res: []uint64{2}, + res: []storage.SeriesRef{2}, }, } @@ -224,22 +279,22 @@ func TestMultiIntersect(t *testing.T) { func BenchmarkIntersect(t *testing.B) { t.Run("LongPostings1", func(bench *testing.B) { - var a, b, c, d []uint64 + var a, b, c, d []storage.SeriesRef for i := 0; i < 10000000; i += 2 { - a = append(a, uint64(i)) + a = append(a, storage.SeriesRef(i)) } for i := 5000000; i < 5000100; i += 4 { - b = append(b, uint64(i)) + b = append(b, storage.SeriesRef(i)) } for i := 5090000; i < 5090600; i += 4 { - b = append(b, uint64(i)) + b = append(b, storage.SeriesRef(i)) } for i := 4990000; i < 5100000; i++ { - c = append(c, uint64(i)) + c = append(c, storage.SeriesRef(i)) } for i := 4000000; i < 6000000; i++ { - d = append(d, uint64(i)) + d = append(d, storage.SeriesRef(i)) } i1 := newListPostings(a...) @@ -257,19 +312,19 @@ func BenchmarkIntersect(t *testing.B) { }) t.Run("LongPostings2", func(bench *testing.B) { - var a, b, c, d []uint64 + var a, b, c, d []storage.SeriesRef for i := 0; i < 12500000; i++ { - a = append(a, uint64(i)) + a = append(a, storage.SeriesRef(i)) } for i := 7500000; i < 12500000; i++ { - b = append(b, uint64(i)) + b = append(b, storage.SeriesRef(i)) } for i := 9000000; i < 20000000; i++ { - c = append(c, uint64(i)) + c = append(c, storage.SeriesRef(i)) } for i := 10000000; i < 12000000; i++ { - d = append(d, uint64(i)) + d = append(d, storage.SeriesRef(i)) } i1 := newListPostings(a...) @@ -292,9 +347,9 @@ func BenchmarkIntersect(t *testing.B) { // 100000 matchers(k=100000). 
for i := 0; i < 100000; i++ { - var temp []uint64 - for j := 1; j < 100; j++ { - temp = append(temp, uint64(j)) + var temp []storage.SeriesRef + for j := storage.SeriesRef(1); j < 100; j++ { + temp = append(temp, j) } its = append(its, newListPostings(temp...)) } @@ -316,11 +371,11 @@ func TestMultiMerge(t *testing.T) { res, err := ExpandPostings(Merge(i1, i2, i3)) require.NoError(t, err) - require.Equal(t, []uint64{1, 2, 3, 4, 5, 6, 7, 8, 999, 1000, 1001, 1200}, res) + require.Equal(t, []storage.SeriesRef{1, 2, 3, 4, 5, 6, 7, 8, 999, 1000, 1001, 1200}, res) } func TestMergedPostings(t *testing.T) { - var cases = []struct { + cases := []struct { in []Postings res Postings @@ -424,44 +479,44 @@ func TestMergedPostings(t *testing.T) { } func TestMergedPostingsSeek(t *testing.T) { - var cases = []struct { - a, b []uint64 + cases := []struct { + a, b []storage.SeriesRef - seek uint64 + seek storage.SeriesRef success bool - res []uint64 + res []storage.SeriesRef }{ { - a: []uint64{2, 3, 4, 5}, - b: []uint64{6, 7, 8, 9, 10}, + a: []storage.SeriesRef{2, 3, 4, 5}, + b: []storage.SeriesRef{6, 7, 8, 9, 10}, seek: 1, success: true, - res: []uint64{2, 3, 4, 5, 6, 7, 8, 9, 10}, + res: []storage.SeriesRef{2, 3, 4, 5, 6, 7, 8, 9, 10}, }, { - a: []uint64{1, 2, 3, 4, 5}, - b: []uint64{6, 7, 8, 9, 10}, + a: []storage.SeriesRef{1, 2, 3, 4, 5}, + b: []storage.SeriesRef{6, 7, 8, 9, 10}, seek: 2, success: true, - res: []uint64{2, 3, 4, 5, 6, 7, 8, 9, 10}, + res: []storage.SeriesRef{2, 3, 4, 5, 6, 7, 8, 9, 10}, }, { - a: []uint64{1, 2, 3, 4, 5}, - b: []uint64{4, 5, 6, 7, 8}, + a: []storage.SeriesRef{1, 2, 3, 4, 5}, + b: []storage.SeriesRef{4, 5, 6, 7, 8}, seek: 9, success: false, res: nil, }, { - a: []uint64{1, 2, 3, 4, 9, 10}, - b: []uint64{1, 4, 5, 6, 7, 8, 10, 11}, + a: []storage.SeriesRef{1, 2, 3, 4, 9, 10}, + b: []storage.SeriesRef{1, 4, 5, 6, 7, 8, 10, 11}, seek: 10, success: true, - res: []uint64{10, 11}, + res: []storage.SeriesRef{10, 11}, }, } @@ -479,51 +534,51 @@ func 
TestMergedPostingsSeek(t *testing.T) { lst, err := ExpandPostings(p) require.NoError(t, err) - lst = append([]uint64{start}, lst...) + lst = append([]storage.SeriesRef{start}, lst...) require.Equal(t, c.res, lst) } } } func TestRemovedPostings(t *testing.T) { - var cases = []struct { - a, b []uint64 - res []uint64 + cases := []struct { + a, b []storage.SeriesRef + res []storage.SeriesRef }{ { a: nil, b: nil, - res: []uint64(nil), + res: []storage.SeriesRef(nil), }, { - a: []uint64{1, 2, 3, 4}, + a: []storage.SeriesRef{1, 2, 3, 4}, b: nil, - res: []uint64{1, 2, 3, 4}, + res: []storage.SeriesRef{1, 2, 3, 4}, }, { a: nil, - b: []uint64{1, 2, 3, 4}, - res: []uint64(nil), + b: []storage.SeriesRef{1, 2, 3, 4}, + res: []storage.SeriesRef(nil), }, { - a: []uint64{1, 2, 3, 4, 5}, - b: []uint64{6, 7, 8, 9, 10}, - res: []uint64{1, 2, 3, 4, 5}, + a: []storage.SeriesRef{1, 2, 3, 4, 5}, + b: []storage.SeriesRef{6, 7, 8, 9, 10}, + res: []storage.SeriesRef{1, 2, 3, 4, 5}, }, { - a: []uint64{1, 2, 3, 4, 5}, - b: []uint64{4, 5, 6, 7, 8}, - res: []uint64{1, 2, 3}, + a: []storage.SeriesRef{1, 2, 3, 4, 5}, + b: []storage.SeriesRef{4, 5, 6, 7, 8}, + res: []storage.SeriesRef{1, 2, 3}, }, { - a: []uint64{1, 2, 3, 4, 9, 10}, - b: []uint64{1, 4, 5, 6, 7, 8, 10, 11}, - res: []uint64{2, 3, 9}, + a: []storage.SeriesRef{1, 2, 3, 4, 9, 10}, + b: []storage.SeriesRef{1, 4, 5, 6, 7, 8, 10, 11}, + res: []storage.SeriesRef{2, 3, 9}, }, { - a: []uint64{1, 2, 3, 4, 9, 10}, - b: []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, - res: []uint64(nil), + a: []storage.SeriesRef{1, 2, 3, 4, 9, 10}, + b: []storage.SeriesRef{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, + res: []storage.SeriesRef(nil), }, } @@ -535,14 +590,13 @@ func TestRemovedPostings(t *testing.T) { require.NoError(t, err) require.Equal(t, c.res, res) } - } func TestRemovedNextStackoverflow(t *testing.T) { - var full []uint64 - var remove []uint64 + var full []storage.SeriesRef + var remove []storage.SeriesRef - var i uint64 + var i storage.SeriesRef for 
i = 0; i < 1e7; i++ { full = append(full, i) remove = append(remove, i) @@ -561,68 +615,68 @@ func TestRemovedNextStackoverflow(t *testing.T) { } func TestRemovedPostingsSeek(t *testing.T) { - var cases = []struct { - a, b []uint64 + cases := []struct { + a, b []storage.SeriesRef - seek uint64 + seek storage.SeriesRef success bool - res []uint64 + res []storage.SeriesRef }{ { - a: []uint64{2, 3, 4, 5}, - b: []uint64{6, 7, 8, 9, 10}, + a: []storage.SeriesRef{2, 3, 4, 5}, + b: []storage.SeriesRef{6, 7, 8, 9, 10}, seek: 1, success: true, - res: []uint64{2, 3, 4, 5}, + res: []storage.SeriesRef{2, 3, 4, 5}, }, { - a: []uint64{1, 2, 3, 4, 5}, - b: []uint64{6, 7, 8, 9, 10}, + a: []storage.SeriesRef{1, 2, 3, 4, 5}, + b: []storage.SeriesRef{6, 7, 8, 9, 10}, seek: 2, success: true, - res: []uint64{2, 3, 4, 5}, + res: []storage.SeriesRef{2, 3, 4, 5}, }, { - a: []uint64{1, 2, 3, 4, 5}, - b: []uint64{4, 5, 6, 7, 8}, + a: []storage.SeriesRef{1, 2, 3, 4, 5}, + b: []storage.SeriesRef{4, 5, 6, 7, 8}, seek: 9, success: false, res: nil, }, { - a: []uint64{1, 2, 3, 4, 9, 10}, - b: []uint64{1, 4, 5, 6, 7, 8, 10, 11}, + a: []storage.SeriesRef{1, 2, 3, 4, 9, 10}, + b: []storage.SeriesRef{1, 4, 5, 6, 7, 8, 10, 11}, seek: 10, success: false, res: nil, }, { - a: []uint64{1, 2, 3, 4, 9, 10}, - b: []uint64{1, 4, 5, 6, 7, 8, 11}, + a: []storage.SeriesRef{1, 2, 3, 4, 9, 10}, + b: []storage.SeriesRef{1, 4, 5, 6, 7, 8, 11}, seek: 4, success: true, - res: []uint64{9, 10}, + res: []storage.SeriesRef{9, 10}, }, { - a: []uint64{1, 2, 3, 4, 9, 10}, - b: []uint64{1, 4, 5, 6, 7, 8, 11}, + a: []storage.SeriesRef{1, 2, 3, 4, 9, 10}, + b: []storage.SeriesRef{1, 4, 5, 6, 7, 8, 11}, seek: 5, success: true, - res: []uint64{9, 10}, + res: []storage.SeriesRef{9, 10}, }, { - a: []uint64{1, 2, 3, 4, 9, 10}, - b: []uint64{1, 4, 5, 6, 7, 8, 11}, + a: []storage.SeriesRef{1, 2, 3, 4, 9, 10}, + b: []storage.SeriesRef{1, 4, 5, 6, 7, 8, 11}, seek: 10, success: true, - res: []uint64{10}, + res: []storage.SeriesRef{10}, 
}, } @@ -640,7 +694,7 @@ func TestRemovedPostingsSeek(t *testing.T) { lst, err := ExpandPostings(p) require.NoError(t, err) - lst = append([]uint64{start}, lst...) + lst = append([]storage.SeriesRef{start}, lst...) require.Equal(t, c.res, lst) } } @@ -665,7 +719,7 @@ func TestBigEndian(t *testing.T) { bep := newBigEndianPostings(beLst) for i := 0; i < num; i++ { require.True(t, bep.Next()) - require.Equal(t, uint64(ls[i]), bep.At()) + require.Equal(t, storage.SeriesRef(ls[i]), bep.At()) } require.False(t, bep.Next()) @@ -713,8 +767,8 @@ func TestBigEndian(t *testing.T) { bep := newBigEndianPostings(beLst) for _, v := range table { - require.Equal(t, v.found, bep.Seek(uint64(v.seek))) - require.Equal(t, uint64(v.val), bep.At()) + require.Equal(t, v.found, bep.Seek(storage.SeriesRef(v.seek))) + require.Equal(t, storage.SeriesRef(v.val), bep.At()) require.NoError(t, bep.Err()) } }) @@ -734,11 +788,11 @@ func TestIntersectWithMerge(t *testing.T) { res, err := ExpandPostings(p) require.NoError(t, err) - require.Equal(t, []uint64{30}, res) + require.Equal(t, []storage.SeriesRef{30}, res) } func TestWithoutPostings(t *testing.T) { - var cases = []struct { + cases := []struct { base Postings drop Postings @@ -818,7 +872,7 @@ func TestWithoutPostings(t *testing.T) { func BenchmarkPostings_Stats(b *testing.B) { p := NewMemPostings() - var seriesID uint64 + var seriesID storage.SeriesRef createPostingsLabelValues := func(name, valuePrefix string, count int) { for n := 1; n < count; n++ { @@ -826,7 +880,6 @@ func BenchmarkPostings_Stats(b *testing.B) { p.Add(seriesID, labels.FromStrings(name, value)) seriesID++ } - } createPostingsLabelValues("__name__", "metrics_name_can_be_very_big_and_bad", 1e3) for i := 0; i < 20; i++ { @@ -845,7 +898,6 @@ func BenchmarkPostings_Stats(b *testing.B) { for n := 0; n < b.N; n++ { p.Stats("__name__") } - } func TestMemPostings_Delete(t *testing.T) { @@ -855,7 +907,7 @@ func TestMemPostings_Delete(t *testing.T) { p.Add(3, 
labels.FromStrings("lbl2", "a")) before := p.Get(allPostingsKey.Name, allPostingsKey.Value) - p.Delete(map[uint64]struct{}{ + p.Delete(map[storage.SeriesRef]struct{}{ 2: {}, }) after := p.Get(allPostingsKey.Name, allPostingsKey.Value) @@ -864,13 +916,13 @@ func TestMemPostings_Delete(t *testing.T) { // iterated over. expanded, err := ExpandPostings(before) require.NoError(t, err) - require.Equal(t, []uint64{1, 2, 3}, expanded) + require.Equal(t, []storage.SeriesRef{1, 2, 3}, expanded) // Make sure postings gotten after the delete have the new data when // iterated over. expanded, err = ExpandPostings(after) require.NoError(t, err) - require.Equal(t, []uint64{1, 3}, expanded) + require.Equal(t, []storage.SeriesRef{1, 3}, expanded) deleted := p.Get("lbl1", "b") expanded, err = ExpandPostings(deleted) diff --git a/tsdb/index/postingsstats.go b/tsdb/index/postingsstats.go index 2fc2465d9..5e5880720 100644 --- a/tsdb/index/postingsstats.go +++ b/tsdb/index/postingsstats.go @@ -59,7 +59,6 @@ func (m *maxHeap) push(item Stat) { m.minIndex = i } } - } func (m *maxHeap) get() []Stat { diff --git a/tsdb/index/postingsstats_test.go b/tsdb/index/postingsstats_test.go index 910b5a06a..7ce51c795 100644 --- a/tsdb/index/postingsstats_test.go +++ b/tsdb/index/postingsstats_test.go @@ -37,7 +37,6 @@ func TestPostingsStats(t *testing.T) { for i := 0; i < heapLength; i++ { require.Equal(t, uint64(max-i), data[i].Count) } - } func TestPostingsStats2(t *testing.T) { @@ -55,6 +54,7 @@ func TestPostingsStats2(t *testing.T) { require.Equal(t, 4, len(data)) require.Equal(t, uint64(11), data[0].Count) } + func BenchmarkPostingStatsMaxHep(b *testing.B) { stats := &maxHeap{} max := 9000000 @@ -71,5 +71,4 @@ func BenchmarkPostingStatsMaxHep(b *testing.B) { } stats.get() } - } diff --git a/tsdb/mocks_test.go b/tsdb/mocks_test.go index ac368e7ba..268017caa 100644 --- a/tsdb/mocks_test.go +++ b/tsdb/mocks_test.go @@ -16,7 +16,8 @@ package tsdb import ( "github.com/pkg/errors" - 
"github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/tombstones" @@ -34,7 +35,7 @@ func copyChunk(c chunkenc.Chunk) (chunkenc.Chunk, error) { } func (mockIndexWriter) AddSymbol(string) error { return nil } -func (m *mockIndexWriter) AddSeries(_ uint64, l labels.Labels, chks ...chunks.Meta) error { +func (m *mockIndexWriter) AddSeries(_ storage.SeriesRef, l labels.Labels, chks ...chunks.Meta) error { // Copy chunks as their bytes are pooled. chksNew := make([]chunks.Meta, len(chks)) for i, chk := range chks { diff --git a/tsdb/querier.go b/tsdb/querier.go index 5739fbdc5..c3574d61d 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -22,7 +22,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -124,6 +124,8 @@ func NewBlockQuerier(b BlockReader, mint, maxt int64) (storage.Querier, error) { func (q *blockQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet { mint := q.mint maxt := q.maxt + disableTrimming := false + p, err := PostingsForMatchers(q.index, ms...) if err != nil { return storage.ErrSeriesSet(err) @@ -135,13 +137,14 @@ func (q *blockQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms .. if hints != nil { mint = hints.Start maxt = hints.End + disableTrimming = hints.DisableTrimming if hints.Func == "series" { // When you're only looking up metadata (for example series API), you don't need to load any chunks. 
- return newBlockSeriesSet(q.index, newNopChunkReader(), q.tombstones, p, mint, maxt) + return newBlockSeriesSet(q.index, newNopChunkReader(), q.tombstones, p, mint, maxt, disableTrimming) } } - return newBlockSeriesSet(q.index, q.chunks, q.tombstones, p, mint, maxt) + return newBlockSeriesSet(q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) } // blockChunkQuerier provides chunk querying access to a single block database. @@ -161,9 +164,11 @@ func NewBlockChunkQuerier(b BlockReader, mint, maxt int64) (storage.ChunkQuerier func (q *blockChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.ChunkSeriesSet { mint := q.mint maxt := q.maxt + disableTrimming := false if hints != nil { mint = hints.Start maxt = hints.End + disableTrimming = hints.DisableTrimming } p, err := PostingsForMatchers(q.index, ms...) if err != nil { @@ -172,7 +177,7 @@ func (q *blockChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, if sortSeries { p = q.index.SortedPostings(p) } - return newBlockChunkSeriesSet(q.index, q.chunks, q.tombstones, p, mint, maxt) + return newBlockChunkSeriesSet(q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) } func findSetMatches(pattern string) []string { @@ -414,7 +419,7 @@ func labelNamesWithMatchers(r IndexReader, matchers ...*labels.Matcher) ([]strin return nil, err } - var postings []uint64 + var postings []storage.SeriesRef for p.Next() { postings = append(postings, p.At()) } @@ -429,11 +434,12 @@ func labelNamesWithMatchers(r IndexReader, matchers ...*labels.Matcher) ([]strin // Iterated series are trimmed with given min and max time as well as tombstones. // See newBlockSeriesSet and newBlockChunkSeriesSet to use it for either sample or chunk iterating. 
type blockBaseSeriesSet struct { - p index.Postings - index IndexReader - chunks ChunkReader - tombstones tombstones.Reader - mint, maxt int64 + p index.Postings + index IndexReader + chunks ChunkReader + tombstones tombstones.Reader + mint, maxt int64 + disableTrimming bool currIterFn func() *populateWithDelGenericSeriesIterator currLabels labels.Labels @@ -488,11 +494,13 @@ func (b *blockBaseSeriesSet) Next() bool { } // If still not entirely deleted, check if trim is needed based on requested time range. - if chk.MinTime < b.mint { - trimFront = true - } - if chk.MaxTime > b.maxt { - trimBack = true + if !b.disableTrimming { + if chk.MinTime < b.mint { + trimFront = true + } + if chk.MaxTime > b.maxt { + trimBack = true + } } } @@ -607,6 +615,7 @@ func (p *populateWithDelGenericSeriesIterator) Err() error { return p.err } func (p *populateWithDelGenericSeriesIterator) toSeriesIterator() chunkenc.Iterator { return &populateWithDelSeriesIterator{populateWithDelGenericSeriesIterator: p} } + func (p *populateWithDelGenericSeriesIterator) toChunkSeriesIterator() chunks.Iterator { return &populateWithDelChunkSeriesIterator{populateWithDelGenericSeriesIterator: p} } @@ -758,16 +767,17 @@ type blockSeriesSet struct { blockBaseSeriesSet } -func newBlockSeriesSet(i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64) storage.SeriesSet { +func newBlockSeriesSet(i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.SeriesSet { return &blockSeriesSet{ blockBaseSeriesSet{ - index: i, - chunks: c, - tombstones: t, - p: p, - mint: mint, - maxt: maxt, - bufLbls: make(labels.Labels, 0, 10), + index: i, + chunks: c, + tombstones: t, + p: p, + mint: mint, + maxt: maxt, + disableTrimming: disableTrimming, + bufLbls: make(labels.Labels, 0, 10), }, } } @@ -790,16 +800,17 @@ type blockChunkSeriesSet struct { blockBaseSeriesSet } -func newBlockChunkSeriesSet(i IndexReader, c ChunkReader, t 
tombstones.Reader, p index.Postings, mint, maxt int64) storage.ChunkSeriesSet { +func newBlockChunkSeriesSet(i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.ChunkSeriesSet { return &blockChunkSeriesSet{ blockBaseSeriesSet{ - index: i, - chunks: c, - tombstones: t, - p: p, - mint: mint, - maxt: maxt, - bufLbls: make(labels.Labels, 0, 10), + index: i, + chunks: c, + tombstones: t, + p: p, + mint: mint, + maxt: maxt, + disableTrimming: disableTrimming, + bufLbls: make(labels.Labels, 0, 10), }, } } @@ -816,7 +827,7 @@ func (b *blockChunkSeriesSet) At() storage.ChunkSeries { } // NewMergedStringIter returns string iterator that allows to merge symbols on demand and stream result. -func NewMergedStringIter(a index.StringIter, b index.StringIter) index.StringIter { +func NewMergedStringIter(a, b index.StringIter) index.StringIter { return &mergedStringIter{a: a, b: b, aok: a.Next(), bok: b.Next()} } @@ -931,7 +942,6 @@ Outer: if ts <= tr.Maxt { return true - } it.Intervals = it.Intervals[1:] } @@ -952,6 +962,8 @@ func newNopChunkReader() ChunkReader { } } -func (cr nopChunkReader) Chunk(ref uint64) (chunkenc.Chunk, error) { return cr.emptyChunk, nil } +func (cr nopChunkReader) Chunk(ref chunks.ChunkRef) (chunkenc.Chunk, error) { + return cr.emptyChunk, nil +} func (cr nopChunkReader) Close() error { return nil } diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index ed2d6fb4b..71dfef35b 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" ) // Make entries ~50B in size, to emulate real-world high cardinality. 
diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 9ae34e726..555b029a7 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -29,7 +29,7 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -106,13 +106,13 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe }) postings := index.NewMemPostings() - chkReader := mockChunkReader(make(map[uint64]chunkenc.Chunk)) + chkReader := mockChunkReader(make(map[chunks.ChunkRef]chunkenc.Chunk)) lblIdx := make(map[string]map[string]struct{}) mi := newMockIndex() blockMint := int64(math.MaxInt64) blockMaxt := int64(math.MinInt64) - var chunkRef uint64 + var chunkRef chunks.ChunkRef for i, s := range tc { i = i + 1 // 0 is not a valid posting. metas := make([]chunks.Meta, 0, len(s.chunks)) @@ -139,9 +139,9 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe chunkRef++ } ls := labels.FromMap(s.lset) - require.NoError(t, mi.AddSeries(uint64(i), ls, metas...)) + require.NoError(t, mi.AddSeries(storage.SeriesRef(i), ls, metas...)) - postings.Add(uint64(i), ls) + postings.Add(storage.SeriesRef(i), ls) for _, l := range ls { vs, present := lblIdx[l.Name] @@ -162,6 +162,7 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe type blockQuerierTestCase struct { mint, maxt int64 ms []*labels.Matcher + hints *storage.SelectHints exp storage.SeriesSet expChks storage.ChunkSeriesSet } @@ -179,7 +180,7 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C }, } - res := q.Select(false, nil, c.ms...) + res := q.Select(false, c.hints, c.ms...) 
defer func() { require.NoError(t, q.Close()) }() for { @@ -214,7 +215,7 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C maxt: c.maxt, }, } - res := q.Select(false, nil, c.ms...) + res := q.Select(false, c.hints, c.ms...) defer func() { require.NoError(t, q.Close()) }() for { @@ -319,6 +320,56 @@ func TestBlockQuerier(t *testing.T) { ), }), }, + { + // This test runs a query disabling trimming. All chunks containing at least 1 sample within the queried + // time range will be returned. + mint: 2, + maxt: 6, + hints: &storage.SelectHints{Start: 2, End: 6, DisableTrimming: true}, + ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")}, + exp: newMockSeriesSet([]storage.Series{ + storage.NewListSeries(labels.Labels{{Name: "a", Value: "a"}}, + []tsdbutil.Sample{sample{1, 2, nil}, sample{2, 3, nil}, sample{3, 4, nil}, sample{5, 2, nil}, sample{6, 3, nil}, sample{7, 4, nil}}, + ), + storage.NewListSeries(labels.Labels{{Name: "a", Value: "a"}, {Name: "b", Value: "b"}}, + []tsdbutil.Sample{sample{1, 1, nil}, sample{2, 2, nil}, sample{3, 3, nil}, sample{5, 3, nil}, sample{6, 6, nil}}, + ), + }), + expChks: newMockChunkSeriesSet([]storage.ChunkSeries{ + storage.NewListChunkSeriesFromSamples(labels.Labels{{Name: "a", Value: "a"}}, + []tsdbutil.Sample{sample{1, 2, nil}, sample{2, 3, nil}, sample{3, 4, nil}}, + []tsdbutil.Sample{sample{5, 2, nil}, sample{6, 3, nil}, sample{7, 4, nil}}, + ), + storage.NewListChunkSeriesFromSamples(labels.Labels{{Name: "a", Value: "a"}, {Name: "b", Value: "b"}}, + []tsdbutil.Sample{sample{1, 1, nil}, sample{2, 2, nil}, sample{3, 3, nil}}, + []tsdbutil.Sample{sample{5, 3, nil}, sample{6, 6, nil}}, + ), + }), + }, + { + // This test runs a query disabling trimming. All chunks containing at least 1 sample within the queried + // time range will be returned. 
+ mint: 5, + maxt: 6, + hints: &storage.SelectHints{Start: 5, End: 6, DisableTrimming: true}, + ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")}, + exp: newMockSeriesSet([]storage.Series{ + storage.NewListSeries(labels.Labels{{Name: "a", Value: "a"}}, + []tsdbutil.Sample{sample{5, 2, nil}, sample{6, 3, nil}, sample{7, 4, nil}}, + ), + storage.NewListSeries(labels.Labels{{Name: "a", Value: "a"}, {Name: "b", Value: "b"}}, + []tsdbutil.Sample{sample{5, 3, nil}, sample{6, 6, nil}}, + ), + }), + expChks: newMockChunkSeriesSet([]storage.ChunkSeries{ + storage.NewListChunkSeriesFromSamples(labels.Labels{{Name: "a", Value: "a"}}, + []tsdbutil.Sample{sample{5, 2, nil}, sample{6, 3, nil}, sample{7, 4, nil}}, + ), + storage.NewListChunkSeriesFromSamples(labels.Labels{{Name: "a", Value: "a"}, {Name: "b", Value: "b"}}, + []tsdbutil.Sample{sample{5, 3, nil}, sample{6, 6, nil}}, + ), + }), + }, } { t.Run("", func(t *testing.T) { ir, cr, _, _ := createIdxChkReaders(t, testData) @@ -556,21 +607,21 @@ func TestBlockQuerierDelete(t *testing.T) { type fakeChunksReader struct { ChunkReader - chks map[uint64]chunkenc.Chunk + chks map[chunks.ChunkRef]chunkenc.Chunk } func createFakeReaderAndNotPopulatedChunks(s ...[]tsdbutil.Sample) (*fakeChunksReader, []chunks.Meta) { f := &fakeChunksReader{ - chks: map[uint64]chunkenc.Chunk{}, + chks: map[chunks.ChunkRef]chunkenc.Chunk{}, } chks := make([]chunks.Meta, 0, len(s)) for ref, samples := range s { chk := tsdbutil.ChunkFromSamples(samples) - f.chks[uint64(ref)] = chk.Chunk + f.chks[chunks.ChunkRef(ref)] = chk.Chunk chks = append(chks, chunks.Meta{ - Ref: uint64(ref), + Ref: chunks.ChunkRef(ref), MinTime: chk.MinTime, MaxTime: chk.MaxTime, }) @@ -578,7 +629,7 @@ func createFakeReaderAndNotPopulatedChunks(s ...[]tsdbutil.Sample) (*fakeChunksR return f, chks } -func (r *fakeChunksReader) Chunk(ref uint64) (chunkenc.Chunk, error) { +func (r *fakeChunksReader) Chunk(ref chunks.ChunkRef) (chunkenc.Chunk, error) { chk, ok := 
r.chks[ref] if !ok { return nil, errors.Errorf("chunk not found at ref %v", ref) @@ -918,7 +969,7 @@ func TestPopulateWithDelSeriesIterator_NextWithMinTime(t *testing.T) { // The subset are all equivalent so this does not capture merging of partial or non-overlapping sets well. // TODO(bwplotka): Merge with storage merged series set benchmark. func BenchmarkMergedSeriesSet(b *testing.B) { - var sel = func(sets []storage.SeriesSet) storage.SeriesSet { + sel := func(sets []storage.SeriesSet) storage.SeriesSet { return storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) } @@ -965,9 +1016,9 @@ func BenchmarkMergedSeriesSet(b *testing.B) { } } -type mockChunkReader map[uint64]chunkenc.Chunk +type mockChunkReader map[chunks.ChunkRef]chunkenc.Chunk -func (cr mockChunkReader) Chunk(id uint64) (chunkenc.Chunk, error) { +func (cr mockChunkReader) Chunk(id chunks.ChunkRef) (chunkenc.Chunk, error) { chk, ok := cr[id] if ok { return chk, nil @@ -1087,15 +1138,15 @@ type series struct { } type mockIndex struct { - series map[uint64]series - postings map[labels.Label][]uint64 + series map[storage.SeriesRef]series + postings map[labels.Label][]storage.SeriesRef symbols map[string]struct{} } func newMockIndex() mockIndex { ix := mockIndex{ - series: make(map[uint64]series), - postings: make(map[labels.Label][]uint64), + series: make(map[storage.SeriesRef]series), + postings: make(map[labels.Label][]storage.SeriesRef), symbols: make(map[string]struct{}), } return ix @@ -1110,7 +1161,7 @@ func (m mockIndex) Symbols() index.StringIter { return index.NewStringListIter(l) } -func (m *mockIndex) AddSeries(ref uint64, l labels.Labels, chunks ...chunks.Meta) error { +func (m *mockIndex) AddSeries(ref storage.SeriesRef, l labels.Labels, chunks ...chunks.Meta) error { if _, ok := m.series[ref]; ok { return errors.Errorf("series with reference %d already added", ref) } @@ -1177,11 +1228,11 @@ func (m mockIndex) LabelValues(name string, matchers ...*labels.Matcher) ([]stri return 
values, nil } -func (m mockIndex) LabelValueFor(id uint64, label string) (string, error) { +func (m mockIndex) LabelValueFor(id storage.SeriesRef, label string) (string, error) { return m.series[id].l.Get(label), nil } -func (m mockIndex) LabelNamesFor(ids ...uint64) ([]string, error) { +func (m mockIndex) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { namesMap := make(map[string]bool) for _, id := range ids { for _, lbl := range m.series[id].l { @@ -1216,7 +1267,7 @@ func (m mockIndex) SortedPostings(p index.Postings) index.Postings { return index.NewListPostings(ep) } -func (m mockIndex) Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error { +func (m mockIndex) Series(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error { s, ok := m.series[ref] if !ok { return storage.ErrNotFound @@ -1860,7 +1911,6 @@ func TestPostingsForMatchers(t *testing.T) { t.Errorf("Evaluating %v, missing results %+v", c.matchers, exp) } } - } // TestClose ensures that calling Close more than once doesn't block and doesn't panic. 
@@ -2035,11 +2085,11 @@ func (m mockMatcherIndex) LabelValues(name string, matchers ...*labels.Matcher) return []string{}, errors.New("label values called") } -func (m mockMatcherIndex) LabelValueFor(id uint64, label string) (string, error) { +func (m mockMatcherIndex) LabelValueFor(id storage.SeriesRef, label string) (string, error) { return "", errors.New("label value for called") } -func (m mockMatcherIndex) LabelNamesFor(ids ...uint64) ([]string, error) { +func (m mockMatcherIndex) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { return nil, errors.New("label names for for called") } @@ -2051,7 +2101,7 @@ func (m mockMatcherIndex) SortedPostings(p index.Postings) index.Postings { return index.EmptyPostings() } -func (m mockMatcherIndex) Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error { +func (m mockMatcherIndex) Series(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error { return nil } @@ -2101,13 +2151,13 @@ func TestBlockBaseSeriesSet(t *testing.T) { lset labels.Labels chunks []chunks.Meta - ref uint64 + ref storage.SeriesRef } cases := []struct { series []refdSeries // Postings should be in the sorted order of the series - postings []uint64 + postings []storage.SeriesRef expIdxs []int }{ @@ -2116,7 +2166,12 @@ func TestBlockBaseSeriesSet(t *testing.T) { { lset: labels.New([]labels.Label{{Name: "a", Value: "a"}}...), chunks: []chunks.Meta{ - {Ref: 29}, {Ref: 45}, {Ref: 245}, {Ref: 123}, {Ref: 4232}, {Ref: 5344}, + {Ref: 29}, + {Ref: 45}, + {Ref: 245}, + {Ref: 123}, + {Ref: 4232}, + {Ref: 5344}, {Ref: 121}, }, ref: 12, @@ -2141,7 +2196,7 @@ func TestBlockBaseSeriesSet(t *testing.T) { ref: 108, }, }, - postings: []uint64{12, 13, 10, 108}, // 13 doesn't exist and should just be skipped over. + postings: []storage.SeriesRef{12, 13, 10, 108}, // 13 doesn't exist and should just be skipped over. 
expIdxs: []int{0, 1, 3}, }, { @@ -2159,7 +2214,7 @@ func TestBlockBaseSeriesSet(t *testing.T) { ref: 3, }, }, - postings: []uint64{}, + postings: []storage.SeriesRef{}, expIdxs: []int{}, }, } diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 0a3e3c2f4..40c9ab258 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package record contains the various record types used for encoding various Head block data in the WAL and in-memory snapshot. package record import ( @@ -21,7 +22,9 @@ import ( "github.com/pkg/errors" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/encoding" "github.com/prometheus/prometheus/tsdb/tombstones" ) @@ -44,27 +47,25 @@ const ( Histograms Type = 5 ) -var ( - // ErrNotFound is returned if a looked up resource was not found. Duplicate ErrNotFound from head.go. - ErrNotFound = errors.New("not found") -) +// ErrNotFound is returned if a looked up resource was not found. Duplicate ErrNotFound from head.go. +var ErrNotFound = errors.New("not found") // RefSeries is the series labels with the series ID. type RefSeries struct { - Ref uint64 + Ref chunks.HeadSeriesRef Labels labels.Labels } // RefSample is a timestamp/value pair associated with a reference to a series. type RefSample struct { - Ref uint64 + Ref chunks.HeadSeriesRef T int64 V float64 } // RefExemplar is an exemplar with it's labels, timestamp, value the exemplar was collected/observed with, and a reference to a series. type RefExemplar struct { - Ref uint64 + Ref chunks.HeadSeriesRef T int64 V float64 Labels labels.Labels @@ -72,15 +73,14 @@ type RefExemplar struct { // RefHistogram is a histogram. 
type RefHistogram struct { - Ref uint64 + Ref chunks.HeadSeriesRef T int64 H *histogram.Histogram } // Decoder decodes series, sample, and tombstone records. // The zero value is ready to use. -type Decoder struct { -} +type Decoder struct{} // Type returns the type of the record. // Returns RecordUnknown if no valid record type is found. @@ -103,7 +103,7 @@ func (d *Decoder) Series(rec []byte, series []RefSeries) ([]RefSeries, error) { return nil, errors.New("invalid record type") } for len(dec.B) > 0 && dec.Err() == nil { - ref := dec.Be64() + ref := storage.SeriesRef(dec.Be64()) lset := make(labels.Labels, dec.Uvarint()) @@ -114,7 +114,7 @@ func (d *Decoder) Series(rec []byte, series []RefSeries) ([]RefSeries, error) { sort.Sort(lset) series = append(series, RefSeries{ - Ref: ref, + Ref: chunks.HeadSeriesRef(ref), Labels: lset, }) } @@ -147,7 +147,7 @@ func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) val := dec.Be64() samples = append(samples, RefSample{ - Ref: uint64(int64(baseRef) + dref), + Ref: chunks.HeadSeriesRef(int64(baseRef) + dref), T: baseTime + dtime, V: math.Float64frombits(val), }) @@ -171,7 +171,7 @@ func (d *Decoder) Tombstones(rec []byte, tstones []tombstones.Stone) ([]tombston } for dec.Len() > 0 && dec.Err() == nil { tstones = append(tstones, tombstones.Stone{ - Ref: dec.Be64(), + Ref: storage.SeriesRef(dec.Be64()), Intervals: tombstones.Intervals{ {Mint: dec.Varint64(), Maxt: dec.Varint64()}, }, @@ -217,7 +217,7 @@ func (d *Decoder) ExemplarsFromBuffer(dec *encoding.Decbuf, exemplars []RefExemp sort.Sort(lset) exemplars = append(exemplars, RefExemplar{ - Ref: baseRef + uint64(dref), + Ref: chunks.HeadSeriesRef(baseRef + uint64(dref)), T: baseTime + dtime, V: math.Float64frombits(val), Labels: lset, @@ -251,7 +251,7 @@ func (d *Decoder) Histograms(rec []byte, histograms []RefHistogram) ([]RefHistog dtime := dec.Varint64() rh := RefHistogram{ - Ref: baseRef + uint64(dref), + Ref: chunks.HeadSeriesRef(baseRef + 
uint64(dref)), T: baseTime + dtime, H: &histogram.Histogram{ Schema: 0, @@ -317,8 +317,7 @@ func (d *Decoder) Histograms(rec []byte, histograms []RefHistogram) ([]RefHistog // Encoder encodes series, sample, and tombstones records. // The zero value is ready to use. -type Encoder struct { -} +type Encoder struct{} // Series appends the encoded series to b and returns the resulting slice. func (e *Encoder) Series(series []RefSeries, b []byte) []byte { @@ -326,7 +325,7 @@ func (e *Encoder) Series(series []RefSeries, b []byte) []byte { buf.PutByte(byte(Series)) for _, s := range series { - buf.PutBE64(s.Ref) + buf.PutBE64(uint64(s.Ref)) buf.PutUvarint(len(s.Labels)) for _, l := range s.Labels { @@ -350,7 +349,7 @@ func (e *Encoder) Samples(samples []RefSample, b []byte) []byte { // All samples encode their timestamp and ref as delta to those. first := samples[0] - buf.PutBE64(first.Ref) + buf.PutBE64(uint64(first.Ref)) buf.PutBE64int64(first.T) for _, s := range samples { @@ -368,7 +367,7 @@ func (e *Encoder) Tombstones(tstones []tombstones.Stone, b []byte) []byte { for _, s := range tstones { for _, iv := range s.Intervals { - buf.PutBE64(s.Ref) + buf.PutBE64(uint64(s.Ref)) buf.PutVarint64(iv.Mint) buf.PutVarint64(iv.Maxt) } @@ -394,7 +393,7 @@ func (e *Encoder) EncodeExemplarsIntoBuffer(exemplars []RefExemplar, buf *encodi // All samples encode their timestamp and ref as delta to those. first := exemplars[0] - buf.PutBE64(first.Ref) + buf.PutBE64(uint64(first.Ref)) buf.PutBE64int64(first.T) for _, ex := range exemplars { @@ -421,7 +420,7 @@ func (e *Encoder) Histograms(histograms []RefHistogram, b []byte) []byte { // Store base timestamp and base reference number of first histogram. // All histograms encode their timestamp and ref as delta to those. 
first := histograms[0] - buf.PutBE64(first.Ref) + buf.PutBE64(uint64(first.Ref)) buf.PutBE64int64(first.T) for _, h := range histograms { diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index f69989d50..0e241fd2f 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -20,7 +20,7 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/encoding" "github.com/prometheus/prometheus/tsdb/tombstones" ) diff --git a/tsdb/repair_test.go b/tsdb/repair_test.go index 7fb2720fd..35fb7bbe1 100644 --- a/tsdb/repair_test.go +++ b/tsdb/repair_test.go @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/index" @@ -78,7 +78,7 @@ func TestRepairBadIndexVersion(t *testing.T) { require.Error(t, err) // Touch chunks dir in block to imitate them. - require.NoError(t, os.MkdirAll(filepath.Join(tmpDbDir, "chunks"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(tmpDbDir, "chunks"), 0o777)) // Read current index to check integrity. 
r, err := index.NewFileReader(filepath.Join(tmpDbDir, indexFilename)) diff --git a/tsdb/test/hash_test.go b/tsdb/test/hash_test.go index 1778f0f86..1242f5db5 100644 --- a/tsdb/test/hash_test.go +++ b/tsdb/test/hash_test.go @@ -81,7 +81,6 @@ func fnv64a(b []byte) uint64 { } func BenchmarkCRC32_diff(b *testing.B) { - data := [][]byte{} for i := 0; i < 1000; i++ { diff --git a/tsdb/test/labels_test.go b/tsdb/test/labels_test.go index 07242181d..354dbf836 100644 --- a/tsdb/test/labels_test.go +++ b/tsdb/test/labels_test.go @@ -18,7 +18,7 @@ import ( "crypto/rand" "testing" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" ) func BenchmarkMapClone(b *testing.B) { diff --git a/tsdb/tombstones/tombstones.go b/tsdb/tombstones/tombstones.go index 8b95481a4..621e10471 100644 --- a/tsdb/tombstones/tombstones.go +++ b/tsdb/tombstones/tombstones.go @@ -28,6 +28,7 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/encoding" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" @@ -39,8 +40,10 @@ const ( // MagicTombstone is 4 bytes at the head of a tombstone file. MagicTombstone = 0x0130BA30 - tombstoneFormatV1 = 1 - tombstonesHeaderSize = 5 + tombstoneFormatV1 = 1 + tombstoneFormatVersionSize = 1 + tombstonesHeaderSize = 5 + tombstonesCRCSize = 4 ) // The table gets initialized with sync.Once but may still cause a race @@ -61,10 +64,10 @@ func newCRC32() hash.Hash32 { // Reader gives access to tombstone intervals by series reference. type Reader interface { // Get returns deletion intervals for the series with the given reference. - Get(ref uint64) (Intervals, error) + Get(ref storage.SeriesRef) (Intervals, error) // Iter calls the given function for each encountered interval. 
- Iter(func(uint64, Intervals) error) error + Iter(func(storage.SeriesRef, Intervals) error) error // Total returns the total count of tombstones. Total() uint64 @@ -110,7 +113,7 @@ func WriteFile(logger log.Logger, dir string, tr Reader) (int64, error) { } // Ignore first byte which is the format type. We do this for compatibility. - if _, err := hash.Write(bytes[1:]); err != nil { + if _, err := hash.Write(bytes[tombstoneFormatVersionSize:]); err != nil { return 0, errors.Wrap(err, "calculating hash for tombstones") } @@ -142,9 +145,9 @@ func WriteFile(logger log.Logger, dir string, tr Reader) (int64, error) { func Encode(tr Reader) ([]byte, error) { buf := encoding.Encbuf{} buf.PutByte(tombstoneFormatV1) - err := tr.Iter(func(ref uint64, ivs Intervals) error { + err := tr.Iter(func(ref storage.SeriesRef, ivs Intervals) error { for _, iv := range ivs { - buf.PutUvarint64(ref) + buf.PutUvarint64(uint64(ref)) buf.PutVarint64(iv.Mint) buf.PutVarint64(iv.Maxt) } @@ -167,7 +170,7 @@ func Decode(b []byte) (Reader, error) { stonesMap := NewMemTombstones() for d.Len() > 0 { - k := d.Uvarint64() + k := storage.SeriesRef(d.Uvarint64()) mint := d.Varint64() maxt := d.Varint64() if d.Err() != nil { @@ -182,7 +185,7 @@ func Decode(b []byte) (Reader, error) { // Stone holds the information on the posting and time-range // that is deleted. type Stone struct { - Ref uint64 + Ref storage.SeriesRef Intervals Intervals } @@ -198,7 +201,7 @@ func ReadTombstones(dir string) (Reader, int64, error) { return nil, 0, errors.Wrap(encoding.ErrInvalidSize, "tombstones header") } - d := &encoding.Decbuf{B: b[:len(b)-4]} // 4 for the checksum. + d := &encoding.Decbuf{B: b[:len(b)-tombstonesCRCSize]} if mg := d.Be32(); mg != MagicTombstone { return nil, 0, fmt.Errorf("invalid magic number %x", mg) } @@ -206,10 +209,10 @@ func ReadTombstones(dir string) (Reader, int64, error) { // Verify checksum. hash := newCRC32() // Ignore first byte which is the format type. 
- if _, err := hash.Write(d.Get()[1:]); err != nil { + if _, err := hash.Write(d.Get()[tombstoneFormatVersionSize:]); err != nil { return nil, 0, errors.Wrap(err, "write to hash") } - if binary.BigEndian.Uint32(b[len(b)-4:]) != hash.Sum32() { + if binary.BigEndian.Uint32(b[len(b)-tombstonesCRCSize:]) != hash.Sum32() { return nil, 0, errors.New("checksum did not match") } @@ -226,33 +229,33 @@ func ReadTombstones(dir string) (Reader, int64, error) { } type MemTombstones struct { - intvlGroups map[uint64]Intervals + intvlGroups map[storage.SeriesRef]Intervals mtx sync.RWMutex } // NewMemTombstones creates new in memory Tombstone Reader // that allows adding new intervals. func NewMemTombstones() *MemTombstones { - return &MemTombstones{intvlGroups: make(map[uint64]Intervals)} + return &MemTombstones{intvlGroups: make(map[storage.SeriesRef]Intervals)} } func NewTestMemTombstones(intervals []Intervals) *MemTombstones { ret := NewMemTombstones() for i, intervalsGroup := range intervals { for _, interval := range intervalsGroup { - ret.AddInterval(uint64(i+1), interval) + ret.AddInterval(storage.SeriesRef(i+1), interval) } } return ret } -func (t *MemTombstones) Get(ref uint64) (Intervals, error) { +func (t *MemTombstones) Get(ref storage.SeriesRef) (Intervals, error) { t.mtx.RLock() defer t.mtx.RUnlock() return t.intvlGroups[ref], nil } -func (t *MemTombstones) DeleteTombstones(refs map[uint64]struct{}) { +func (t *MemTombstones) DeleteTombstones(refs map[storage.SeriesRef]struct{}) { t.mtx.Lock() defer t.mtx.Unlock() for ref := range refs { @@ -280,7 +283,7 @@ func (t *MemTombstones) TruncateBefore(beforeT int64) { } } -func (t *MemTombstones) Iter(f func(uint64, Intervals) error) error { +func (t *MemTombstones) Iter(f func(storage.SeriesRef, Intervals) error) error { t.mtx.RLock() defer t.mtx.RUnlock() for ref, ivs := range t.intvlGroups { @@ -303,7 +306,7 @@ func (t *MemTombstones) Total() uint64 { } // AddInterval to an existing memTombstones. 
-func (t *MemTombstones) AddInterval(ref uint64, itvs ...Interval) { +func (t *MemTombstones) AddInterval(ref storage.SeriesRef, itvs ...Interval) { t.mtx.Lock() defer t.mtx.Unlock() for _, itv := range itvs { diff --git a/tsdb/tombstones/tombstones_test.go b/tsdb/tombstones/tombstones_test.go index fa1825d17..95fb12d69 100644 --- a/tsdb/tombstones/tombstones_test.go +++ b/tsdb/tombstones/tombstones_test.go @@ -25,6 +25,8 @@ import ( "github.com/go-kit/log" "github.com/stretchr/testify/require" "go.uber.org/goleak" + + "github.com/prometheus/prometheus/storage" ) func TestMain(m *testing.M) { @@ -50,7 +52,7 @@ func TestWriteAndReadbackTombstones(t *testing.T) { dranges = dranges.Add(Interval{mint, mint + rand.Int63n(1000)}) mint += rand.Int63n(1000) + 1 } - stones.AddInterval(ref, dranges...) + stones.AddInterval(storage.SeriesRef(ref), dranges...) } _, err := WriteFile(log.NewNopLogger(), tmpdir, stones) @@ -66,18 +68,18 @@ func TestWriteAndReadbackTombstones(t *testing.T) { func TestDeletingTombstones(t *testing.T) { stones := NewMemTombstones() - ref := uint64(42) + ref := storage.SeriesRef(42) mint := rand.Int63n(time.Now().UnixNano()) dranges := make(Intervals, 0, 1) dranges = dranges.Add(Interval{mint, mint + rand.Int63n(1000)}) stones.AddInterval(ref, dranges...) - stones.AddInterval(uint64(43), dranges...) + stones.AddInterval(storage.SeriesRef(43), dranges...) intervals, err := stones.Get(ref) require.NoError(t, err) require.Equal(t, intervals, dranges) - stones.DeleteTombstones(map[uint64]struct{}{ref: struct{}{}}) + stones.DeleteTombstones(map[storage.SeriesRef]struct{}{ref: {}}) intervals, err = stones.Get(ref) require.NoError(t, err) @@ -112,7 +114,7 @@ func TestTruncateBefore(t *testing.T) { }, } for _, c := range cases { - ref := uint64(42) + ref := storage.SeriesRef(42) stones := NewMemTombstones() stones.AddInterval(ref, c.before...) 
@@ -231,13 +233,13 @@ func TestMemTombstonesConcurrency(t *testing.T) { go func() { for x := 0; x < totalRuns; x++ { - tomb.AddInterval(uint64(x), Interval{int64(x), int64(x)}) + tomb.AddInterval(storage.SeriesRef(x), Interval{int64(x), int64(x)}) } wg.Done() }() go func() { for x := 0; x < totalRuns; x++ { - _, err := tomb.Get(uint64(x)) + _, err := tomb.Get(storage.SeriesRef(x)) require.NoError(t, err) } wg.Done() diff --git a/tsdb/tsdbblockutil.go b/tsdb/tsdbblockutil.go index 3e4ac5856..732446324 100644 --- a/tsdb/tsdbblockutil.go +++ b/tsdb/tsdbblockutil.go @@ -48,7 +48,7 @@ func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger l app := w.Appender(ctx) for _, s := range series { - ref := uint64(0) + ref := storage.SeriesRef(0) it := s.Iterator() lset := s.Labels() for it.Next() { diff --git a/tsdb/tsdbutil/buffer.go b/tsdb/tsdbutil/buffer.go index 28cb04a7b..9433be77a 100644 --- a/tsdb/tsdbutil/buffer.go +++ b/tsdb/tsdbutil/buffer.go @@ -225,7 +225,7 @@ func (r *sampleRing) last() (int64, float64, bool) { func (r *sampleRing) samples() []sample { res := make([]sample, r.l) - var k = r.f + r.l + k := r.f + r.l var j int if k > len(r.buf) { k = len(r.buf) diff --git a/tsdb/tsdbutil/buffer_test.go b/tsdb/tsdbutil/buffer_test.go index de0e40308..6423871ca 100644 --- a/tsdb/tsdbutil/buffer_test.go +++ b/tsdb/tsdbutil/buffer_test.go @@ -18,9 +18,10 @@ import ( "sort" "testing" + "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/stretchr/testify/require" ) func TestSampleRing(t *testing.T) { diff --git a/tsdb/tsdbutil/chunks.go b/tsdb/tsdbutil/chunks.go index 5b4e954ae..93af3acfd 100644 --- a/tsdb/tsdbutil/chunks.go +++ b/tsdb/tsdbutil/chunks.go @@ -69,7 +69,7 @@ func PopulatedChunk(numSamples int, minTime int64) chunks.Meta { } // GenerateSamples starting at start and counting up numSamples. 
-func GenerateSamples(start int, numSamples int) []Sample { +func GenerateSamples(start, numSamples int) []Sample { samples := make([]Sample, 0, numSamples) for i := start; i < start+numSamples; i++ { samples = append(samples, sample{ diff --git a/tsdb/tsdbutil/dir_locker.go b/tsdb/tsdbutil/dir_locker.go new file mode 100644 index 000000000..155f58641 --- /dev/null +++ b/tsdb/tsdbutil/dir_locker.go @@ -0,0 +1,104 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tsdbutil + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/prometheus/prometheus/tsdb/fileutil" +) + +const ( + lockfileDisabled = -1 + lockfileReplaced = 0 + lockfileCreatedCleanly = 1 +) + +type DirLocker struct { + logger log.Logger + + createdCleanly prometheus.Gauge + + releaser fileutil.Releaser + path string +} + +// NewDirLocker creates a DirLocker that can obtain an exclusive lock on dir. +func NewDirLocker(dir, subsystem string, l log.Logger, r prometheus.Registerer) (*DirLocker, error) { + lock := &DirLocker{ + logger: l, + createdCleanly: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: fmt.Sprintf("prometheus_%s_clean_start", subsystem), + Help: "-1: lockfile is disabled. 0: a lockfile from a previous execution was replaced. 
1: lockfile creation was clean", + }), + } + + if r != nil { + r.MustRegister(lock.createdCleanly) + } + + lock.createdCleanly.Set(lockfileDisabled) + + absdir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + lock.path = filepath.Join(absdir, "lock") + + return lock, nil +} + +// Lock obtains the lock on the locker directory. +func (l *DirLocker) Lock() error { + if l.releaser != nil { + return errors.New("DB lock already obtained") + } + + if _, err := os.Stat(l.path); err == nil { + level.Warn(l.logger).Log("msg", "A lockfile from a previous execution already existed. It was replaced", "file", l.path) + + l.createdCleanly.Set(lockfileReplaced) + } else { + l.createdCleanly.Set(lockfileCreatedCleanly) + } + + lockf, _, err := fileutil.Flock(l.path) + if err != nil { + return errors.Wrap(err, "lock DB directory") + } + l.releaser = lockf + return nil +} + +// Release releases the lock. No-op if the lock is not held. +func (l *DirLocker) Release() error { + if l.releaser == nil { + return nil + } + + errs := tsdb_errors.NewMulti() + errs.Add(l.releaser.Release()) + errs.Add(os.Remove(l.path)) + + l.releaser = nil + return errs.Err() +} diff --git a/tsdb/tsdbutil/dir_locker_test.go b/tsdb/tsdbutil/dir_locker_test.go new file mode 100644 index 000000000..fc7d905b2 --- /dev/null +++ b/tsdb/tsdbutil/dir_locker_test.go @@ -0,0 +1,38 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tsdbutil + +import ( + "testing" + + "github.com/go-kit/log" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/util/testutil" +) + +func TestLockfile(t *testing.T) { + TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*DirLocker, testutil.Closer) { + locker, err := NewDirLocker(data, "tsdbutil", log.NewNopLogger(), nil) + require.NoError(t, err) + + if createLock { + require.NoError(t, locker.Lock()) + } + + return locker, testutil.NewCallbackCloser(func() { + require.NoError(t, locker.Release()) + }) + }) +} diff --git a/tsdb/tsdbutil/dir_locker_testutil.go b/tsdb/tsdbutil/dir_locker_testutil.go new file mode 100644 index 000000000..cbb21e254 --- /dev/null +++ b/tsdb/tsdbutil/dir_locker_testutil.go @@ -0,0 +1,91 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tsdbutil + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/go-kit/log" + prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/util/testutil" +) + +// TestDirLockerUsage performs a set of tests which guarantee correct usage of +// DirLocker. open should use data as the storage directory, and createLock +// to determine if a lock file should be used. 
+func TestDirLockerUsage(t *testing.T, open func(t *testing.T, data string, createLock bool) (*DirLocker, testutil.Closer)) { + t.Helper() + + cases := []struct { + fileAlreadyExists bool + lockFileDisabled bool + expectedValue int + }{ + { + fileAlreadyExists: false, + lockFileDisabled: false, + expectedValue: lockfileCreatedCleanly, + }, + { + fileAlreadyExists: true, + lockFileDisabled: false, + expectedValue: lockfileReplaced, + }, + { + fileAlreadyExists: true, + lockFileDisabled: true, + expectedValue: lockfileDisabled, + }, + { + fileAlreadyExists: false, + lockFileDisabled: true, + expectedValue: lockfileDisabled, + }, + } + + for _, c := range cases { + t.Run(fmt.Sprintf("%+v", c), func(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "test") + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, os.RemoveAll(tmpdir)) + }) + + // Test preconditions (file already exists + lockfile option) + if c.fileAlreadyExists { + tmpLocker, err := NewDirLocker(tmpdir, "tsdb", log.NewNopLogger(), nil) + require.NoError(t, err) + err = ioutil.WriteFile(tmpLocker.path, []byte{}, 0o644) + require.NoError(t, err) + } + + locker, closer := open(t, tmpdir, !c.lockFileDisabled) + require.Equal(t, float64(c.expectedValue), prom_testutil.ToFloat64(locker.createdCleanly)) + + // Close the client. This should delete the lockfile. 
+ closer.Close() + + // Check that the lockfile is always deleted + if !c.lockFileDisabled { + _, err = os.Stat(locker.path) + require.True(t, os.IsNotExist(err), "lockfile was not deleted") + } + }) + } +} diff --git a/tsdb/wal.go b/tsdb/wal.go index 71fc0a44c..05661779f 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -32,7 +32,9 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/encoding" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/record" @@ -113,8 +115,8 @@ type WALReader interface { // the truncation threshold can be compacted. type segmentFile struct { *os.File - maxTime int64 // highest tombstone or sample timestamp in segment - minSeries uint64 // lowerst series ID in segment + maxTime int64 // highest tombstone or sample timestamp in segment + minSeries chunks.HeadSeriesRef // lowerst series ID in segment } func newSegmentFile(f *os.File) *segmentFile { @@ -171,7 +173,7 @@ type SegmentWAL struct { // OpenSegmentWAL opens or creates a write ahead log in the given directory. // The WAL must be read completely before new data is written. func OpenSegmentWAL(dir string, logger log.Logger, flushInterval time.Duration, r prometheus.Registerer) (*SegmentWAL, error) { - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } df, err := fileutil.OpenDir(dir) @@ -292,7 +294,7 @@ func (w *SegmentWAL) putBuffer(b *encoding.Encbuf) { // Truncate deletes the values prior to mint and the series which the keep function // does not indicate to preserve. 
-func (w *SegmentWAL) Truncate(mint int64, keep func(uint64) bool) error { +func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) error { // The last segment is always active. if len(w.files) < 2 { return nil @@ -505,7 +507,7 @@ func (w *SegmentWAL) LogDeletes(stones []tombstones.Stone) error { func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) { // We must open all files in read/write mode as we may have to truncate along // the way and any file may become the head. - f, err := os.OpenFile(name, os.O_RDWR, 0666) + f, err := os.OpenFile(name, os.O_RDWR, 0o666) if err != nil { return nil, err } @@ -787,7 +789,7 @@ const ( func (w *SegmentWAL) encodeSeries(buf *encoding.Encbuf, series []record.RefSeries) uint8 { for _, s := range series { - buf.PutBE64(s.Ref) + buf.PutBE64(uint64(s.Ref)) buf.PutUvarint(len(s.Labels)) for _, l := range s.Labels { @@ -808,7 +810,7 @@ func (w *SegmentWAL) encodeSamples(buf *encoding.Encbuf, samples []record.RefSam // TODO(fabxc): optimize for all samples having the same timestamp. 
first := samples[0] - buf.PutBE64(first.Ref) + buf.PutBE64(uint64(first.Ref)) buf.PutBE64int64(first.T) for _, s := range samples { @@ -822,7 +824,7 @@ func (w *SegmentWAL) encodeSamples(buf *encoding.Encbuf, samples []record.RefSam func (w *SegmentWAL) encodeDeletes(buf *encoding.Encbuf, stones []tombstones.Stone) uint8 { for _, s := range stones { for _, iv := range s.Intervals { - buf.PutBE64(s.Ref) + buf.PutBE64(uint64(s.Ref)) buf.PutVarint64(iv.Mint) buf.PutVarint64(iv.Maxt) } @@ -1120,7 +1122,7 @@ func (r *walReader) decodeSeries(flag byte, b []byte, res *[]record.RefSeries) e dec := encoding.Decbuf{B: b} for len(dec.B) > 0 && dec.Err() == nil { - ref := dec.Be64() + ref := chunks.HeadSeriesRef(dec.Be64()) lset := make(labels.Labels, dec.Uvarint()) @@ -1161,7 +1163,7 @@ func (r *walReader) decodeSamples(flag byte, b []byte, res *[]record.RefSample) val := dec.Be64() *res = append(*res, record.RefSample{ - Ref: uint64(int64(baseRef) + dref), + Ref: chunks.HeadSeriesRef(int64(baseRef) + dref), T: baseTime + dtime, V: math.Float64frombits(val), }) @@ -1181,7 +1183,7 @@ func (r *walReader) decodeDeletes(flag byte, b []byte, res *[]tombstones.Stone) for dec.Len() > 0 && dec.Err() == nil { *res = append(*res, tombstones.Stone{ - Ref: dec.Be64(), + Ref: storage.SeriesRef(dec.Be64()), Intervals: tombstones.Intervals{ {Mint: dec.Varint64(), Maxt: dec.Varint64()}, }, diff --git a/tsdb/wal/checkpoint.go b/tsdb/wal/checkpoint.go index eb0d27034..9064beed0 100644 --- a/tsdb/wal/checkpoint.go +++ b/tsdb/wal/checkpoint.go @@ -29,6 +29,7 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" + "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/record" @@ -82,22 +83,21 @@ func DeleteCheckpoints(dir string, maxIndex int) error { const checkpointPrefix = "checkpoint." 
-// Checkpoint creates a compacted checkpoint of segments in range [first, last] in the given WAL. +// Checkpoint creates a compacted checkpoint of segments in range [from, to] in the given WAL. // It includes the most recent checkpoint if it exists. -// All series not satisfying keep and samples below mint are dropped. +// All series not satisfying keep and samples/tombstones/exemplars below mint are dropped. // // The checkpoint is stored in a directory named checkpoint.N in the same // segmented format as the original WAL itself. // This makes it easy to read it through the WAL package and concatenate // it with the original WAL. -func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id uint64) bool, mint int64) (*CheckpointStats, error) { +func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) { stats := &CheckpointStats{} var sgmReader io.ReadCloser level.Info(logger).Log("msg", "Creating checkpoint", "from_segment", from, "to_segment", to, "mint", mint) { - var sgmRange []SegmentRange dir, idx, err := LastCheckpoint(w.Dir()) if err != nil && err != record.ErrNotFound { @@ -129,7 +129,7 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id uint64) bo return nil, errors.Wrap(err, "remove previous temporary checkpoint dir") } - if err := os.MkdirAll(cpdirtmp, 0777); err != nil { + if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { return nil, errors.Wrap(err, "create checkpoint dir") } cp, err := New(nil, nil, cpdirtmp, w.CompressionEnabled()) diff --git a/tsdb/wal/checkpoint_test.go b/tsdb/wal/checkpoint_test.go index f84fb4dd8..554a4b5d2 100644 --- a/tsdb/wal/checkpoint_test.go +++ b/tsdb/wal/checkpoint_test.go @@ -26,51 +26,48 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/tsdb/chunks" 
"github.com/prometheus/prometheus/tsdb/record" ) func TestLastCheckpoint(t *testing.T) { - dir, err := ioutil.TempDir("", "test_checkpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() - _, _, err = LastCheckpoint(dir) + _, _, err := LastCheckpoint(dir) require.Equal(t, record.ErrNotFound, err) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.0000"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.0000"), 0o777)) s, k, err := LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.0000"), s) require.Equal(t, 0, k) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.xyz"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.xyz"), 0o777)) s, k, err = LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.0000"), s) require.Equal(t, 0, k) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1"), 0o777)) s, k, err = LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.1"), s) require.Equal(t, 1, k) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1000"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1000"), 0o777)) s, k, err = LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.1000"), s) require.Equal(t, 1000, k) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0o777)) s, k, err = LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.99999999"), s) require.Equal(t, 99999999, k) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, 
"checkpoint.100000000"), 0o777)) s, k, err = LastCheckpoint(dir) require.NoError(t, err) require.Equal(t, filepath.Join(dir, "checkpoint.100000000"), s) @@ -78,18 +75,14 @@ func TestLastCheckpoint(t *testing.T) { } func TestDeleteCheckpoints(t *testing.T) { - dir, err := ioutil.TempDir("", "test_checkpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() require.NoError(t, DeleteCheckpoints(dir, 0)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.00"), 0777)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.01"), 0777)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.02"), 0777)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.03"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.00"), 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.01"), 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.02"), 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.03"), 0o777)) require.NoError(t, DeleteCheckpoints(dir, 2)) @@ -101,9 +94,9 @@ func TestDeleteCheckpoints(t *testing.T) { } require.Equal(t, []string{"checkpoint.02", "checkpoint.03"}, fns) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0777)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0777)) - require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000001"), 0777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0o777)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000001"), 0o777)) require.NoError(t, DeleteCheckpoints(dir, 100000000)) @@ -119,11 +112,7 @@ func TestDeleteCheckpoints(t *testing.T) { func TestCheckpoint(t *testing.T) { for _, compress := range []bool{false, true} { 
t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { - dir, err := ioutil.TempDir("", "test_checkpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() var enc record.Encoder // Create a dummy segment to bump the initial number. @@ -186,7 +175,7 @@ func TestCheckpoint(t *testing.T) { } require.NoError(t, w.Close()) - _, err = Checkpoint(log.NewNopLogger(), w, 100, 106, func(x uint64) bool { + _, err = Checkpoint(log.NewNopLogger(), w, 100, 106, func(x chunks.HeadSeriesRef) bool { return x%2 == 0 }, last/2) require.NoError(t, err) @@ -240,20 +229,17 @@ func TestCheckpoint(t *testing.T) { func TestCheckpointNoTmpFolderAfterError(t *testing.T) { // Create a new wal with invalid data. - dir, err := ioutil.TempDir("", "test_checkpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() w, err := NewSize(nil, nil, dir, 64*1024, false) require.NoError(t, err) var enc record.Encoder require.NoError(t, w.Log(enc.Series([]record.RefSeries{ - {Ref: 0, Labels: labels.FromStrings("a", "b", "c", "2")}}, nil))) + {Ref: 0, Labels: labels.FromStrings("a", "b", "c", "2")}, + }, nil))) require.NoError(t, w.Close()) // Corrupt data. - f, err := os.OpenFile(filepath.Join(w.Dir(), "00000000"), os.O_WRONLY, 0666) + f, err := os.OpenFile(filepath.Join(w.Dir(), "00000000"), os.O_WRONLY, 0o666) require.NoError(t, err) _, err = f.WriteAt([]byte{42}, 1) require.NoError(t, err) diff --git a/tsdb/wal/reader_test.go b/tsdb/wal/reader_test.go index 8d53612c0..a6516367d 100644 --- a/tsdb/wal/reader_test.go +++ b/tsdb/wal/reader_test.go @@ -59,100 +59,102 @@ var readerConstructors = map[string]func(io.Reader) reader{ }, } -var data = make([]byte, 100000) -var testReaderCases = []struct { - t []rec - exp [][]byte - fail bool -}{ - // Sequence of valid records. 
- { - t: []rec{ - {recFull, data[0:200]}, - {recFirst, data[200:300]}, - {recLast, data[300:400]}, - {recFirst, data[400:800]}, - {recMiddle, data[800:900]}, - {recPageTerm, make([]byte, pageSize-900-recordHeaderSize*5-1)}, // exactly lines up with page boundary. - {recLast, data[900:900]}, - {recFirst, data[900:1000]}, - {recMiddle, data[1000:1200]}, - {recMiddle, data[1200:30000]}, - {recMiddle, data[30000:30001]}, - {recMiddle, data[30001:30001]}, - {recLast, data[30001:32000]}, +var ( + data = make([]byte, 100000) + testReaderCases = []struct { + t []rec + exp [][]byte + fail bool + }{ + // Sequence of valid records. + { + t: []rec{ + {recFull, data[0:200]}, + {recFirst, data[200:300]}, + {recLast, data[300:400]}, + {recFirst, data[400:800]}, + {recMiddle, data[800:900]}, + {recPageTerm, make([]byte, pageSize-900-recordHeaderSize*5-1)}, // exactly lines up with page boundary. + {recLast, data[900:900]}, + {recFirst, data[900:1000]}, + {recMiddle, data[1000:1200]}, + {recMiddle, data[1200:30000]}, + {recMiddle, data[30000:30001]}, + {recMiddle, data[30001:30001]}, + {recLast, data[30001:32000]}, + }, + exp: [][]byte{ + data[0:200], + data[200:400], + data[400:900], + data[900:32000], + }, }, - exp: [][]byte{ - data[0:200], - data[200:400], - data[400:900], - data[900:32000], + // Exactly at the limit of one page minus the header size + { + t: []rec{ + {recFull, data[0 : pageSize-recordHeaderSize]}, + }, + exp: [][]byte{ + data[:pageSize-recordHeaderSize], + }, }, - }, - // Exactly at the limit of one page minus the header size - { - t: []rec{ - {recFull, data[0 : pageSize-recordHeaderSize]}, + // More than a full page, this exceeds our buffer and can never happen + // when written by the WAL. + { + t: []rec{ + {recFull, data[0 : pageSize+1]}, + }, + fail: true, }, - exp: [][]byte{ - data[:pageSize-recordHeaderSize], + // Two records the together are too big for a page. + // NB currently the non-live reader succeeds on this. I think this is a bug. 
+ // but we've seen it in production. + { + t: []rec{ + {recFull, data[:pageSize/2]}, + {recFull, data[:pageSize/2]}, + }, + exp: [][]byte{ + data[:pageSize/2], + data[:pageSize/2], + }, }, - }, - // More than a full page, this exceeds our buffer and can never happen - // when written by the WAL. - { - t: []rec{ - {recFull, data[0 : pageSize+1]}, + // Invalid orders of record types. + { + t: []rec{{recMiddle, data[:200]}}, + fail: true, }, - fail: true, - }, - // Two records the together are too big for a page. - // NB currently the non-live reader succeeds on this. I think this is a bug. - // but we've seen it in production. - { - t: []rec{ - {recFull, data[:pageSize/2]}, - {recFull, data[:pageSize/2]}, + { + t: []rec{{recLast, data[:200]}}, + fail: true, }, - exp: [][]byte{ - data[:pageSize/2], - data[:pageSize/2], + { + t: []rec{ + {recFirst, data[:200]}, + {recFull, data[200:400]}, + }, + fail: true, }, - }, - // Invalid orders of record types. - { - t: []rec{{recMiddle, data[:200]}}, - fail: true, - }, - { - t: []rec{{recLast, data[:200]}}, - fail: true, - }, - { - t: []rec{ - {recFirst, data[:200]}, - {recFull, data[200:400]}, + { + t: []rec{ + {recFirst, data[:100]}, + {recMiddle, data[100:200]}, + {recFull, data[200:400]}, + }, + fail: true, }, - fail: true, - }, - { - t: []rec{ - {recFirst, data[:100]}, - {recMiddle, data[100:200]}, - {recFull, data[200:400]}, + // Non-zero data after page termination. + { + t: []rec{ + {recFull, data[:100]}, + {recPageTerm, append(make([]byte, pageSize-recordHeaderSize-102), 1)}, + }, + exp: [][]byte{data[:100]}, + fail: true, }, - fail: true, - }, - // Non-zero data after page termination. 
- { - t: []rec{ - {recFull, data[:100]}, - {recPageTerm, append(make([]byte, pageSize-recordHeaderSize-102), 1)}, - }, - exp: [][]byte{data[:100]}, - fail: true, - }, -} + } +) func encodedRecord(t recType, b []byte) []byte { if t == recPageTerm { @@ -279,6 +281,7 @@ type multiReadCloser struct { func (m *multiReadCloser) Read(p []byte) (n int, err error) { return m.reader.Read(p) } + func (m *multiReadCloser) Close() error { return tsdb_errors.NewMulti(tsdb_errors.CloseAll(m.closers)).Err() } @@ -310,11 +313,7 @@ func TestReaderFuzz(t *testing.T) { for name, fn := range readerConstructors { for _, compress := range []bool{false, true} { t.Run(fmt.Sprintf("%s,compress=%t", name, compress), func(t *testing.T) { - dir, err := ioutil.TempDir("", "wal_fuzz_live") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() w, err := NewSize(nil, nil, dir, 128*pageSize, compress) require.NoError(t, err) @@ -335,7 +334,13 @@ func TestReaderFuzz(t *testing.T) { reader := fn(sr) for expected := range input { require.True(t, reader.Next(), "expected record: %v", reader.Err()) - require.Equal(t, expected, reader.Record(), "read wrong record") + r := reader.Record() + // Expected value may come as nil or empty slice, so it requires special comparison. 
+ if len(expected) == 0 { + require.Len(t, r, 0) + } else { + require.Equal(t, expected, r, "read wrong record") + } } require.False(t, reader.Next(), "unexpected record") }) @@ -347,11 +352,7 @@ func TestReaderFuzz_Live(t *testing.T) { logger := testutil.NewLogger(t) for _, compress := range []bool{false, true} { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { - dir, err := ioutil.TempDir("", "wal_fuzz_live") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() w, err := NewSize(nil, nil, dir, 128*pageSize, compress) require.NoError(t, err) @@ -385,7 +386,12 @@ func TestReaderFuzz_Live(t *testing.T) { rec := r.Record() expected, ok := <-input require.True(t, ok, "unexpected record") - require.Equal(t, expected, rec, "record does not match expected") + // Expected value may come as nil or empty slice, so it requires special comparison. + if len(expected) == 0 { + require.Len(t, rec, 0) + } else { + require.Equal(t, expected, rec, "record does not match expected") + } } require.Equal(t, io.EOF, r.Err(), "expected EOF, got: %v", r.Err()) return true @@ -432,11 +438,7 @@ func TestLiveReaderCorrupt_ShortFile(t *testing.T) { // Write a corrupt WAL segment, there is one record of pageSize in length, // but the segment is only half written. 
logger := testutil.NewLogger(t) - dir, err := ioutil.TempDir("", "wal_live_corrupt") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() w, err := NewSize(nil, nil, dir, pageSize, false) require.NoError(t, err) @@ -451,7 +453,7 @@ func TestLiveReaderCorrupt_ShortFile(t *testing.T) { err = w.Close() require.NoError(t, err) - segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0666) + segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0o666) require.NoError(t, err) err = segmentFile.Truncate(pageSize / 2) @@ -476,11 +478,7 @@ func TestLiveReaderCorrupt_ShortFile(t *testing.T) { func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) { // Write a corrupt WAL segment, when record len > page size. logger := testutil.NewLogger(t) - dir, err := ioutil.TempDir("", "wal_live_corrupt") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() w, err := NewSize(nil, nil, dir, pageSize*2, false) require.NoError(t, err) @@ -495,7 +493,7 @@ func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) { err = w.Close() require.NoError(t, err) - segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0666) + segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0o666) require.NoError(t, err) // Override the record length diff --git a/tsdb/wal/wal.go b/tsdb/wal/wal.go index c5023b0fd..3bc2894d3 100644 --- a/tsdb/wal/wal.go +++ b/tsdb/wal/wal.go @@ -118,7 +118,7 @@ func (e *CorruptionErr) Error() string { // OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends. 
func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { segName := SegmentName(dir, k) - f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0666) + f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0o666) if err != nil { return nil, err } @@ -144,7 +144,7 @@ func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { // CreateSegment creates a new segment k in dir. func CreateSegment(dir string, k int) (*Segment, error) { - f, err := os.OpenFile(SegmentName(dir, k), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) + f, err := os.OpenFile(SegmentName(dir, k), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o666) if err != nil { return nil, err } @@ -260,7 +260,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi if segmentSize%pageSize != 0 { return nil, errors.New("invalid segment size") } - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return nil, errors.Wrap(err, "create dir") } if logger == nil { @@ -452,10 +452,7 @@ func (w *WAL) Repair(origErr error) error { if err != nil { return err } - if err := w.setSegment(s); err != nil { - return err - } - return nil + return w.setSegment(s) } // SegmentName builds a segment name for the directory. @@ -472,6 +469,10 @@ func (w *WAL) NextSegment() error { // nextSegment creates the next segment and closes the previous one. func (w *WAL) nextSegment() error { + if w.closed { + return errors.New("wal is closed") + } + // Only flush the current page if it actually holds data. if w.page.alloc > 0 { if err := w.flushPage(true); err != nil { @@ -875,19 +876,24 @@ type segmentBufReader struct { off int // Offset of read data into current segment. 
} -// nolint:golint // TODO: Consider exporting segmentBufReader +// nolint:revive // TODO: Consider exporting segmentBufReader func NewSegmentBufReader(segs ...*Segment) *segmentBufReader { + if len(segs) == 0 { + return &segmentBufReader{} + } + return &segmentBufReader{ buf: bufio.NewReaderSize(segs[0], 16*pageSize), segs: segs, } } -// nolint:golint +// nolint:revive func NewSegmentBufReaderWithOffset(offset int, segs ...*Segment) (sbr *segmentBufReader, err error) { - if offset == 0 { + if offset == 0 || len(segs) == 0 { return NewSegmentBufReader(segs...), nil } + sbr = &segmentBufReader{ buf: bufio.NewReaderSize(segs[0], 16*pageSize), segs: segs, @@ -909,6 +915,10 @@ func (r *segmentBufReader) Close() (err error) { // Read implements io.Reader. func (r *segmentBufReader) Read(b []byte) (n int, err error) { + if len(r.segs) == 0 { + return 0, io.EOF + } + n, err = r.buf.Read(b) r.off += n diff --git a/tsdb/wal/wal_test.go b/tsdb/wal/wal_test.go index b12c0d60c..55cd6caa1 100644 --- a/tsdb/wal/wal_test.go +++ b/tsdb/wal/wal_test.go @@ -18,7 +18,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "math/rand" "os" "path/filepath" @@ -119,11 +118,7 @@ func TestWALRepair_ReadingError(t *testing.T) { }, } { t.Run(name, func(t *testing.T) { - dir, err := ioutil.TempDir("", "wal_repair") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() // We create 3 segments with 3 records each and // then corrupt a given record in a given segment. @@ -146,7 +141,7 @@ func TestWALRepair_ReadingError(t *testing.T) { require.NoError(t, w.Close()) - f, err := os.OpenFile(SegmentName(dir, test.corrSgm), os.O_RDWR, 0666) + f, err := os.OpenFile(SegmentName(dir, test.corrSgm), os.O_RDWR, 0o666) require.NoError(t, err) // Apply corruption function. @@ -172,7 +167,7 @@ func TestWALRepair_ReadingError(t *testing.T) { for r.Next() { } - //Close the segment so we don't break things on Windows. 
+ // Close the segment so we don't break things on Windows. s.Close() // No corruption in this segment. @@ -217,11 +212,7 @@ func TestWALRepair_ReadingError(t *testing.T) { // ensures that an error during reading that segment are correctly repaired before // moving to write more records to the WAL. func TestCorruptAndCarryOn(t *testing.T) { - dir, err := ioutil.TempDir("", "wal_repair") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() var ( logger = testutil.NewLogger(t) @@ -253,7 +244,7 @@ func TestCorruptAndCarryOn(t *testing.T) { segments, err := listSegments(dir) require.NoError(t, err) for _, segment := range segments { - f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", segment.index)), os.O_RDONLY, 0666) + f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", segment.index)), os.O_RDONLY, 0o666) require.NoError(t, err) fi, err := f.Stat() @@ -270,7 +261,7 @@ func TestCorruptAndCarryOn(t *testing.T) { // Truncate the first file, splitting the middle record in the second // page in half, leaving 4 valid records. { - f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", 0)), os.O_RDWR, 0666) + f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", 0)), os.O_RDWR, 0o666) require.NoError(t, err) fi, err := f.Stat() @@ -345,11 +336,7 @@ func TestCorruptAndCarryOn(t *testing.T) { // TestClose ensures that calling Close more than once doesn't panic and doesn't block. 
func TestClose(t *testing.T) { - dir, err := ioutil.TempDir("", "wal_repair") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() w, err := NewSize(nil, nil, dir, pageSize, false) require.NoError(t, err) require.NoError(t, w.Close()) @@ -362,11 +349,7 @@ func TestSegmentMetric(t *testing.T) { recordSize = (pageSize / 2) - recordHeaderSize ) - dir, err := ioutil.TempDir("", "segment_metric") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() w, err := NewSize(nil, nil, dir, segmentSize, false) require.NoError(t, err) @@ -393,8 +376,7 @@ func TestCompression(t *testing.T) { records = 100 ) - dirPath, err := ioutil.TempDir("", fmt.Sprintf("TestCompression_%t", compressed)) - require.NoError(t, err) + dirPath := t.TempDir() w, err := NewSize(nil, nil, dirPath, segmentSize, compressed) require.NoError(t, err) @@ -454,9 +436,7 @@ func TestLogPartialWrite(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - dirPath, err := ioutil.TempDir("", "logpartialwrite") - require.NoError(t, err) - defer func() { require.NoError(t, os.RemoveAll(dirPath)) }() + dirPath := t.TempDir() w, err := NewSize(nil, nil, dirPath, segmentSize, false) require.NoError(t, err) @@ -527,11 +507,7 @@ func (f *faultySegmentFile) Write(p []byte) (int, error) { func BenchmarkWAL_LogBatched(b *testing.B) { for _, compress := range []bool{true, false} { b.Run(fmt.Sprintf("compress=%t", compress), func(b *testing.B) { - dir, err := ioutil.TempDir("", "bench_logbatch") - require.NoError(b, err) - defer func() { - require.NoError(b, os.RemoveAll(dir)) - }() + dir := b.TempDir() w, err := New(nil, nil, dir, compress) require.NoError(b, err) @@ -561,11 +537,7 @@ func BenchmarkWAL_LogBatched(b *testing.B) { func BenchmarkWAL_Log(b *testing.B) { for _, compress := range []bool{true, false} { b.Run(fmt.Sprintf("compress=%t", compress), func(b *testing.B) { 
- dir, err := ioutil.TempDir("", "bench_logsingle") - require.NoError(b, err) - defer func() { - require.NoError(b, os.RemoveAll(dir)) - }() + dir := b.TempDir() w, err := New(nil, nil, dir, compress) require.NoError(b, err) diff --git a/tsdb/wal/watcher.go b/tsdb/wal/watcher.go index accefa7aa..3c47b58d0 100644 --- a/tsdb/wal/watcher.go +++ b/tsdb/wal/watcher.go @@ -30,7 +30,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/tsdb/record" ) diff --git a/tsdb/wal/watcher_test.go b/tsdb/wal/watcher_test.go index 3ff7d1ad2..1c76ea585 100644 --- a/tsdb/wal/watcher_test.go +++ b/tsdb/wal/watcher_test.go @@ -14,7 +14,6 @@ package wal import ( "fmt" - "io/ioutil" "math/rand" "os" "path" @@ -26,13 +25,16 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" ) -var defaultRetryInterval = 100 * time.Millisecond -var defaultRetries = 100 -var wMetrics = NewWatcherMetrics(prometheus.DefaultRegisterer) +var ( + defaultRetryInterval = 100 * time.Millisecond + defaultRetries = 100 + wMetrics = NewWatcherMetrics(prometheus.DefaultRegisterer) +) // retry executes f() n times at each interval until it returns true. 
func retry(t *testing.T, interval time.Duration, n int, f func() bool) { @@ -52,7 +54,7 @@ type writeToMock struct { samplesAppended int exemplarsAppended int seriesLock sync.Mutex - seriesSegmentIndexes map[uint64]int + seriesSegmentIndexes map[chunks.HeadSeriesRef]int } func (wtm *writeToMock) Append(s []record.RefSample) bool { @@ -97,7 +99,7 @@ func (wtm *writeToMock) checkNumLabels() int { func newWriteToMock() *writeToMock { return &writeToMock{ - seriesSegmentIndexes: make(map[uint64]int), + seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int), } } @@ -110,14 +112,10 @@ func TestTailSamples(t *testing.T) { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { now := time.Now() - dir, err := ioutil.TempDir("", "readCheckpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() wdir := path.Join(dir, "wal") - err = os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) enc := record.Encoder{} @@ -132,7 +130,7 @@ func TestTailSamples(t *testing.T) { ref := i + 100 series := enc.Series([]record.RefSeries{ { - Ref: uint64(ref), + Ref: chunks.HeadSeriesRef(ref), Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}}, }, }, nil) @@ -142,7 +140,7 @@ func TestTailSamples(t *testing.T) { inner := rand.Intn(ref + 1) sample := enc.Samples([]record.RefSample{ { - Ref: uint64(inner), + Ref: chunks.HeadSeriesRef(inner), T: now.UnixNano() + 1, V: float64(i), }, @@ -154,7 +152,7 @@ func TestTailSamples(t *testing.T) { inner := rand.Intn(ref + 1) exemplar := enc.Exemplars([]record.RefExemplar{ { - Ref: uint64(inner), + Ref: chunks.HeadSeriesRef(inner), T: now.UnixNano() + 1, V: float64(i), Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", inner)), @@ -204,13 +202,9 @@ func TestReadToEndNoCheckpoint(t *testing.T) { for _, compress := range []bool{false, true} { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { - dir, err 
:= ioutil.TempDir("", "readToEnd_noCheckpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() wdir := path.Join(dir, "wal") - err = os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) w, err := NewSize(nil, nil, wdir, 128*pageSize, compress) @@ -226,7 +220,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) { for i := 0; i < seriesCount; i++ { series := enc.Series([]record.RefSeries{ { - Ref: uint64(i), + Ref: chunks.HeadSeriesRef(i), Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}}, }, }, nil) @@ -234,7 +228,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) { for j := 0; j < samplesCount; j++ { sample := enc.Samples([]record.RefSample{ { - Ref: uint64(j), + Ref: chunks.HeadSeriesRef(j), T: int64(i), V: float64(i), }, @@ -277,14 +271,10 @@ func TestReadToEndWithCheckpoint(t *testing.T) { for _, compress := range []bool{false, true} { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { - dir, err := ioutil.TempDir("", "readToEnd_withCheckpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() wdir := path.Join(dir, "wal") - err = os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) enc := record.Encoder{} @@ -299,7 +289,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) { ref := i + 100 series := enc.Series([]record.RefSeries{ { - Ref: uint64(ref), + Ref: chunks.HeadSeriesRef(ref), Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}}, }, }, nil) @@ -311,7 +301,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) { inner := rand.Intn(ref + 1) sample := enc.Samples([]record.RefSample{ { - Ref: uint64(inner), + Ref: chunks.HeadSeriesRef(inner), T: int64(i), V: float64(i), }, @@ -320,14 +310,14 @@ func TestReadToEndWithCheckpoint(t *testing.T) { } } - Checkpoint(log.NewNopLogger(), w, 0, 1, func(x uint64) bool { 
return true }, 0) + Checkpoint(log.NewNopLogger(), w, 0, 1, func(x chunks.HeadSeriesRef) bool { return true }, 0) w.Truncate(1) // Write more records after checkpointing. for i := 0; i < seriesCount; i++ { series := enc.Series([]record.RefSeries{ { - Ref: uint64(i), + Ref: chunks.HeadSeriesRef(i), Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}}, }, }, nil) @@ -336,7 +326,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) { for j := 0; j < samplesCount; j++ { sample := enc.Samples([]record.RefSample{ { - Ref: uint64(j), + Ref: chunks.HeadSeriesRef(j), T: int64(i), V: float64(i), }, @@ -368,14 +358,10 @@ func TestReadCheckpoint(t *testing.T) { for _, compress := range []bool{false, true} { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { - dir, err := ioutil.TempDir("", "readCheckpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() wdir := path.Join(dir, "wal") - err = os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) os.Create(SegmentName(wdir, 30)) @@ -392,7 +378,7 @@ func TestReadCheckpoint(t *testing.T) { ref := i + 100 series := enc.Series([]record.RefSeries{ { - Ref: uint64(ref), + Ref: chunks.HeadSeriesRef(ref), Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}}, }, }, nil) @@ -402,7 +388,7 @@ func TestReadCheckpoint(t *testing.T) { inner := rand.Intn(ref + 1) sample := enc.Samples([]record.RefSample{ { - Ref: uint64(inner), + Ref: chunks.HeadSeriesRef(inner), T: int64(i), V: float64(i), }, @@ -410,7 +396,7 @@ func TestReadCheckpoint(t *testing.T) { require.NoError(t, w.Log(sample)) } } - Checkpoint(log.NewNopLogger(), w, 30, 31, func(x uint64) bool { return true }, 0) + Checkpoint(log.NewNopLogger(), w, 30, 31, func(x chunks.HeadSeriesRef) bool { return true }, 0) w.Truncate(32) // Start read after checkpoint, no more data written. 
@@ -440,14 +426,10 @@ func TestReadCheckpointMultipleSegments(t *testing.T) { for _, compress := range []bool{false, true} { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { - dir, err := ioutil.TempDir("", "readCheckpoint") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() wdir := path.Join(dir, "wal") - err = os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) enc := record.Encoder{} @@ -460,7 +442,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) { ref := j + (i * 100) series := enc.Series([]record.RefSeries{ { - Ref: uint64(ref), + Ref: chunks.HeadSeriesRef(ref), Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", j)}}, }, }, nil) @@ -470,7 +452,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) { inner := rand.Intn(ref + 1) sample := enc.Samples([]record.RefSample{ { - Ref: uint64(inner), + Ref: chunks.HeadSeriesRef(inner), T: int64(i), V: float64(i), }, @@ -483,7 +465,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) { // At this point we should have at least 6 segments, lets create a checkpoint dir of the first 5. 
checkpointDir := dir + "/wal/checkpoint.000004" - err = os.Mkdir(checkpointDir, 0777) + err = os.Mkdir(checkpointDir, 0o777) require.NoError(t, err) for i := 0; i <= 4; i++ { err := os.Rename(SegmentName(dir+"/wal", i), SegmentName(checkpointDir, i)) @@ -522,14 +504,10 @@ func TestCheckpointSeriesReset(t *testing.T) { for _, tc := range testCases { t.Run(fmt.Sprintf("compress=%t", tc.compress), func(t *testing.T) { - dir, err := ioutil.TempDir("", "seriesReset") - require.NoError(t, err) - defer func() { - require.NoError(t, os.RemoveAll(dir)) - }() + dir := t.TempDir() wdir := path.Join(dir, "wal") - err = os.Mkdir(wdir, 0777) + err := os.Mkdir(wdir, 0o777) require.NoError(t, err) enc := record.Encoder{} @@ -544,7 +522,7 @@ func TestCheckpointSeriesReset(t *testing.T) { ref := i + 100 series := enc.Series([]record.RefSeries{ { - Ref: uint64(ref), + Ref: chunks.HeadSeriesRef(ref), Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}}, }, }, nil) @@ -554,7 +532,7 @@ func TestCheckpointSeriesReset(t *testing.T) { inner := rand.Intn(ref + 1) sample := enc.Samples([]record.RefSample{ { - Ref: uint64(inner), + Ref: chunks.HeadSeriesRef(inner), T: int64(i), V: float64(i), }, @@ -577,7 +555,7 @@ func TestCheckpointSeriesReset(t *testing.T) { }) require.Equal(t, seriesCount, wt.checkNumLabels()) - _, err = Checkpoint(log.NewNopLogger(), w, 2, 4, func(x uint64) bool { return true }, 0) + _, err = Checkpoint(log.NewNopLogger(), w, 2, 4, func(x chunks.HeadSeriesRef) bool { return true }, 0) require.NoError(t, err) err = w.Truncate(5) diff --git a/tsdb/wal_test.go b/tsdb/wal_test.go index fdcd6ce52..6b57cc230 100644 --- a/tsdb/wal_test.go +++ b/tsdb/wal_test.go @@ -30,7 +30,9 @@ import ( "github.com/go-kit/log" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunks" 
"github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/wal" @@ -102,7 +104,7 @@ func TestSegmentWAL_Truncate(t *testing.T) { var rs []record.RefSeries for j, s := range series[i : i+batch] { - rs = append(rs, record.RefSeries{Labels: s, Ref: uint64(i+j) + 1}) + rs = append(rs, record.RefSeries{Labels: s, Ref: chunks.HeadSeriesRef(i+j) + 1}) } err := w.LogSeries(rs) require.NoError(t, err) @@ -117,11 +119,11 @@ func TestSegmentWAL_Truncate(t *testing.T) { boundarySeries := w.files[len(w.files)/2].minSeries // We truncate while keeping every 2nd series. - keep := map[uint64]struct{}{} + keep := map[chunks.HeadSeriesRef]struct{}{} for i := 1; i <= numMetrics; i += 2 { - keep[uint64(i)] = struct{}{} + keep[chunks.HeadSeriesRef(i)] = struct{}{} } - keepf := func(id uint64) bool { + keepf := func(id chunks.HeadSeriesRef) bool { _, ok := keep[id] return ok } @@ -132,8 +134,8 @@ func TestSegmentWAL_Truncate(t *testing.T) { var expected []record.RefSeries for i := 1; i <= numMetrics; i++ { - if i%2 == 1 || uint64(i) >= boundarySeries { - expected = append(expected, record.RefSeries{Ref: uint64(i), Labels: series[i-1]}) + if i%2 == 1 || chunks.HeadSeriesRef(i) >= boundarySeries { + expected = append(expected, record.RefSeries{Ref: chunks.HeadSeriesRef(i), Labels: series[i-1]}) } } @@ -238,7 +240,7 @@ func TestSegmentWAL_Log_Restore(t *testing.T) { for j := 0; j < i*10; j++ { samples = append(samples, record.RefSample{ - Ref: uint64(j % 10000), + Ref: chunks.HeadSeriesRef(j % 10000), T: int64(j * 2), V: rand.Float64(), }) @@ -246,14 +248,14 @@ func TestSegmentWAL_Log_Restore(t *testing.T) { for j := 0; j < i*20; j++ { ts := rand.Int63() - stones = append(stones, tombstones.Stone{Ref: rand.Uint64(), Intervals: tombstones.Intervals{{Mint: ts, Maxt: ts + rand.Int63n(10000)}}}) + stones = append(stones, tombstones.Stone{Ref: storage.SeriesRef(rand.Uint64()), Intervals: tombstones.Intervals{{Mint: 
ts, Maxt: ts + rand.Int63n(10000)}}}) } lbls := series[i : i+stepSize] series := make([]record.RefSeries, 0, len(series)) for j, l := range lbls { series = append(series, record.RefSeries{ - Ref: uint64(i + j), + Ref: chunks.HeadSeriesRef(i + j), Labels: l, }) } @@ -326,7 +328,7 @@ func TestWALRestoreCorrupted(t *testing.T) { { name: "truncate_checksum", f: func(t *testing.T, w *SegmentWAL) { - f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666) + f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666) require.NoError(t, err) defer f.Close() @@ -339,7 +341,7 @@ func TestWALRestoreCorrupted(t *testing.T) { { name: "truncate_body", f: func(t *testing.T, w *SegmentWAL) { - f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666) + f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666) require.NoError(t, err) defer f.Close() @@ -352,7 +354,7 @@ func TestWALRestoreCorrupted(t *testing.T) { { name: "body_content", f: func(t *testing.T, w *SegmentWAL) { - f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666) + f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666) require.NoError(t, err) defer f.Close() @@ -367,7 +369,7 @@ func TestWALRestoreCorrupted(t *testing.T) { { name: "checksum", f: func(t *testing.T, w *SegmentWAL) { - f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666) + f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666) require.NoError(t, err) defer f.Close() diff --git a/pkg/gate/gate.go b/util/gate/gate.go similarity index 100% rename from pkg/gate/gate.go rename to util/gate/gate.go diff --git a/pkg/logging/dedupe.go b/util/logging/dedupe.go similarity index 100% rename from pkg/logging/dedupe.go rename to util/logging/dedupe.go diff --git a/pkg/logging/dedupe_test.go b/util/logging/dedupe_test.go similarity index 100% rename from pkg/logging/dedupe_test.go rename to util/logging/dedupe_test.go diff --git a/pkg/logging/file.go b/util/logging/file.go similarity index 91% rename from pkg/logging/file.go rename 
to util/logging/file.go index 3c0c3e3b0..6b5751b01 100644 --- a/pkg/logging/file.go +++ b/util/logging/file.go @@ -21,11 +21,9 @@ import ( "github.com/pkg/errors" ) -var ( - timestampFormat = log.TimestampFormat( - func() time.Time { return time.Now().UTC() }, - "2006-01-02T15:04:05.000Z07:00", - ) +var timestampFormat = log.TimestampFormat( + func() time.Time { return time.Now().UTC() }, + "2006-01-02T15:04:05.000Z07:00", ) // JSONFileLogger represents a logger that writes JSON to a file. @@ -40,7 +38,7 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) { return nil, nil } - f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666) + f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666) if err != nil { return nil, errors.Wrap(err, "can't create json logger") } diff --git a/pkg/logging/file_test.go b/util/logging/file_test.go similarity index 100% rename from pkg/logging/file_test.go rename to util/logging/file_test.go diff --git a/pkg/logging/ratelimit.go b/util/logging/ratelimit.go similarity index 100% rename from pkg/logging/ratelimit.go rename to util/logging/ratelimit.go diff --git a/pkg/modtimevfs/modtimevfs.go b/util/modtimevfs/modtimevfs.go similarity index 100% rename from pkg/modtimevfs/modtimevfs.go rename to util/modtimevfs/modtimevfs.go diff --git a/util/osutil/hostname.go b/util/osutil/hostname.go index 224dffe7c..c44cb391b 100644 --- a/util/osutil/hostname.go +++ b/util/osutil/hostname.go @@ -49,14 +49,12 @@ func GetFQDN() (string, error) { if fqdn, err := lookup(ip); err == nil { return fqdn, nil } - } if ip := addr.To16(); ip != nil { if fqdn, err := lookup(ip); err == nil { return fqdn, nil } - } } return hostname, nil diff --git a/pkg/pool/pool.go b/util/pool/pool.go similarity index 100% rename from pkg/pool/pool.go rename to util/pool/pool.go diff --git a/pkg/pool/pool_test.go b/util/pool/pool_test.go similarity index 100% rename from pkg/pool/pool_test.go rename to util/pool/pool_test.go diff --git 
a/pkg/runtime/limits_default.go b/util/runtime/limits_default.go similarity index 100% rename from pkg/runtime/limits_default.go rename to util/runtime/limits_default.go diff --git a/pkg/runtime/limits_windows.go b/util/runtime/limits_windows.go similarity index 100% rename from pkg/runtime/limits_windows.go rename to util/runtime/limits_windows.go diff --git a/pkg/runtime/statfs.go b/util/runtime/statfs.go similarity index 100% rename from pkg/runtime/statfs.go rename to util/runtime/statfs.go diff --git a/pkg/runtime/statfs_default.go b/util/runtime/statfs_default.go similarity index 99% rename from pkg/runtime/statfs_default.go rename to util/runtime/statfs_default.go index a493a5cbb..f850f2cd6 100644 --- a/pkg/runtime/statfs_default.go +++ b/util/runtime/statfs_default.go @@ -23,7 +23,6 @@ import ( // Statfs returns the file system type (Unix only) func Statfs(path string) string { - // Types of file systems that may be returned by `statfs` fsTypes := map[int64]string{ 0xadf5: "ADFS_SUPER_MAGIC", diff --git a/pkg/runtime/statfs_linux_386.go b/util/runtime/statfs_linux_386.go similarity index 100% rename from pkg/runtime/statfs_linux_386.go rename to util/runtime/statfs_linux_386.go diff --git a/pkg/runtime/statfs_uint32.go b/util/runtime/statfs_uint32.go similarity index 100% rename from pkg/runtime/statfs_uint32.go rename to util/runtime/statfs_uint32.go diff --git a/pkg/runtime/uname_default.go b/util/runtime/uname_default.go similarity index 100% rename from pkg/runtime/uname_default.go rename to util/runtime/uname_default.go diff --git a/pkg/runtime/uname_linux.go b/util/runtime/uname_linux.go similarity index 100% rename from pkg/runtime/uname_linux.go rename to util/runtime/uname_linux.go diff --git a/pkg/runtime/vmlimits_default.go b/util/runtime/vmlimits_default.go similarity index 100% rename from pkg/runtime/vmlimits_default.go rename to util/runtime/vmlimits_default.go diff --git a/pkg/runtime/vmlimits_openbsd.go b/util/runtime/vmlimits_openbsd.go 
similarity index 100% rename from pkg/runtime/vmlimits_openbsd.go rename to util/runtime/vmlimits_openbsd.go diff --git a/util/strutil/strconv.go b/util/strutil/strconv.go index 3d96e4faf..eed0134ab 100644 --- a/util/strutil/strconv.go +++ b/util/strutil/strconv.go @@ -19,9 +19,7 @@ import ( "regexp" ) -var ( - invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) -) +var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) // TableLinkForExpression creates an escaped relative link to the table view of // the provided expression. diff --git a/util/teststorage/storage.go b/util/teststorage/storage.go index 39aac596e..a82989477 100644 --- a/util/teststorage/storage.go +++ b/util/teststorage/storage.go @@ -21,8 +21,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/util/testutil" @@ -70,6 +70,6 @@ func (s TestStorage) ExemplarQueryable() storage.ExemplarQueryable { return s.exemplarStorage } -func (s TestStorage) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) { +func (s TestStorage) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { return ref, s.exemplarStorage.AddExemplar(l, e) } diff --git a/util/testutil/port.go b/util/testutil/port.go new file mode 100644 index 000000000..1e449b123 --- /dev/null +++ b/util/testutil/port.go @@ -0,0 +1,35 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "net" + "testing" +) + +// RandomUnprivilegedPort returns valid unprivileged random port number which can be used for testing. +func RandomUnprivilegedPort(t *testing.T) int { + t.Helper() + + listener, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("Listening on random port: %v", err) + } + + if err := listener.Close(); err != nil { + t.Fatalf("Closing listener: %v", err) + } + + return listener.Addr().(*net.TCPAddr).Port +} diff --git a/util/testutil/roundtrip.go b/util/testutil/roundtrip.go index 996d11f36..a93991a13 100644 --- a/util/testutil/roundtrip.go +++ b/util/testutil/roundtrip.go @@ -43,5 +43,7 @@ func NewRoundTripCheckRequest(checkRequest func(*http.Request), theResponse *htt checkRequest: checkRequest, roundTrip: roundTrip{ theResponse: theResponse, - theError: theError}} + theError: theError, + }, + } } diff --git a/web/api/v1/api.go b/web/api/v1/api.go index c1f7df92b..e6790c505 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -39,10 +39,10 @@ import ( "github.com/prometheus/common/route" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/textparse" - "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" 
"github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/rules" @@ -75,9 +75,7 @@ const ( errorNotFound errorType = "not_found" ) -var ( - LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"} -) +var LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"} type apiError struct { typ errorType @@ -181,6 +179,7 @@ type API struct { buildInfo *PrometheusVersion runtimeInfo func() (RuntimeInfo, error) gatherer prometheus.Gatherer + isAgent bool remoteWriteHandler http.Handler remoteReadHandler http.Handler @@ -211,6 +210,7 @@ func NewAPI( remoteReadSampleLimit int, remoteReadConcurrencyLimit int, remoteReadMaxBytesInFrame int, + isAgent bool, CORSOrigin *regexp.Regexp, runtimeInfo func() (RuntimeInfo, error), buildInfo *PrometheusVersion, @@ -239,6 +239,7 @@ func NewAPI( runtimeInfo: runtimeInfo, buildInfo: buildInfo, gatherer: gatherer, + isAgent: isAgent, remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame), } @@ -282,26 +283,35 @@ func (api *API) Register(r *route.Router) { }.ServeHTTP) } + wrapAgent := func(f apiFunc) http.HandlerFunc { + return wrap(func(r *http.Request) apiFuncResult { + if api.isAgent { + return apiFuncResult{nil, &apiError{errorExec, errors.New("unavailable with Prometheus Agent")}, nil, nil} + } + return f(r) + }) + } + r.Options("/*path", wrap(api.options)) - r.Get("/query", wrap(api.query)) - r.Post("/query", wrap(api.query)) - r.Get("/query_range", wrap(api.queryRange)) - r.Post("/query_range", wrap(api.queryRange)) - r.Get("/query_exemplars", wrap(api.queryExemplars)) - r.Post("/query_exemplars", wrap(api.queryExemplars)) + r.Get("/query", wrapAgent(api.query)) + r.Post("/query", wrapAgent(api.query)) + r.Get("/query_range", wrapAgent(api.queryRange)) + r.Post("/query_range", wrapAgent(api.queryRange)) + r.Get("/query_exemplars", wrapAgent(api.queryExemplars)) + 
r.Post("/query_exemplars", wrapAgent(api.queryExemplars)) - r.Get("/labels", wrap(api.labelNames)) - r.Post("/labels", wrap(api.labelNames)) - r.Get("/label/:name/values", wrap(api.labelValues)) + r.Get("/labels", wrapAgent(api.labelNames)) + r.Post("/labels", wrapAgent(api.labelNames)) + r.Get("/label/:name/values", wrapAgent(api.labelValues)) - r.Get("/series", wrap(api.series)) - r.Post("/series", wrap(api.series)) - r.Del("/series", wrap(api.dropSeries)) + r.Get("/series", wrapAgent(api.series)) + r.Post("/series", wrapAgent(api.series)) + r.Del("/series", wrapAgent(api.dropSeries)) r.Get("/targets", wrap(api.targets)) r.Get("/targets/metadata", wrap(api.targetMetadata)) - r.Get("/alertmanagers", wrap(api.alertmanagers)) + r.Get("/alertmanagers", wrapAgent(api.alertmanagers)) r.Get("/metadata", wrap(api.metricMetadata)) @@ -309,22 +319,22 @@ func (api *API) Register(r *route.Router) { r.Get("/status/runtimeinfo", wrap(api.serveRuntimeInfo)) r.Get("/status/buildinfo", wrap(api.serveBuildInfo)) r.Get("/status/flags", wrap(api.serveFlags)) - r.Get("/status/tsdb", wrap(api.serveTSDBStatus)) + r.Get("/status/tsdb", wrapAgent(api.serveTSDBStatus)) r.Get("/status/walreplay", api.serveWALReplayStatus) r.Post("/read", api.ready(api.remoteRead)) r.Post("/write", api.ready(api.remoteWrite)) - r.Get("/alerts", wrap(api.alerts)) - r.Get("/rules", wrap(api.rules)) + r.Get("/alerts", wrapAgent(api.alerts)) + r.Get("/rules", wrapAgent(api.rules)) // Admin APIs - r.Post("/admin/tsdb/delete_series", wrap(api.deleteSeries)) - r.Post("/admin/tsdb/clean_tombstones", wrap(api.cleanTombstones)) - r.Post("/admin/tsdb/snapshot", wrap(api.snapshot)) + r.Post("/admin/tsdb/delete_series", wrapAgent(api.deleteSeries)) + r.Post("/admin/tsdb/clean_tombstones", wrapAgent(api.cleanTombstones)) + r.Post("/admin/tsdb/snapshot", wrapAgent(api.snapshot)) - r.Put("/admin/tsdb/delete_series", wrap(api.deleteSeries)) - r.Put("/admin/tsdb/clean_tombstones", wrap(api.cleanTombstones)) - 
r.Put("/admin/tsdb/snapshot", wrap(api.snapshot)) + r.Put("/admin/tsdb/delete_series", wrapAgent(api.deleteSeries)) + r.Put("/admin/tsdb/clean_tombstones", wrapAgent(api.cleanTombstones)) + r.Put("/admin/tsdb/snapshot", wrapAgent(api.snapshot)) } type queryData struct { @@ -1623,7 +1633,7 @@ func (api *API) snapshot(r *http.Request) apiFuncResult { rand.Int63()) dir = filepath.Join(snapdir, name) ) - if err := os.MkdirAll(dir, 0777); err != nil { + if err := os.MkdirAll(dir, 0o777); err != nil { return apiFuncResult{nil, &apiError{errorInternal, errors.Wrap(err, "create snapshot directory")}, nil, nil} } if err := api.db.Snapshot(dir, !skipHead); err != nil { @@ -1679,7 +1689,6 @@ func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data inter Error: apiErr.err.Error(), Data: data, }) - if err != nil { level.Error(api.logger).Log("msg", "error marshaling json response", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) @@ -1822,7 +1831,7 @@ func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { stream.WriteMore() stream.WriteObjectField(`timestamp`) marshalTimestamp(p.Ts, stream) - //marshalTimestamp(p.Ts, stream) + // marshalTimestamp(p.Ts, stream) stream.WriteObjectEnd() } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index b839e61cd..5112201f4 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -40,10 +40,10 @@ import ( "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/textparse" - "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/prompb" 
"github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -126,9 +126,7 @@ func newTestTargetRetriever(targetsInfo []*testTargetParams) *testTargetRetrieve } } -var ( - scrapeStart = time.Now().Add(-11 * time.Second) -) +var scrapeStart = time.Now().Add(-11 * time.Second) func (t testTargetRetriever) TargetsActive() map[string][]*scrape.Target { return t.activeTargets @@ -452,7 +450,6 @@ func TestEndpoints(t *testing.T) { testEndpoints(t, api, testTargetRetriever, suite.ExemplarStorage(), false) }) - } func TestLabelNames(t *testing.T) { @@ -651,7 +648,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E exemplars []exemplar.QueryResult } - var tests = []test{ + tests := []test{ { endpoint: api.query, query: url.Values{ @@ -2137,7 +2134,7 @@ func assertAPIError(t *testing.T, got *apiError, exp errorType) { } } -func assertAPIResponse(t *testing.T, got interface{}, exp interface{}) { +func assertAPIResponse(t *testing.T, got, exp interface{}) { t.Helper() require.Equal(t, exp, got) @@ -2179,6 +2176,7 @@ func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) { h, _ := tsdb.NewHead(nil, nil, nil, opts, nil) return h.Stats(statsByLabelName), nil } + func (f *fakeDB) WALReplayStatus() (tsdb.WALReplayStatus, error) { return tsdb.WALReplayStatus{}, nil } @@ -2449,7 +2447,7 @@ func TestParseTimeParam(t *testing.T) { ts, err := parseTime("1582468023986") require.NoError(t, err) - var tests = []struct { + tests := []struct { paramName string paramValue string defaultValue time.Time @@ -2508,7 +2506,7 @@ func TestParseTime(t *testing.T) { panic(err) } - var tests = []struct { + tests := []struct { input string fail bool result time.Time @@ -2516,25 +2514,32 @@ func TestParseTime(t *testing.T) { { input: "", fail: true, - }, { + }, + { input: "abc", fail: true, - }, { + }, + { input: "30s", fail: true, - }, { + }, + { input: "123", result: time.Unix(123, 0), - }, { + }, + { input: 
"123.123", result: time.Unix(123, 123000000), - }, { + }, + { input: "2015-06-03T13:21:58.555Z", result: ts, - }, { + }, + { input: "2015-06-03T14:21:58.555+01:00", result: ts, - }, { + }, + { // Test float rounding. input: "1543578564.705", result: time.Unix(1543578564, 705*1e6), @@ -2566,7 +2571,7 @@ func TestParseTime(t *testing.T) { } func TestParseDuration(t *testing.T) { - var tests = []struct { + tests := []struct { input string fail bool result time.Duration diff --git a/web/federate.go b/web/federate.go index 8d2af396d..393a96bb6 100644 --- a/web/federate.go +++ b/web/federate.go @@ -26,9 +26,9 @@ import ( "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/timestamp" - "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" diff --git a/web/federate_test.go b/web/federate_test.go index 35a02bb44..8bdef3fa8 100644 --- a/web/federate_test.go +++ b/web/federate_test.go @@ -28,7 +28,7 @@ import ( "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" @@ -253,18 +253,19 @@ func (notReadyReadStorage) Stats(string) (*tsdb.Stats, error) { // Regression test for https://github.com/prometheus/prometheus/issues/7181. func TestFederation_NotReady(t *testing.T) { - h := &Handler{ - localStorage: notReadyReadStorage{}, - lookbackDelta: 5 * time.Minute, - now: func() model.Time { return 101 * 60 * 1000 }, // 101min after epoch. 
- config: &config.Config{ - GlobalConfig: config.GlobalConfig{}, - }, - } - for name, scenario := range scenarios { t.Run(name, func(t *testing.T) { - h.config.GlobalConfig.ExternalLabels = scenario.externalLabels + h := &Handler{ + localStorage: notReadyReadStorage{}, + lookbackDelta: 5 * time.Minute, + now: func() model.Time { return 101 * 60 * 1000 }, // 101min after epoch. + config: &config.Config{ + GlobalConfig: config.GlobalConfig{ + ExternalLabels: scenario.externalLabels, + }, + }, + } + req := httptest.NewRequest("GET", "http://example.org/federate?"+scenario.params, nil) res := httptest.NewRecorder() diff --git a/web/ui/assets_generate.go b/web/ui/assets_generate.go index 884329bd0..47f1a43c0 100644 --- a/web/ui/assets_generate.go +++ b/web/ui/assets_generate.go @@ -22,7 +22,7 @@ import ( "github.com/shurcooL/vfsgen" - "github.com/prometheus/prometheus/pkg/modtimevfs" + "github.com/prometheus/prometheus/util/modtimevfs" "github.com/prometheus/prometheus/web/ui" ) diff --git a/web/ui/module/codemirror-promql/README.md b/web/ui/module/codemirror-promql/README.md index bbc051893..42ed4e2c3 100644 --- a/web/ui/module/codemirror-promql/README.md +++ b/web/ui/module/codemirror-promql/README.md @@ -1,7 +1,7 @@ CodeMirror-promql ================= -[![CircleCI](https://circleci.com/gh/prometheus-community/codemirror-promql.svg?style=shield)](https://circleci.com/gh/prometheus-community/codemirror-promql) [![GitHub license](https://img.shields.io/badge/license-Apache-blue.svg)](./LICENSE) -[![NPM version](https://img.shields.io/npm/v/codemirror-promql.svg)](https://www.npmjs.org/package/codemirror-promql) [![codecov](https://codecov.io/gh/prometheus-community/codemirror-promql/branch/master/graph/badge.svg?token=1OSVPBDKZC)](https://codecov.io/gh/prometheus-community/codemirror-promql) +[![CircleCI](https://circleci.com/gh/prometheus/codemirror-promql.svg?style=shield)](https://circleci.com/gh/prometheus/codemirror-promql) [![GitHub 
license](https://img.shields.io/badge/license-Apache-blue.svg)](./LICENSE) +[![NPM version](https://img.shields.io/npm/v/codemirror-promql.svg)](https://www.npmjs.org/package/codemirror-promql) [![codecov](https://codecov.io/gh/prometheus/codemirror-promql/branch/main/graph/badge.svg?token=rBHsyXshfl)](https://codecov.io/gh/prometheus/codemirror-promql) ## Overview @@ -13,7 +13,7 @@ and autocompletion for PromQL ([Prometheus Query Language](https://prometheus.io ## Where does it come from? The authoritative copy of this code lives in `prometheus/prometheus` and is synced to -`prometheus-community/codemirror-promql` on a regular basis by a bot. Please contribute any code changes to the code +`prometheus/codemirror-promql` on a regular basis by a bot. Please contribute any code changes to the code in https://github.com/prometheus/prometheus/tree/main/web/ui/module/codemirror-promql. ### Installation @@ -222,7 +222,7 @@ const promQL = new PromQLExtension().setComplete({ ##### Override the default Prometheus client In case you are not satisfied by our default Prometheus client, you can still provide your own. It has to implement the -interface [PrometheusClient](https://github.com/prometheus-community/codemirror-promql/blob/master/src/lang-promql/client/prometheus.ts#L111-L117) +interface [PrometheusClient](https://github.com/prometheus/codemirror-promql/blob/main/src/client/prometheus.ts#L24-L39) . ```typescript @@ -246,4 +246,4 @@ Note: In case this parameter is provided, then the rest of the configuration is ## License -Apache License 2.0, see [LICENSE](https://github.com/prometheus-community/codemirror-promql/blob/master/LICENSE). +Apache License 2.0, see [LICENSE](https://github.com/prometheus/codemirror-promql/blob/main/LICENSE). 
diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 825383d01..70482d9d3 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -16,7 +16,7 @@ }, "repository": { "type": "git", - "url": "git+https://github.com/prometheus-community/codemirror-promql.git" + "url": "git+https://github.com/prometheus/codemirror-promql.git" }, "keywords": [ "promql", @@ -27,40 +27,40 @@ "author": "Prometheus Authors ", "license": "Apache-2.0", "bugs": { - "url": "https://github.com/prometheus-community/codemirror-promql/issues" + "url": "https://github.com/prometheus/codemirror-promql/issues" }, - "homepage": "https://github.com/prometheus-community/codemirror-promql/blob/master/README.md", + "homepage": "https://github.com/prometheus/codemirror-promql/blob/master/README.md", "dependencies": { "lru-cache": "^6.0.0" }, "devDependencies": { "@codemirror/autocomplete": "^0.19.3", "@codemirror/basic-setup": "^0.19.0", - "@codemirror/highlight": "^0.19.5", - "@codemirror/language": "^0.19.3", + "@codemirror/highlight": "^0.19.6", + "@codemirror/language": "^0.19.4", "@codemirror/lint": "^0.19.1", - "@codemirror/state": "^0.19.2", + "@codemirror/state": "^0.19.5", "@codemirror/view": "^0.19.7", - "@lezer/common": "^0.15.5", - "@lezer/generator": "^0.15.1", - "@types/chai": "^4.2.12", + "@lezer/common": "^0.15.8", + "@lezer/generator": "^0.15.2", + "@types/chai": "^4.2.22", "@types/lru-cache": "^5.1.0", - "@types/mocha": "^8.0.3", - "@types/node": "^16.7.6", - "@typescript-eslint/eslint-plugin": "^4.31.0", - "@typescript-eslint/parser": "^4.31.0", + "@types/mocha": "^9.0.0", + "@types/node": "^16.11.7", + "@typescript-eslint/eslint-plugin": "^5.3.1", + "@typescript-eslint/parser": "^5.3.1", "chai": "^4.2.0", "codecov": "^3.8.1", - "eslint": "^7.32.0", + "eslint": "^8.2.0", "eslint-config-prettier": "^8.3.0", - "eslint-plugin-flowtype": "^5.9.2", - "eslint-plugin-import": 
"^2.24.2", + "eslint-plugin-flowtype": "^8.0.3", + "eslint-plugin-import": "^2.25.3", "eslint-plugin-prettier": "^4.0.0", "isomorphic-fetch": "^3.0.0", - "mocha": "^8.1.2", + "mocha": "^9.1.3", "nock": "^13.0.11", "nyc": "^15.1.0", - "prettier": "^2.3.2", + "prettier": "^2.4.1", "ts-loader": "^7.0.4", "ts-mocha": "^8.0.0", "ts-node": "^9.0.0", @@ -68,12 +68,12 @@ }, "peerDependencies": { "@codemirror/autocomplete": "^0.19.3", - "@codemirror/highlight": "^0.19.5", - "@codemirror/language": "^0.19.3", + "@codemirror/highlight": "^0.19.6", + "@codemirror/language": "^0.19.4", "@codemirror/lint": "^0.19.1", - "@codemirror/state": "^0.19.2", + "@codemirror/state": "^0.19.5", "@codemirror/view": "^0.19.7", - "@lezer/common": "^0.15.5" + "@lezer/common": "^0.15.8" }, "prettier": { "singleQuote": true, diff --git a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts index 4aba94e9f..5b2da2088 100644 --- a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts +++ b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts @@ -142,12 +142,16 @@ export const functionIdentifierTerms = [ detail: 'function', info: 'Calculate the cosine, in radians, for input series', type: 'function', + // Avoid ranking higher than `count`. + boost: -1, }, { label: 'cosh', detail: 'function', info: 'Calculate the hyperbolic cosine, in radians, for input series', type: 'function', + // Avoid ranking higher than `count`. + boost: -1, }, { label: 'count_over_time', @@ -178,6 +182,8 @@ export const functionIdentifierTerms = [ detail: 'function', info: 'Convert radians to degrees for input series', type: 'function', + // Avoid ranking higher than `delta`. + boost: -1, }, { label: 'delta', @@ -328,6 +334,8 @@ export const functionIdentifierTerms = [ detail: 'function', info: 'Convert degrees to radians for input series', type: 'function', + // Avoid ranking higher than `rate`. 
+ boost: -1, }, { label: 'rate', diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index b14cf0823..4f7e165d5 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -22,31 +22,31 @@ "devDependencies": { "@codemirror/autocomplete": "^0.19.3", "@codemirror/basic-setup": "^0.19.0", - "@codemirror/highlight": "^0.19.5", - "@codemirror/language": "^0.19.3", + "@codemirror/highlight": "^0.19.6", + "@codemirror/language": "^0.19.4", "@codemirror/lint": "^0.19.1", - "@codemirror/state": "^0.19.2", + "@codemirror/state": "^0.19.5", "@codemirror/view": "^0.19.7", - "@lezer/common": "^0.15.5", - "@lezer/generator": "^0.15.1", - "@types/chai": "^4.2.12", + "@lezer/common": "^0.15.8", + "@lezer/generator": "^0.15.2", + "@types/chai": "^4.2.22", "@types/lru-cache": "^5.1.0", - "@types/mocha": "^8.0.3", - "@types/node": "^16.7.6", - "@typescript-eslint/eslint-plugin": "^4.31.0", - "@typescript-eslint/parser": "^4.31.0", + "@types/mocha": "^9.0.0", + "@types/node": "^16.11.7", + "@typescript-eslint/eslint-plugin": "^5.3.1", + "@typescript-eslint/parser": "^5.3.1", "chai": "^4.2.0", "codecov": "^3.8.1", - "eslint": "^7.32.0", + "eslint": "^8.2.0", "eslint-config-prettier": "^8.3.0", - "eslint-plugin-flowtype": "^5.9.2", - "eslint-plugin-import": "^2.24.2", + "eslint-plugin-flowtype": "^8.0.3", + "eslint-plugin-import": "^2.25.3", "eslint-plugin-prettier": "^4.0.0", "isomorphic-fetch": "^3.0.0", - "mocha": "^8.1.2", + "mocha": "^9.1.3", "nock": "^13.0.11", "nyc": "^15.1.0", - "prettier": "^2.3.2", + "prettier": "^2.4.1", "ts-loader": "^7.0.4", "ts-mocha": "^8.0.0", "ts-node": "^9.0.0", @@ -57,14 +57,650 @@ }, "peerDependencies": { "@codemirror/autocomplete": "^0.19.3", - "@codemirror/highlight": "^0.19.5", - "@codemirror/language": "^0.19.3", + "@codemirror/highlight": "^0.19.6", + "@codemirror/language": "^0.19.4", "@codemirror/lint": "^0.19.1", - "@codemirror/state": "^0.19.2", + "@codemirror/state": "^0.19.5", "@codemirror/view": "^0.19.7", - 
"@lezer/common": "^0.15.5" + "@lezer/common": "^0.15.8" } }, + "module/codemirror-promql/node_modules/@eslint/eslintrc": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-1.0.4.tgz", + "integrity": "sha512-h8Vx6MdxwWI2WM8/zREHMoqdgLNXEL4QX3MWSVMdyNJGvXVOs+6lp+m2hc3FnuMHDc4poxFNI20vCk0OmI4G0Q==", + "dev": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.0.0", + "globals": "^13.9.0", + "ignore": "^4.0.6", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.0.4", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "module/codemirror-promql/node_modules/@humanwhocodes/config-array": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.6.0.tgz", + "integrity": "sha512-JQlEKbcgEUjBFhLIF4iqM7u/9lwgHRBcpHrmUNCALK0Q3amXN6lxdoXLnF0sm11E9VqTmBALR87IlUg1bZ8A9A==", + "dev": true, + "dependencies": { + "@humanwhocodes/object-schema": "^1.2.0", + "debug": "^4.1.1", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "module/codemirror-promql/node_modules/@typescript-eslint/eslint-plugin": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.4.0.tgz", + "integrity": "sha512-9/yPSBlwzsetCsGEn9j24D8vGQgJkOTr4oMLas/w886ZtzKIs1iyoqFrwsX2fqYEeUwsdBpC21gcjRGo57u0eg==", + "dev": true, + "dependencies": { + "@typescript-eslint/experimental-utils": "5.4.0", + "@typescript-eslint/scope-manager": "5.4.0", + "debug": "^4.3.2", + "functional-red-black-tree": "^1.0.1", + "ignore": "^5.1.8", + "regexpp": "^3.2.0", + "semver": "^7.3.5", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^5.0.0", + 
"eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "module/codemirror-promql/node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": { + "version": "5.1.9", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.9.tgz", + "integrity": "sha512-2zeMQpbKz5dhZ9IwL0gbxSW5w0NK/MSAMtNuhgIHEPmaU3vPdKPL0UdvUCXs5SS4JAwsBxysK5sFMW8ocFiVjQ==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "module/codemirror-promql/node_modules/@typescript-eslint/experimental-utils": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-5.4.0.tgz", + "integrity": "sha512-Nz2JDIQUdmIGd6p33A+naQmwfkU5KVTLb/5lTk+tLVTDacZKoGQisj8UCxk7onJcrgjIvr8xWqkYI+DbI3TfXg==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.9", + "@typescript-eslint/scope-manager": "5.4.0", + "@typescript-eslint/types": "5.4.0", + "@typescript-eslint/typescript-estree": "5.4.0", + "eslint-scope": "^5.1.1", + "eslint-utils": "^3.0.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "*" + } + }, + "module/codemirror-promql/node_modules/@typescript-eslint/experimental-utils/node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "module/codemirror-promql/node_modules/@typescript-eslint/experimental-utils/node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": 
"sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "module/codemirror-promql/node_modules/@typescript-eslint/parser": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.4.0.tgz", + "integrity": "sha512-JoB41EmxiYpaEsRwpZEYAJ9XQURPFer8hpkIW9GiaspVLX8oqbqNM8P4EP8HOZg96yaALiLEVWllA2E8vwsIKw==", + "dev": true, + "dependencies": { + "@typescript-eslint/scope-manager": "5.4.0", + "@typescript-eslint/types": "5.4.0", + "@typescript-eslint/typescript-estree": "5.4.0", + "debug": "^4.3.2" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "module/codemirror-promql/node_modules/@typescript-eslint/scope-manager": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.4.0.tgz", + "integrity": "sha512-pRxFjYwoi8R+n+sibjgF9iUiAELU9ihPBtHzocyW8v8D8G8KeQvXTsW7+CBYIyTYsmhtNk50QPGLE3vrvhM5KA==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.4.0", + "@typescript-eslint/visitor-keys": "5.4.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "module/codemirror-promql/node_modules/@typescript-eslint/types": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.4.0.tgz", + "integrity": "sha512-GjXNpmn+n1LvnttarX+sPD6+S7giO+9LxDIGlRl4wK3a7qMWALOHYuVSZpPTfEIklYjaWuMtfKdeByx0AcaThA==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + 
"url": "https://opencollective.com/typescript-eslint" + } + }, + "module/codemirror-promql/node_modules/@typescript-eslint/typescript-estree": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.4.0.tgz", + "integrity": "sha512-nhlNoBdhKuwiLMx6GrybPT3SFILm5Gij2YBdPEPFlYNFAXUJWX6QRgvi/lwVoadaQEFsizohs6aFRMqsXI2ewA==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.4.0", + "@typescript-eslint/visitor-keys": "5.4.0", + "debug": "^4.3.2", + "globby": "^11.0.4", + "is-glob": "^4.0.3", + "semver": "^7.3.5", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "module/codemirror-promql/node_modules/@typescript-eslint/visitor-keys": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.4.0.tgz", + "integrity": "sha512-PVbax7MeE7tdLfW5SA0fs8NGVVr+buMPrcj+CWYWPXsZCH8qZ1THufDzbXm1xrZ2b2PA1iENJ0sRq5fuUtvsJg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.4.0", + "eslint-visitor-keys": "^3.0.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "module/codemirror-promql/node_modules/acorn": { + "version": "8.5.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.5.0.tgz", + "integrity": "sha512-yXbYeFy+jUuYd3/CDcg2NkIYE991XYX/bje7LmjJigUciaeO1JR4XxXgCIV1/Zc/dRuFEyw1L0pbA+qynJkW5Q==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "module/codemirror-promql/node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": 
"sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true + }, + "module/codemirror-promql/node_modules/chokidar": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.2.tgz", + "integrity": "sha512-ekGhOnNVPgT77r4K/U3GDhu+FQ2S8TnK/s2KbIGXi0SZWuwkZ2QNyfWdZW+TVfn84DpEP7rLeCt2UI6bJ8GwbQ==", + "dev": true, + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "module/codemirror-promql/node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "module/codemirror-promql/node_modules/eslint": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.2.0.tgz", + "integrity": "sha512-erw7XmM+CLxTOickrimJ1SiF55jiNlVSp2qqm0NuBWPtHYQCegD5ZMaW0c3i5ytPqL+SSLaCxdvQXFPLJn+ABw==", + "dev": true, + "dependencies": { + "@eslint/eslintrc": "^1.0.4", + "@humanwhocodes/config-array": "^0.6.0", + "ajv": "^6.10.0", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "enquirer": "^2.3.5", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^6.0.0", + "eslint-utils": "^3.0.0", + "eslint-visitor-keys": "^3.0.0", + "espree": "^9.0.0", + "esquery": "^1.4.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "functional-red-black-tree": "^1.0.1", + "glob-parent": "^6.0.1", + "globals": "^13.6.0", + "ignore": "^4.0.6", + "import-fresh": 
"^3.0.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.0.4", + "natural-compare": "^1.4.0", + "optionator": "^0.9.1", + "progress": "^2.0.0", + "regexpp": "^3.2.0", + "semver": "^7.2.1", + "strip-ansi": "^6.0.1", + "strip-json-comments": "^3.1.0", + "text-table": "^0.2.0", + "v8-compile-cache": "^2.0.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "module/codemirror-promql/node_modules/eslint-plugin-flowtype": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/eslint-plugin-flowtype/-/eslint-plugin-flowtype-8.0.3.tgz", + "integrity": "sha512-dX8l6qUL6O+fYPtpNRideCFSpmWOUVx5QcaGLVqe/vlDiBSe4vYljDWDETwnyFzpl7By/WVIu6rcrniCgH9BqQ==", + "dev": true, + "dependencies": { + "lodash": "^4.17.21", + "string-natural-compare": "^3.0.1" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "@babel/plugin-syntax-flow": "^7.14.5", + "@babel/plugin-transform-react-jsx": "^7.14.9", + "eslint": "^8.1.0" + } + }, + "module/codemirror-promql/node_modules/eslint-scope": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-6.0.0.tgz", + "integrity": "sha512-uRDL9MWmQCkaFus8RF5K9/L/2fn+80yoW3jkD53l4shjCh26fCtvJGasxjUqP5OT87SYTxCVA3BwTUzuELx9kA==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "module/codemirror-promql/node_modules/eslint-visitor-keys": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.1.0.tgz", + "integrity": "sha512-yWJFpu4DtjsWKkt5GeNBBuZMlNcYVs6vRCLoCVEJrTjaSB6LC98gFipNK/erM2Heg/E8mIK+hXG/pJMLK+eRZA==", + "dev": true, + "engines": { + 
"node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "module/codemirror-promql/node_modules/espree": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.0.0.tgz", + "integrity": "sha512-r5EQJcYZ2oaGbeR0jR0fFVijGOcwai07/690YRXLINuhmVeRY4UKSAsQPe/0BNuDgwP7Ophoc1PRsr2E3tkbdQ==", + "dev": true, + "dependencies": { + "acorn": "^8.5.0", + "acorn-jsx": "^5.3.1", + "eslint-visitor-keys": "^3.0.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "module/codemirror-promql/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "module/codemirror-promql/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "module/codemirror-promql/node_modules/glob": { + "version": "7.1.7", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", + "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "module/codemirror-promql/node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + 
"integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "module/codemirror-promql/node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "module/codemirror-promql/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "module/codemirror-promql/node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "module/codemirror-promql/node_modules/mocha": { + "version": "9.1.3", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-9.1.3.tgz", + "integrity": "sha512-Xcpl9FqXOAYqI3j79pEtHBBnQgVXIhpULjGQa7DVb0Po+VzmSIK9kanAiWLHoRR/dbZ2qpdPshuXr8l1VaHCzw==", + "dev": true, + "dependencies": { + "@ungap/promise-all-settled": "1.1.2", + "ansi-colors": "4.1.1", + "browser-stdout": "1.3.1", + "chokidar": "3.5.2", + "debug": "4.3.2", + "diff": "5.0.0", + "escape-string-regexp": 
"4.0.0", + "find-up": "5.0.0", + "glob": "7.1.7", + "growl": "1.10.5", + "he": "1.2.0", + "js-yaml": "4.1.0", + "log-symbols": "4.1.0", + "minimatch": "3.0.4", + "ms": "2.1.3", + "nanoid": "3.1.25", + "serialize-javascript": "6.0.0", + "strip-json-comments": "3.1.1", + "supports-color": "8.1.1", + "which": "2.0.2", + "workerpool": "6.1.5", + "yargs": "16.2.0", + "yargs-parser": "20.2.4", + "yargs-unparser": "2.0.0" + }, + "bin": { + "_mocha": "bin/_mocha", + "mocha": "bin/mocha" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/mochajs" + } + }, + "module/codemirror-promql/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true + }, + "module/codemirror-promql/node_modules/nanoid": { + "version": "3.1.25", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.25.tgz", + "integrity": "sha512-rdwtIXaXCLFAQbnfqDRnI6jaRHp9fTcYBjtFKE8eezcZ7LuLjhUaQGNeMXf1HmRoCH32CLz6XwX0TtxEOS/A3Q==", + "dev": true, + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "module/codemirror-promql/node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "module/codemirror-promql/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + 
"dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "module/codemirror-promql/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "module/codemirror-promql/node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "module/codemirror-promql/node_modules/serialize-javascript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", + "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", + "dev": true, + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "module/codemirror-promql/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "module/codemirror-promql/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + 
}, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "module/codemirror-promql/node_modules/workerpool": { + "version": "6.1.5", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.1.5.tgz", + "integrity": "sha512-XdKkCK0Zqc6w3iTxLckiuJ81tiD/o5rBE/m+nXpRCB+/Sq4DqkfXZ/x0jW02DG1tGsfUGXbTJyZDP+eu67haSw==", + "dev": true + }, "node_modules/@babel/code-frame": { "version": "7.12.11", "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.11.tgz", @@ -148,6 +784,18 @@ "node": ">=6.9.0" } }, + "node_modules/@babel/helper-annotate-as-pure": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.16.0.tgz", + "integrity": "sha512-ItmYF9vR4zA8cByDocY05o0LGUkp1zhbTQOH1NFyl5xXEqlTJQCEJjieriw+aFpxo16swMxUnUiKS7a/r4vtHg==", + "dev": true, + "dependencies": { + "@babel/types": "^7.16.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, "node_modules/@babel/helper-compilation-targets": { "version": "7.15.4", "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.15.4.tgz", @@ -226,12 +874,12 @@ } }, "node_modules/@babel/helper-module-imports": { - "version": "7.15.4", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.15.4.tgz", - "integrity": "sha512-jeAHZbzUwdW/xHgHQ3QmWR4Jg6j15q4w/gCfwZvtqOxoo5DKtLHk8Bsf4c5RZRC7NmLEs+ohkdq8jFefuvIxAA==", + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.16.0.tgz", + "integrity": "sha512-kkH7sWzKPq0xt3H1n+ghb4xEMP8k0U7XV3kkB+ZGy69kDk2ySFW1qPi06sjKzFY3t1j6XbJSqr4mF9L7CYVyhg==", "dev": true, "dependencies": { - "@babel/types": "^7.15.4" + "@babel/types": "^7.16.0" }, "engines": { "node": ">=6.9.0" @@ -268,6 +916,15 @@ "node": ">=6.9.0" } }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.14.5", + "resolved": 
"https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.14.5.tgz", + "integrity": "sha512-/37qQCE3K0vvZKwoK4XU/irIJQdIfCJuhU5eKnNxpFDsOkgFaUAwbv+RYw6eYgsC0E4hS7r5KqGULUogqui0fQ==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, "node_modules/@babel/helper-replace-supers": { "version": "7.15.4", "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.15.4.tgz", @@ -308,9 +965,9 @@ } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.14.9", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.9.tgz", - "integrity": "sha512-pQYxPY0UP6IHISRitNe8bsijHex4TWZXi2HwKVsjPiltzlhse2znVcm9Ace510VT1kxIHjGJCZZQBX2gJDbo0g==", + "version": "7.15.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.15.7.tgz", + "integrity": "sha512-K4JvCtQqad9OY2+yTU8w+E82ywk/fe+ELNlt1G8z3bVGlZfn/hOcQQsUhGhW/N+tb3fxK800wLtKOE/aM0m72w==", "dev": true, "engines": { "node": ">=6.9.0" @@ -436,6 +1093,55 @@ "node": ">=6.0.0" } }, + "node_modules/@babel/plugin-syntax-flow": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-flow/-/plugin-syntax-flow-7.16.0.tgz", + "integrity": "sha512-dH91yCo0RyqfzWgoM5Ji9ir8fQ+uFbt9KHM3d2x4jZOuHS6wNA+CRmRUP/BWCsHG2bjc7A2Way6AvH1eQk0wig==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.16.0.tgz", + "integrity": "sha512-8zv2+xiPHwly31RK4RmnEYY5zziuF3O7W2kIDW+07ewWDh6Oi0dRq8kwvulRkFgt6DB97RlKs5c1y068iPlCUg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + 
"peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.16.0.tgz", + "integrity": "sha512-rqDgIbukZ44pqq7NIRPGPGNklshPkvlmvqjdx3OZcGPk4zGIenYkxDTvl3LsSL8gqcc3ZzGmXPE6hR/u/voNOw==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.16.0", + "@babel/helper-module-imports": "^7.16.0", + "@babel/helper-plugin-utils": "^7.14.5", + "@babel/plugin-syntax-jsx": "^7.16.0", + "@babel/types": "^7.16.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, "node_modules/@babel/template": { "version": "7.15.4", "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.15.4.tgz", @@ -504,12 +1210,12 @@ } }, "node_modules/@babel/types": { - "version": "7.15.6", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.15.6.tgz", - "integrity": "sha512-BPU+7QhqNjmWyDO0/vitH/CuhpV8ZmK1wpKva8nuyNF5MJfuRNWMc+hc14+u9xT93kvykMdncrJT19h74uB1Ig==", + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.16.0.tgz", + "integrity": "sha512-PJgg/k3SdLsGb3hhisFvtLOw5ts113klrpLuIPtCJIU+BB24fqq6lf8RWqKJEjzqXR9AEH1rIb5XTqwBHB+kQg==", "dev": true, "dependencies": { - "@babel/helper-validator-identifier": "^7.14.9", + "@babel/helper-validator-identifier": "^7.15.7", "to-fast-properties": "^2.0.0" }, "engines": { @@ -612,9 +1318,9 @@ } }, "node_modules/@codemirror/highlight": { - "version": "0.19.5", - "resolved": "https://registry.npmjs.org/@codemirror/highlight/-/highlight-0.19.5.tgz", - "integrity": "sha512-JDGEH/l/DGpxG2k+mgqMKcuFURIs42eoTB4H4tN7QmzyW/z/MlNKiHHv7pWXyN+H5QvftK5yctsxUu77EZikmw==", + "version": "0.19.6", + "resolved": "https://registry.npmjs.org/@codemirror/highlight/-/highlight-0.19.6.tgz", + "integrity": 
"sha512-+eibu6on9quY8uN3xJ/n3rH+YIDLlpX7YulVmFvqAIz/ukRQ5tWaBmB7fMixHmnmRIRBRZgB8rNtonuMwZSAHQ==", "dependencies": { "@codemirror/language": "^0.19.0", "@codemirror/rangeset": "^0.19.0", @@ -634,9 +1340,9 @@ } }, "node_modules/@codemirror/language": { - "version": "0.19.3", - "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-0.19.3.tgz", - "integrity": "sha512-6vjkRYHRJg/z9wdAk75nU2fQwCJBsh2HpkIjKXIHfzISSgLt5qSDxVhPd8Uu8PD5WMmFFP8tX7I9kdIt873o0A==", + "version": "0.19.4", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-0.19.4.tgz", + "integrity": "sha512-yLnLDUkK00BlRVXpPkoJMYEssYKuRLOmK+DdJJ8zOOD4D62T7bSQ05NPyWzWr3PQX1k7sxGICGKR7INzfv9Snw==", "dependencies": { "@codemirror/state": "^0.19.0", "@codemirror/text": "^0.19.0", @@ -710,9 +1416,9 @@ } }, "node_modules/@codemirror/state": { - "version": "0.19.2", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-0.19.2.tgz", - "integrity": "sha512-dDqCrtkb0c/LYUlvQBLyLfkISEskbZnhvBbcVOF4j2AusJ1ptJ3EGMxBL9G16GP1TOdC1T613gA1J1qc3pbfGQ==", + "version": "0.19.5", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-0.19.5.tgz", + "integrity": "sha512-a3bJnkFuh4Z36nuOzAYobWViQ9eq5ux2wOb/46jUl+0Sj2BGrdz+pY1L+y2NUZhwPyWGcIrBtranr5P0rEEq8A==", "dependencies": { "@codemirror/text": "^0.19.0" } @@ -888,14 +1594,14 @@ } }, "node_modules/@lezer/common": { - "version": "0.15.5", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-0.15.5.tgz", - "integrity": "sha512-ufcjclusHXGdhp4gSPbPD7sUd38SgOej7m5tAEuG2tNPzqzV0d1vwwLh57R6IwW79ml2mb3tUjAoDfqI7v1HEw==" + "version": "0.15.8", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-0.15.8.tgz", + "integrity": "sha512-zpS/xty48huX4uBidupmWDYCRBYpVtoTiFhzYhd6GsQwU67WsdSImdWzZJDrF/DhcQ462wyrZahHlo2grFB5ig==" }, "node_modules/@lezer/generator": { - "version": "0.15.1", - "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-0.15.1.tgz", - "integrity": 
"sha512-OlG6ogwrTUeCsKVzPjXX5cFLT3XGESZY75Ust7DLMwmEgH1Awu/E4PGMFQZeTfI5lBWVo10reqXowiOhNKwOYQ==", + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-0.15.2.tgz", + "integrity": "sha512-nxY6TTj0ZAcAvg1zEeaZnt1xODdyPhD0lTaPOgcGOVFHhwwx0Oz7CxZB7Rh+xRCXFr5kJWDtM1uXPp80UZjhAg==", "dev": true, "dependencies": { "@lezer/common": "^0.15.0", @@ -948,6 +1654,24 @@ "node": ">= 8" } }, + "node_modules/@sinonjs/commons": { + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-1.8.3.tgz", + "integrity": "sha512-xkNcLAn/wZaX14RPlwizcKicDk9G3F8m2nU3L7Ukm5zBgTwiT0wsoFAHx9Jq56fJA1z/7uKGtCRu16sOUCLIHQ==", + "dev": true, + "dependencies": { + "type-detect": "4.0.8" + } + }, + "node_modules/@sinonjs/fake-timers": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-7.1.2.tgz", + "integrity": "sha512-iQADsW4LBMISqZ6Ci1dupJL9pprqwcVFTcOsEmQOEhW+KLCVn/Y4Jrvg2k19fIHCp+iFprriYPTdRcQR8NbUPg==", + "dev": true, + "dependencies": { + "@sinonjs/commons": "^1.7.0" + } + }, "node_modules/@tootallnate/once": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-1.1.2.tgz", @@ -958,9 +1682,9 @@ } }, "node_modules/@types/chai": { - "version": "4.2.21", - "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.2.21.tgz", - "integrity": "sha512-yd+9qKmJxm496BOV9CMNaey8TWsikaZOwMRwPHQIjcOJM9oV+fi9ZMNw3JsVnbEEbo2gRTDnGEBv8pjyn67hNg==", + "version": "4.2.22", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.2.22.tgz", + "integrity": "sha512-tFfcE+DSTzWAgifkjik9AySNqIyNoYwmR+uecPwwD/XRNfvOjmC/FjCxpiUGDkDVDphPfCUecSQVFw+lN3M3kQ==", "dev": true }, "node_modules/@types/flot": { @@ -972,10 +1696,16 @@ "@types/jquery": "*" } }, + "node_modules/@types/history": { + "version": "4.7.9", + "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.9.tgz", + "integrity": 
"sha512-MUc6zSmU3tEVnkQ78q0peeEjKWPUADMlC/t++2bI8WnAG2tvYRPIgHG8lWkXwqc8MsUF6Z2MOf+Mh5sazOmhiQ==", + "dev": true + }, "node_modules/@types/jquery": { - "version": "3.5.6", - "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.6.tgz", - "integrity": "sha512-SmgCQRzGPId4MZQKDj9Hqc6kSXFNWZFHpELkyK8AQhf8Zr6HKfCzFv9ZC1Fv3FyQttJZOlap3qYb12h61iZAIg==", + "version": "3.5.8", + "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.8.tgz", + "integrity": "sha512-cXk6NwqjDYg+UI9p2l3x0YmPa4m7RrXqmbK4IpVVpRJiYXU/QTo+UZrn54qfE1+9Gao4qpYqUnxm5ZCy2FTXAw==", "dev": true, "dependencies": { "@types/sizzle": "*" @@ -1000,17 +1730,103 @@ "dev": true }, "node_modules/@types/mocha": { - "version": "8.2.3", - "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-8.2.3.tgz", - "integrity": "sha512-ekGvFhFgrc2zYQoX4JeZPmVzZxw6Dtllga7iGHzfbYIYkAMUx/sAFP2GdFpLff+vdHXu5fl7WX9AT+TtqYcsyw==", + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-9.0.0.tgz", + "integrity": "sha512-scN0hAWyLVAvLR9AyW7HoFF5sJZglyBsbPuHO4fv7JRvfmPBMfp1ozWqOf/e4wwPNxezBZXRfWzMb6iFLgEVRA==", "dev": true }, "node_modules/@types/node": { - "version": "16.9.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-16.9.1.tgz", - "integrity": "sha512-QpLcX9ZSsq3YYUUnD3nFDY8H7wctAhQj/TFKL8Ya8v5fMm3CFXxo8zStsLAl780ltoYoo1WvKUVGBQK+1ifr7g==", + "version": "16.11.7", + "resolved": "https://registry.npmjs.org/@types/node/-/node-16.11.7.tgz", + "integrity": "sha512-QB5D2sqfSjCmTuWcBWyJ+/44bcjO7VbjSbOE0ucoVbAsSNQc4Lt6QkgkVXkTDwkL4z/beecZNDvVX15D4P8Jbw==", "dev": true }, + "node_modules/@types/prop-types": { + "version": "15.7.4", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.4.tgz", + "integrity": "sha512-rZ5drC/jWjrArrS8BR6SIr4cWpW09RNTYt9AMZo3Jwwif+iacXAqgVjm0B0Bv/S1jhDXKHqRVNCbACkJ89RAnQ==", + "dev": true + }, + "node_modules/@types/react": { + "version": "17.0.34", + "resolved": 
"https://registry.npmjs.org/@types/react/-/react-17.0.34.tgz", + "integrity": "sha512-46FEGrMjc2+8XhHXILr+3+/sTe3OfzSPU9YGKILLrUYbQ1CLQC9Daqo1KzENGXAWwrFwiY0l4ZbF20gRvgpWTg==", + "dev": true, + "dependencies": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-copy-to-clipboard": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/@types/react-copy-to-clipboard/-/react-copy-to-clipboard-5.0.2.tgz", + "integrity": "sha512-O29AThfxrkUFRsZXjfSWR2yaWo0ppB1yLEnHA+Oh24oNetjBAwTDu1PmolIqdJKzsZiO4J1jn6R6TmO96uBvGg==", + "dev": true, + "dependencies": { + "@types/react": "*" + } + }, + "node_modules/@types/react-resize-detector": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@types/react-resize-detector/-/react-resize-detector-6.1.0.tgz", + "integrity": "sha512-runvF8/keQK3ipyjb7Ez2RKtaOZgrpqEN2PVCp93B/WavgFEeogFMnplMu4OuhpQHwpcu9UbqFiT2cPWoCWmWQ==", + "deprecated": "This is a stub types definition. 
react-resize-detector provides its own type definitions, so you do not need this installed.", + "dev": true, + "dependencies": { + "react-resize-detector": "*" + } + }, + "node_modules/@types/react-router": { + "version": "5.1.17", + "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.17.tgz", + "integrity": "sha512-RNSXOyb3VyRs/EOGmjBhhGKTbnN6fHWvy5FNLzWfOWOGjgVUKqJZXfpKzLmgoU8h6Hj8mpALj/mbXQASOb92wQ==", + "dev": true, + "dependencies": { + "@types/history": "*", + "@types/react": "*" + } + }, + "node_modules/@types/react-router-dom": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.3.2.tgz", + "integrity": "sha512-ELEYRUie2czuJzaZ5+ziIp9Hhw+juEw8b7C11YNA4QdLCVbQ3qLi2l4aq8XnlqM7V31LZX8dxUuFUCrzHm6sqQ==", + "dev": true, + "dependencies": { + "@types/history": "*", + "@types/react": "*", + "@types/react-router": "*" + } + }, + "node_modules/@types/resize-observer-browser": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/@types/resize-observer-browser/-/resize-observer-browser-0.1.6.tgz", + "integrity": "sha512-61IfTac0s9jvNtBCpyo86QeaN8qqpMGHdK0uGKCCIy2dt5/Yk84VduHIdWAcmkC5QvdkPL0p5eWYgUZtHKKUVg==" + }, + "node_modules/@types/sanitize-html": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.5.0.tgz", + "integrity": "sha512-PeFIEZsO9m1+ACJlXUaimgrR+5DEDiIXhz7Hso307jmq5Yz0lb5kDp8LiTr5dMMMliC/jNNx/qds7Zoxa4zexw==", + "dev": true, + "dependencies": { + "htmlparser2": "^6.0.0" + } + }, + "node_modules/@types/scheduler": { + "version": "0.16.2", + "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.2.tgz", + "integrity": "sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew==", + "dev": true + }, + "node_modules/@types/sinon": { + "version": "10.0.6", + "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.6.tgz", + "integrity": 
"sha512-6EF+wzMWvBNeGrfP3Nx60hhx+FfwSg1JJBLAAP/IdIUq0EYkqCYf70VT3PhuhPX9eLD+Dp+lNdpb/ZeHG8Yezg==", + "dev": true, + "dependencies": { + "@sinonjs/fake-timers": "^7.1.0" + } + }, "node_modules/@types/sizzle": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/@types/sizzle/-/sizzle-2.3.3.tgz", @@ -1018,15 +1834,16 @@ "dev": true }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "4.31.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-4.31.1.tgz", - "integrity": "sha512-UDqhWmd5i0TvPLmbK5xY3UZB0zEGseF+DHPghZ37Sb83Qd3p8ujhvAtkU4OF46Ka5Pm5kWvFIx0cCTBFKo0alA==", + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-4.33.0.tgz", + "integrity": "sha512-aINiAxGVdOl1eJyVjaWn/YcVAq4Gi/Yo35qHGCnqbWVz61g39D0h23veY/MA0rFFGfxK7TySg2uwDeNv+JgVpg==", "dev": true, "dependencies": { - "@typescript-eslint/experimental-utils": "4.31.1", - "@typescript-eslint/scope-manager": "4.31.1", + "@typescript-eslint/experimental-utils": "4.33.0", + "@typescript-eslint/scope-manager": "4.33.0", "debug": "^4.3.1", "functional-red-black-tree": "^1.0.1", + "ignore": "^5.1.8", "regexpp": "^3.1.0", "semver": "^7.3.5", "tsutils": "^3.21.0" @@ -1048,16 +1865,72 @@ } } }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/scope-manager": { + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-4.33.0.tgz", + "integrity": "sha512-5IfJHpgTsTZuONKbODctL4kKuQje/bzBRkwHE8UOZ4f89Zeddg+EGZs8PD8NcN4LdM3ygHWYB3ukPAYjvl/qbQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "4.33.0", + "@typescript-eslint/visitor-keys": "4.33.0" + }, + "engines": { + "node": "^8.10.0 || ^10.13.0 || >=11.10.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + 
"node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/types": { + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-4.33.0.tgz", + "integrity": "sha512-zKp7CjQzLQImXEpLt2BUw1tvOMPfNoTAfb8l51evhYbOEEzdWyQNmHWWGPR6hwKJDAi+1VXSBmnhL9kyVTTOuQ==", + "dev": true, + "engines": { + "node": "^8.10.0 || ^10.13.0 || >=11.10.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/visitor-keys": { + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-4.33.0.tgz", + "integrity": "sha512-uqi/2aSz9g2ftcHWf8uLPJA70rUv6yuMW5Bohw+bwcuzaxQIHaKFZCKGoGXIrc9vkTJ3+0txM73K0Hq3d5wgIg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "4.33.0", + "eslint-visitor-keys": "^2.0.0" + }, + "engines": { + "node": "^8.10.0 || ^10.13.0 || >=11.10.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": { + "version": "5.1.9", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.9.tgz", + "integrity": "sha512-2zeMQpbKz5dhZ9IwL0gbxSW5w0NK/MSAMtNuhgIHEPmaU3vPdKPL0UdvUCXs5SS4JAwsBxysK5sFMW8ocFiVjQ==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, "node_modules/@typescript-eslint/experimental-utils": { - "version": "4.31.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-4.31.1.tgz", - "integrity": "sha512-NtoPsqmcSsWty0mcL5nTZXMf7Ei0Xr2MT8jWjXMVgRK0/1qeQ2jZzLFUh4QtyJ4+/lPUyMw5cSfeeME+Zrtp9Q==", + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-4.33.0.tgz", + "integrity": 
"sha512-zeQjOoES5JFjTnAhI5QY7ZviczMzDptls15GFsI6jyUOq0kOf9+WonkhtlIhh0RgHRnqj5gdNxW5j1EvAyYg6Q==", "dev": true, "dependencies": { "@types/json-schema": "^7.0.7", - "@typescript-eslint/scope-manager": "4.31.1", - "@typescript-eslint/types": "4.31.1", - "@typescript-eslint/typescript-estree": "4.31.1", + "@typescript-eslint/scope-manager": "4.33.0", + "@typescript-eslint/types": "4.33.0", + "@typescript-eslint/typescript-estree": "4.33.0", "eslint-scope": "^5.1.1", "eslint-utils": "^3.0.0" }, @@ -1072,6 +1945,80 @@ "eslint": "*" } }, + "node_modules/@typescript-eslint/experimental-utils/node_modules/@typescript-eslint/scope-manager": { + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-4.33.0.tgz", + "integrity": "sha512-5IfJHpgTsTZuONKbODctL4kKuQje/bzBRkwHE8UOZ4f89Zeddg+EGZs8PD8NcN4LdM3ygHWYB3ukPAYjvl/qbQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "4.33.0", + "@typescript-eslint/visitor-keys": "4.33.0" + }, + "engines": { + "node": "^8.10.0 || ^10.13.0 || >=11.10.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/experimental-utils/node_modules/@typescript-eslint/types": { + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-4.33.0.tgz", + "integrity": "sha512-zKp7CjQzLQImXEpLt2BUw1tvOMPfNoTAfb8l51evhYbOEEzdWyQNmHWWGPR6hwKJDAi+1VXSBmnhL9kyVTTOuQ==", + "dev": true, + "engines": { + "node": "^8.10.0 || ^10.13.0 || >=11.10.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/experimental-utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-4.33.0.tgz", + "integrity": 
"sha512-rkWRY1MPFzjwnEVHsxGemDzqqddw2QbTJlICPD9p9I9LfsO8fdmfQPOX3uKfUaGRDFJbfrtm/sXhVXN4E+bzCA==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "4.33.0", + "@typescript-eslint/visitor-keys": "4.33.0", + "debug": "^4.3.1", + "globby": "^11.0.3", + "is-glob": "^4.0.1", + "semver": "^7.3.5", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/experimental-utils/node_modules/@typescript-eslint/visitor-keys": { + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-4.33.0.tgz", + "integrity": "sha512-uqi/2aSz9g2ftcHWf8uLPJA70rUv6yuMW5Bohw+bwcuzaxQIHaKFZCKGoGXIrc9vkTJ3+0txM73K0Hq3d5wgIg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "4.33.0", + "eslint-visitor-keys": "^2.0.0" + }, + "engines": { + "node": "^8.10.0 || ^10.13.0 || >=11.10.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/parser": { "version": "4.31.1", "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-4.31.1.tgz", @@ -1329,16 +2276,16 @@ } }, "node_modules/array-includes": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.3.tgz", - "integrity": "sha512-gcem1KlBU7c9rB+Rq8/3PPKsK2kjqeEBa3bD5kkQo4nYlOHQCJqIJFqBXDEfwaRuYTT4E+FxA9xez7Gf/e3Q7A==", + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.4.tgz", + "integrity": "sha512-ZTNSQkmWumEbiHO2GF4GmWxYVTiQyJy2XOTa15sdQSrvKn7l+180egQMqlrMOUMCyLMD7pmyQe4mMDUT6Behrw==", "dev": true, "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", + "es-abstract": 
"^1.19.1", "get-intrinsic": "^1.1.1", - "is-string": "^1.0.5" + "is-string": "^1.0.7" }, "engines": { "node": ">= 0.4" @@ -1357,14 +2304,14 @@ } }, "node_modules/array.prototype.flat": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.2.4.tgz", - "integrity": "sha512-4470Xi3GAPAjZqFcljX2xzckv1qeKPizoNkiS0+O4IoPR2ZNpcjE0pkhdihlDouK+x6QOast26B4Q/O9DJnwSg==", + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.2.5.tgz", + "integrity": "sha512-KaYU+S+ndVqyUnignHftkwc58o3uVU1jzczILJ1tN2YaIZpFIKBiP/x/j97E5MVPsaCloPbqWLB/8qCTVvT2qg==", "dev": true, "dependencies": { - "call-bind": "^1.0.0", + "call-bind": "^1.0.2", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.1" + "es-abstract": "^1.19.0" }, "engines": { "node": ">= 0.4" @@ -1581,6 +2528,7 @@ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.1.tgz", "integrity": "sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw==", "dev": true, + "peer": true, "dependencies": { "anymatch": "~3.1.1", "braces": "~3.0.2", @@ -1715,6 +2663,12 @@ "node": ">= 8" } }, + "node_modules/csstype": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.0.10.tgz", + "integrity": "sha512-2u44ZG2OcNUO9HDp/Jl8C07x6pU/eTR3ncV91SiK3dhG9TWvRVsCoJw14Ckx5DgWkzGA3waZWO3d7pgqpUI/XA==", + "dev": true + }, "node_modules/debug": { "version": "4.3.2", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz", @@ -1815,6 +2769,57 @@ "node": ">=6.0.0" } }, + "node_modules/dom-serializer": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.3.2.tgz", + "integrity": "sha512-5c54Bk5Dw4qAxNOI1pFEizPSjVsx5+bpJKmL2kPn8JhBUq2q09tTCa3mjijun2NfK78NMouDYNMBkOrPZiS+ig==", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + }, + "funding": { + "url": 
"https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/domelementtype": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", + "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ] + }, + "node_modules/domhandler": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.2.2.tgz", + "integrity": "sha512-PzE9aBMsdZO8TK4BnuJwH0QT41wgMbRzuZrHUcpYncEjmQazq8QEaBWgLG7ZyC/DAZKEgglpIA6j4Qn/HmxS3w==", + "dependencies": { + "domelementtype": "^2.2.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/domutils": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", + "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "dependencies": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, "node_modules/electron-to-chromium": { "version": "1.3.838", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.838.tgz", @@ -1862,6 +2867,14 @@ "node": ">=8.6" } }, + "node_modules/entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, "node_modules/errno": { "version": "0.1.8", "resolved": "https://registry.npmjs.org/errno/-/errno-0.1.8.tgz", @@ -1884,9 +2897,9 @@ } }, "node_modules/es-abstract": { - "version": "1.18.6", - "resolved": 
"https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.6.tgz", - "integrity": "sha512-kAeIT4cku5eNLNuUKhlmtuk1/TRZvQoYccn6TO0cSVdf1kzB0T7+dYuVK9MWM7l+/53W2Q8M7N2c6MQvhXFcUQ==", + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.19.1.tgz", + "integrity": "sha512-2vJ6tjA/UfqLm2MPs7jxVybLoB8i1t1Jd9R3kISld20sIxPcTbLuggQOUxeWeAvIUkduv/CfMjuh4WmiXr2v9w==", "dev": true, "dependencies": { "call-bind": "^1.0.2", @@ -1900,7 +2913,9 @@ "is-callable": "^1.2.4", "is-negative-zero": "^2.0.1", "is-regex": "^1.1.4", + "is-shared-array-buffer": "^1.0.1", "is-string": "^1.0.7", + "is-weakref": "^1.0.1", "object-inspect": "^1.11.0", "object-keys": "^1.1.1", "object.assign": "^4.1.2", @@ -2048,12 +3063,13 @@ } }, "node_modules/eslint-module-utils": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.6.2.tgz", - "integrity": "sha512-QG8pcgThYOuqxupd06oYTZoNOGaUdTY1PqK+oS6ElF6vs4pBdk/aYxFVQQXzcrAqp9m7cl7lb2ubazX+g16k2Q==", + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.7.1.tgz", + "integrity": "sha512-fjoetBXQZq2tSTWZ9yWVl2KuFrTZZH3V+9iD1V1RfpDgxzJR+mPd/KZmMiA8gbPqdBzpNiEHOuT7IYEWxrH0zQ==", "dev": true, "dependencies": { "debug": "^3.2.7", + "find-up": "^2.1.0", "pkg-dir": "^2.0.0" }, "engines": { @@ -2086,24 +3102,22 @@ } }, "node_modules/eslint-plugin-import": { - "version": "2.24.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.24.2.tgz", - "integrity": "sha512-hNVtyhiEtZmpsabL4neEj+6M5DCLgpYyG9nzJY8lZQeQXEn5UPW1DpUdsMHMXsq98dbNm7nt1w9ZMSVpfJdi8Q==", + "version": "2.25.3", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.25.3.tgz", + "integrity": "sha512-RzAVbby+72IB3iOEL8clzPLzL3wpDrlwjsTBAQXgyp5SeTqqY+0bFubwuo+y/HLhNZcXV4XqTBO4LGsfyHIDXg==", "dev": true, "dependencies": { - "array-includes": "^3.1.3", - "array.prototype.flat": "^1.2.4", + 
"array-includes": "^3.1.4", + "array.prototype.flat": "^1.2.5", "debug": "^2.6.9", "doctrine": "^2.1.0", "eslint-import-resolver-node": "^0.3.6", - "eslint-module-utils": "^2.6.2", - "find-up": "^2.0.0", + "eslint-module-utils": "^2.7.1", "has": "^1.0.3", - "is-core-module": "^2.6.0", + "is-core-module": "^2.8.0", + "is-glob": "^4.0.3", "minimatch": "^3.0.4", - "object.values": "^1.1.4", - "pkg-up": "^2.0.0", - "read-pkg-up": "^3.0.0", + "object.values": "^1.1.5", "resolve": "^1.20.0", "tsconfig-paths": "^3.11.0" }, @@ -2111,7 +3125,7 @@ "node": ">=4" }, "peerDependencies": { - "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0" + "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8" } }, "node_modules/eslint-plugin-import/node_modules/debug": { @@ -2870,6 +3884,24 @@ "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", "dev": true }, + "node_modules/htmlparser2": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", + "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.0.0", + "domutils": "^2.5.2", + "entities": "^2.0.0" + } + }, "node_modules/http-proxy-agent": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz", @@ -2897,6 +3929,14 @@ "node": ">= 6" } }, + "node_modules/i": { + "version": "0.3.7", + "resolved": "https://registry.npmjs.org/i/-/i-0.3.7.tgz", + "integrity": "sha512-FYz4wlXgkQwIPqhzC5TdNMLSE5+GS1IIDJZY/1ZiEPCT2S3COUVZeT5OW4BmW4r5LHLQuOosSwsvnroG9GR59Q==", + "engines": { + "node": ">=0.4" + } + }, "node_modules/ignore": { "version": "4.0.6", "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", @@ -3037,9 
+4077,9 @@ } }, "node_modules/is-core-module": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.6.0.tgz", - "integrity": "sha512-wShG8vs60jKfPWpF2KZRaAtvt3a20OAn7+IJ6hLPECpSABLcKtFKTTI4ZtH5QcBruBHlq+WsdHWyz0BCZW7svQ==", + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.8.0.tgz", + "integrity": "sha512-vd15qHsaqrRL7dtH6QNuy0ndJmRDrS9HAM1CAiSifNUFv4x1a0CCVsj18hJ1mShxIG6T2i1sO78MkP56r0nYRw==", "dev": true, "dependencies": { "has": "^1.0.3" @@ -3080,9 +4120,9 @@ } }, "node_modules/is-glob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", - "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "dependencies": { "is-extglob": "^2.1.1" }, @@ -3149,6 +4189,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-shared-array-buffer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.1.tgz", + "integrity": "sha512-IU0NmyknYZN0rChcKhRO1X8LYz5Isj/Fsqh8NJOSf+N/hCOTwy29F32Ik7a+QszE63IdvmwdTPDd6cZ5pg4cwA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-stream": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", @@ -3197,6 +4246,30 @@ "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", "dev": true }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + 
"engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-weakref": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.1.tgz", + "integrity": "sha512-b2jKc2pQZjaeFYWEf7ScFj+Be1I+PXmlu572Q8coTXZ+LD/QQZ7ShPMst8h16riVgyXTQwUsFEl74mDvc/3MHQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-windows": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", @@ -3423,30 +4496,6 @@ "node": ">= 0.8.0" } }, - "node_modules/load-json-file": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-4.0.0.tgz", - "integrity": "sha1-L19Fq5HjMhYjT9U62rZo607AmTs=", - "dev": true, - "dependencies": { - "graceful-fs": "^4.1.2", - "parse-json": "^4.0.0", - "pify": "^3.0.0", - "strip-bom": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/load-json-file/node_modules/strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", - "dev": true, - "engines": { - "node": ">=4" - } - }, "node_modules/loader-utils": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-1.4.0.tgz", @@ -3498,6 +4547,11 @@ "integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8=", "dev": true }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168=" + }, "node_modules/lodash.flattendeep": { "version": "4.4.0", "resolved": "https://registry.npmjs.org/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz", @@ -3516,6 +4570,11 @@ "integrity": "sha1-2HV7HagH3eJIFrDWqEvqGnYjCyM=", "dev": true }, + "node_modules/lodash.throttle": { + 
"version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.throttle/-/lodash.throttle-4.1.1.tgz", + "integrity": "sha1-wj6RtxAkKscMN/HhzaknTMOb8vQ=" + }, "node_modules/lodash.truncate": { "version": "4.4.2", "resolved": "https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz", @@ -3527,6 +4586,7 @@ "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.0.0.tgz", "integrity": "sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA==", "dev": true, + "peer": true, "dependencies": { "chalk": "^4.0.0" }, @@ -3534,6 +4594,17 @@ "node": ">=10" } }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, "node_modules/lru-cache": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", @@ -3645,6 +4716,7 @@ "resolved": "https://registry.npmjs.org/mocha/-/mocha-8.4.0.tgz", "integrity": "sha512-hJaO0mwDXmZS4ghXsvPVriOhsxQ7ofcpQdm8dE+jISUOKopitvnXFQmpRR7jd2K6VBG6E26gU3IAbXXGIbu4sQ==", "dev": true, + "peer": true, "dependencies": { "@ungap/promise-all-settled": "1.1.2", "ansi-colors": "4.1.1", @@ -3688,13 +4760,15 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true + "dev": true, + "peer": true }, "node_modules/mocha/node_modules/debug": { "version": "4.3.1", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", "dev": true, + "peer": true, "dependencies": { "ms": "2.1.2" }, @@ 
-3711,13 +4785,15 @@ "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true + "dev": true, + "peer": true }, "node_modules/mocha/node_modules/find-up": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", "dev": true, + "peer": true, "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" @@ -3734,6 +4810,7 @@ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.0.0.tgz", "integrity": "sha512-pqon0s+4ScYUvX30wxQi3PogGFAlUyH0awepWvwkj4jD4v+ova3RiYw8bmA6x2rDrEaj8i/oWKoRxpVNW+Re8Q==", "dev": true, + "peer": true, "dependencies": { "argparse": "^2.0.1" }, @@ -3746,6 +4823,7 @@ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", "dev": true, + "peer": true, "dependencies": { "p-locate": "^5.0.0" }, @@ -3760,13 +4838,15 @@ "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true + "dev": true, + "peer": true }, "node_modules/mocha/node_modules/p-limit": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "dev": true, + "peer": true, "dependencies": { "yocto-queue": "^0.1.0" }, @@ -3782,6 +4862,7 @@ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", "dev": true, + "peer": true, "dependencies": { 
"p-limit": "^3.0.2" }, @@ -3797,6 +4878,7 @@ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", "dev": true, + "peer": true, "engines": { "node": ">=8" } @@ -3806,6 +4888,7 @@ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", "dev": true, + "peer": true, "dependencies": { "has-flag": "^4.0.0" }, @@ -3827,6 +4910,7 @@ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.20.tgz", "integrity": "sha512-a1cQNyczgKbLX9jwbS/+d7W8fX/RfgYR7lVWwWOGIPNgK2m0MWvrGF6/m4kk6U3QcFMnZf3RIhL0v2Jgh/0Uxw==", "dev": true, + "peer": true, "bin": { "nanoid": "bin/nanoid.cjs" }, @@ -4097,6 +5181,14 @@ "node": ">=6" } }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/object-inspect": { "version": "1.11.0", "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.11.0.tgz", @@ -4133,14 +5225,14 @@ } }, "node_modules/object.values": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.4.tgz", - "integrity": "sha512-TnGo7j4XSnKQoK3MfvkzqKCi0nVe/D9I9IjwTNYdb/fxYHpjrluHVOgw0AF6jrRFGMPHdfuidR09tIDiIvnaSg==", + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.5.tgz", + "integrity": "sha512-QUZRW0ilQ3PnPpbNtgdNV1PDbEqLIiSFB3l+EnGtBQ/8SUTLj1PZwtQHABZtLgwpJZTSZhuGLOGk57Drx2IvYg==", "dev": true, "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.3", - "es-abstract": "^1.18.2" + "es-abstract": "^1.19.1" }, "engines": { "node": ">= 0.4" @@ -4247,19 +5339,6 @@ "node": ">=6" } }, - 
"node_modules/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", - "dev": true, - "dependencies": { - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/path-exists": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", @@ -4322,15 +5401,6 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, - "node_modules/pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", - "dev": true, - "engines": { - "node": ">=4" - } - }, "node_modules/pkg-dir": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-2.0.0.tgz", @@ -4343,18 +5413,6 @@ "node": ">=4" } }, - "node_modules/pkg-up": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-2.0.0.tgz", - "integrity": "sha1-yBmscoBZpGHKscOImivjxJoATX8=", - "dev": true, - "dependencies": { - "find-up": "^2.1.0" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/prelude-ls": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", @@ -4365,9 +5423,9 @@ } }, "node_modules/prettier": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.4.0.tgz", - "integrity": "sha512-DsEPLY1dE5HF3BxCRBmD4uYZ+5DCbvatnolqTqcxEgKVZnL2kUfyu7b8pPQ5+hTBkdhU9SLUmK0/pHb07RE4WQ==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.4.1.tgz", + "integrity": "sha512-9fbDAXSBcc6Bs1mZrDYb3XKzDLm4EXXL9sC1LqKP5rZkT6KRr/rf9amVUcODVXgguK/isJz0d0hP72WeaKWsvA==", "dev": true, "bin": { "prettier": "bin-prettier.js" @@ -4468,43 +5526,44 @@ "safe-buffer": "^5.1.0" } }, - "node_modules/read-pkg": { - "version": "3.0.0", - "resolved": 
"https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz", - "integrity": "sha1-nLxoaXj+5l0WwA4rGcI3/Pbjg4k=", - "dev": true, + "node_modules/react": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react/-/react-17.0.2.tgz", + "integrity": "sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA==", "dependencies": { - "load-json-file": "^4.0.0", - "normalize-package-data": "^2.3.2", - "path-type": "^3.0.0" + "loose-envify": "^1.1.0", + "object-assign": "^4.1.1" }, "engines": { - "node": ">=4" + "node": ">=0.10.0" } }, - "node_modules/read-pkg-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-3.0.0.tgz", - "integrity": "sha1-PtSWaF26D4/hGNBpHcUfSh/5bwc=", - "dev": true, + "node_modules/react-dom": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-17.0.2.tgz", + "integrity": "sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA==", "dependencies": { - "find-up": "^2.0.0", - "read-pkg": "^3.0.0" + "loose-envify": "^1.1.0", + "object-assign": "^4.1.1", + "scheduler": "^0.20.2" }, - "engines": { - "node": ">=4" + "peerDependencies": { + "react": "17.0.2" } }, - "node_modules/read-pkg/node_modules/path-type": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", - "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", - "dev": true, + "node_modules/react-resize-detector": { + "version": "6.7.6", + "resolved": "https://registry.npmjs.org/react-resize-detector/-/react-resize-detector-6.7.6.tgz", + "integrity": "sha512-/6RZlul1yePSoYJxWxmmgjO320moeLC/khrwpEVIL+D2EjLKhqOwzFv+H8laMbImVj7Zu4FlMa0oA7au3/ChjQ==", "dependencies": { - "pify": "^3.0.0" + "@types/resize-observer-browser": "^0.1.6", + "lodash.debounce": "^4.0.8", + "lodash.throttle": "^4.1.1", + "resize-observer-polyfill": "^1.5.1" 
}, - "engines": { - "node": ">=4" + "peerDependencies": { + "react": "^16.0.0 || ^17.0.0", + "react-dom": "^16.0.0 || ^17.0.0" } }, "node_modules/readable-stream": { @@ -4527,6 +5586,7 @@ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==", "dev": true, + "peer": true, "dependencies": { "picomatch": "^2.2.1" }, @@ -4582,6 +5642,11 @@ "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", "dev": true }, + "node_modules/resize-observer-polyfill": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz", + "integrity": "sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg==" + }, "node_modules/resolve": { "version": "1.20.0", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz", @@ -4658,6 +5723,15 @@ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", "dev": true }, + "node_modules/scheduler": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.20.2.tgz", + "integrity": "sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ==", + "dependencies": { + "loose-envify": "^1.1.0", + "object-assign": "^4.1.1" + } + }, "node_modules/semver": { "version": "7.3.5", "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz", @@ -5493,6 +6567,7 @@ "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==", "dev": true, + "peer": true, "dependencies": { "string-width": "^1.0.2 || 2" } @@ -5502,6 +6577,7 @@ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", "integrity": 
"sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", "dev": true, + "peer": true, "engines": { "node": ">=4" } @@ -5511,6 +6587,7 @@ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", "dev": true, + "peer": true, "engines": { "node": ">=4" } @@ -5520,6 +6597,7 @@ "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", "dev": true, + "peer": true, "dependencies": { "is-fullwidth-code-point": "^2.0.0", "strip-ansi": "^4.0.0" @@ -5533,6 +6611,7 @@ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", "dev": true, + "peer": true, "dependencies": { "ansi-regex": "^3.0.0" }, @@ -5553,7 +6632,8 @@ "version": "6.1.0", "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.1.0.tgz", "integrity": "sha512-toV7q9rWNYha963Pl/qyeZ6wG+3nnsyvolaNUS8+R5Wtw6qJPTxIlOP1ZSvcGhEJw+l3HMMmtiNo9Gl61G4GVg==", - "dev": true + "dev": true, + "peer": true }, "node_modules/wrap-ansi": { "version": "7.0.0", @@ -5699,24 +6779,24 @@ "@codemirror/closebrackets": "^0.19.0", "@codemirror/commands": "^0.19.4", "@codemirror/comment": "^0.19.0", - "@codemirror/highlight": "^0.19.5", + "@codemirror/highlight": "^0.19.6", "@codemirror/history": "^0.19.0", - "@codemirror/language": "^0.19.3", + "@codemirror/language": "^0.19.4", "@codemirror/lint": "^0.19.1", "@codemirror/matchbrackets": "^0.19.1", "@codemirror/search": "^0.19.2", - "@codemirror/state": "^0.19.2", + "@codemirror/state": "^0.19.5", "@codemirror/view": "^0.19.7", "@forevolve/bootstrap-dark": "^1.0.0", "@fortawesome/fontawesome-svg-core": "^1.2.14", "@fortawesome/free-solid-svg-icons": "^5.7.1", "@fortawesome/react-fontawesome": "^0.1.4", "@nexucis/fuzzy": "^0.3.0", - "bootstrap": "^4.6.0", + "bootstrap": "^5.1.3", "codemirror-promql": 
"0.18.0", "css.escape": "^1.5.1", "downshift": "^3.4.8", - "i": "^0.3.6", + "i": "^0.3.7", "jquery": "^3.5.1", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.24.0", @@ -5739,15 +6819,15 @@ "@types/enzyme": "^3.10.9", "@types/flot": "0.0.32", "@types/jest": "^27.0.1", - "@types/jquery": "^3.5.1", - "@types/node": "^16.7.6", + "@types/jquery": "^3.5.8", + "@types/node": "^16.11.7", "@types/react": "^17.0.19", - "@types/react-copy-to-clipboard": "^5.0.1", + "@types/react-copy-to-clipboard": "^5.0.2", "@types/react-dom": "^17.0.9", - "@types/react-resize-detector": "^5.0.0", - "@types/react-router-dom": "^5.1.8", - "@types/sanitize-html": "^1.20.2", - "@types/sinon": "^10.0.2", + "@types/react-resize-detector": "^6.1.0", + "@types/react-router-dom": "^5.3.2", + "@types/sanitize-html": "^2.5.0", + "@types/sinon": "^10.0.6", "@wojtekmaj/enzyme-adapter-react-17": "^0.6.3", "enzyme": "^3.11.0", "enzyme-to-json": "^3.6.2", @@ -5757,7 +6837,7 @@ "jest-canvas-mock": "^2.3.1", "jest-fetch-mock": "^3.0.3", "mutationobserver-shim": "^0.3.7", - "prettier": "^2.3.2", + "prettier": "^2.4.1", "react-scripts": "4.0.3", "sinon": "^11.1.2", "typescript": "^4.4.2" @@ -5777,17 +6857,6 @@ "node": ">=6.9.0" } }, - "react-app/node_modules/@babel/helper-annotate-as-pure": { - "version": "7.15.4", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/types": "^7.15.4" - }, - "engines": { - "node": ">=6.9.0" - } - }, "react-app/node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { "version": "7.15.4", "dev": true, @@ -5871,14 +6940,6 @@ "node": ">=6.9.0" } }, - "react-app/node_modules/@babel/helper-plugin-utils": { - "version": "7.14.5", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, "react-app/node_modules/@babel/helper-remap-async-to-generator": { "version": "7.15.4", "dev": true, @@ -6262,20 +7323,6 @@ "@babel/core": "^7.0.0-0" } }, - "react-app/node_modules/@babel/plugin-syntax-flow": { - "version": "7.14.5", - "dev": true, - 
"license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, "react-app/node_modules/@babel/plugin-syntax-import-meta": { "version": "7.10.4", "dev": true, @@ -6298,20 +7345,6 @@ "@babel/core": "^7.0.0-0" } }, - "react-app/node_modules/@babel/plugin-syntax-jsx": { - "version": "7.14.5", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, "react-app/node_modules/@babel/plugin-syntax-logical-assignment-operators": { "version": "7.10.4", "dev": true, @@ -6804,24 +7837,6 @@ "@babel/core": "^7.0.0-0" } }, - "react-app/node_modules/@babel/plugin-transform-react-jsx": { - "version": "7.14.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.14.5", - "@babel/helper-module-imports": "^7.14.5", - "@babel/helper-plugin-utils": "^7.14.5", - "@babel/plugin-syntax-jsx": "^7.14.5", - "@babel/types": "^7.14.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, "react-app/node_modules/@babel/plugin-transform-react-jsx-development": { "version": "7.14.5", "dev": true, @@ -8718,22 +9733,6 @@ "dev": true, "license": "MIT" }, - "react-app/node_modules/@sinonjs/commons": { - "version": "1.8.3", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "type-detect": "4.0.8" - } - }, - "react-app/node_modules/@sinonjs/fake-timers": { - "version": "7.1.2", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "@sinonjs/commons": "^1.7.0" - } - }, "react-app/node_modules/@sinonjs/samsam": { "version": "6.0.2", "dev": true, @@ -9078,11 +10077,6 @@ "@types/node": "*" } }, - "react-app/node_modules/@types/history": { - "version": "4.7.9", - "dev": true, - "license": "MIT" - }, 
"react-app/node_modules/@types/html-minifier-terser": { "version": "5.1.2", "dev": true, @@ -9138,34 +10132,11 @@ "dev": true, "license": "MIT" }, - "react-app/node_modules/@types/prop-types": { - "version": "15.7.4", - "dev": true, - "license": "MIT" - }, "react-app/node_modules/@types/q": { "version": "1.5.5", "dev": true, "license": "MIT" }, - "react-app/node_modules/@types/react": { - "version": "17.0.20", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/prop-types": "*", - "@types/scheduler": "*", - "csstype": "^3.0.2" - } - }, - "react-app/node_modules/@types/react-copy-to-clipboard": { - "version": "5.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/react": "*" - } - }, "react-app/node_modules/@types/react-dom": { "version": "17.0.9", "dev": true, @@ -9174,33 +10145,6 @@ "@types/react": "*" } }, - "react-app/node_modules/@types/react-resize-detector": { - "version": "5.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/react": "*" - } - }, - "react-app/node_modules/@types/react-router": { - "version": "5.1.16", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/history": "*", - "@types/react": "*" - } - }, - "react-app/node_modules/@types/react-router-dom": { - "version": "5.1.8", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/history": "*", - "@types/react": "*", - "@types/react-router": "*" - } - }, "react-app/node_modules/@types/react-test-renderer": { "version": "17.0.1", "dev": true, @@ -9209,10 +10153,6 @@ "@types/react": "*" } }, - "react-app/node_modules/@types/resize-observer-browser": { - "version": "0.1.6", - "license": "MIT" - }, "react-app/node_modules/@types/resolve": { "version": "0.0.8", "dev": true, @@ -9221,27 +10161,6 @@ "@types/node": "*" } }, - "react-app/node_modules/@types/sanitize-html": { - "version": "1.27.2", - "dev": true, - "license": "MIT", - "dependencies": { - "htmlparser2": "^4.1.0" - } - }, - "react-app/node_modules/@types/scheduler": { 
- "version": "0.16.2", - "dev": true, - "license": "MIT" - }, - "react-app/node_modules/@types/sinon": { - "version": "10.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "@sinonjs/fake-timers": "^7.1.0" - } - }, "react-app/node_modules/@types/source-list-map": { "version": "0.1.2", "dev": true, @@ -11040,24 +11959,6 @@ "url": "https://github.com/sponsors/fb55" } }, - "react-app/node_modules/cheerio/node_modules/htmlparser2": { - "version": "6.1.0", - "dev": true, - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "MIT", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "domutils": "^2.5.2", - "entities": "^2.0.0" - } - }, "react-app/node_modules/chokidar": { "version": "3.5.2", "license": "MIT", @@ -12032,11 +12933,6 @@ "dev": true, "license": "MIT" }, - "react-app/node_modules/csstype": { - "version": "3.0.9", - "dev": true, - "license": "MIT" - }, "react-app/node_modules/cyclist": { "version": "1.0.1", "dev": true, @@ -12448,18 +13344,6 @@ "@babel/runtime": "^7.1.2" } }, - "react-app/node_modules/dom-serializer": { - "version": "1.3.2", - "license": "MIT", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.2.0", - "entities": "^2.0.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, "react-app/node_modules/domain-browser": { "version": "1.2.0", "dev": true, @@ -12469,16 +13353,6 @@ "npm": ">=1.2" } }, - "react-app/node_modules/domelementtype": { - "version": "2.2.0", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "BSD-2-Clause" - }, "react-app/node_modules/domexception": { "version": "2.0.1", "dev": true, @@ -12498,31 +13372,6 @@ "node": ">=8" } }, - "react-app/node_modules/domhandler": { - "version": "4.2.2", - "license": "BSD-2-Clause", - "dependencies": { - "domelementtype": "^2.2.0" - }, - 
"engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "react-app/node_modules/domutils": { - "version": "2.8.0", - "license": "BSD-2-Clause", - "dependencies": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.2.0" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - } - }, "react-app/node_modules/dot-case": { "version": "3.0.4", "dev": true, @@ -12662,13 +13511,6 @@ "once": "^1.4.0" } }, - "react-app/node_modules/entities": { - "version": "2.2.0", - "license": "BSD-2-Clause", - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, "react-app/node_modules/enzyme": { "version": "3.11.0", "dev": true, @@ -14499,31 +15341,6 @@ "node": ">=4.0.0" } }, - "react-app/node_modules/htmlparser2": { - "version": "4.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^3.0.0", - "domutils": "^2.0.0", - "entities": "^2.0.0" - } - }, - "react-app/node_modules/htmlparser2/node_modules/domhandler": { - "version": "3.3.0", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "domelementtype": "^2.0.1" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, "react-app/node_modules/http-deceiver": { "version": "1.2.7", "dev": true, @@ -14715,12 +15532,6 @@ "node": ">=8.12.0" } }, - "react-app/node_modules/i": { - "version": "0.3.6", - "engines": { - "node": ">=0.4" - } - }, "react-app/node_modules/iconv-lite": { "version": "0.4.24", "dev": true, @@ -18787,10 +19598,6 @@ "dev": true, "license": "MIT" }, - "react-app/node_modules/lodash.debounce": { - "version": "4.0.8", - "license": "MIT" - }, "react-app/node_modules/lodash.escape": { "version": "4.0.1", "dev": true, @@ -18828,10 +19635,6 @@ "lodash._reinterpolate": "^3.0.0" } }, - "react-app/node_modules/lodash.throttle": { - "version": "4.1.1", - "license": "MIT" - 
}, "react-app/node_modules/lodash.uniq": { "version": "4.5.0", "dev": true, @@ -18849,16 +19652,6 @@ "url": "https://tidelift.com/funding/github/npm/loglevel" } }, - "react-app/node_modules/loose-envify": { - "version": "1.4.0", - "license": "MIT", - "dependencies": { - "js-tokens": "^3.0.0 || ^4.0.0" - }, - "bin": { - "loose-envify": "cli.js" - } - }, "react-app/node_modules/lower-case": { "version": "2.0.2", "dev": true, @@ -19528,13 +20321,6 @@ "dev": true, "license": "MIT" }, - "react-app/node_modules/object-assign": { - "version": "4.1.1", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "react-app/node_modules/object-copy": { "version": "0.1.0", "dev": true, @@ -21540,17 +22326,6 @@ "node": ">= 0.8" } }, - "react-app/node_modules/react": { - "version": "17.0.2", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, "react-app/node_modules/react-app-polyfill": { "version": "2.0.0", "dev": true, @@ -21793,18 +22568,6 @@ "node": ">=4" } }, - "react-app/node_modules/react-dom": { - "version": "17.0.2", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1", - "scheduler": "^0.20.2" - }, - "peerDependencies": { - "react": "17.0.2" - } - }, "react-app/node_modules/react-error-boundary": { "version": "3.1.3", "dev": true, @@ -21857,20 +22620,6 @@ "node": ">=0.10.0" } }, - "react-app/node_modules/react-resize-detector": { - "version": "6.7.6", - "license": "MIT", - "dependencies": { - "@types/resize-observer-browser": "^0.1.6", - "lodash.debounce": "^4.0.8", - "lodash.throttle": "^4.1.1", - "resize-observer-polyfill": "^1.5.1" - }, - "peerDependencies": { - "react": "^16.0.0 || ^17.0.0", - "react-dom": "^16.0.0 || ^17.0.0" - } - }, "react-app/node_modules/react-router": { "version": "5.2.1", "license": "MIT", @@ -22312,24 +23061,6 @@ "node": ">=0.10.0" } }, - "react-app/node_modules/renderkid/node_modules/htmlparser2": { 
- "version": "6.1.0", - "dev": true, - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "MIT", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "domutils": "^2.5.2", - "entities": "^2.0.0" - } - }, "react-app/node_modules/renderkid/node_modules/strip-ansi": { "version": "3.0.1", "dev": true, @@ -22362,10 +23093,6 @@ "dev": true, "license": "MIT" }, - "react-app/node_modules/resize-observer-polyfill": { - "version": "1.5.1", - "license": "MIT" - }, "react-app/node_modules/resolve-cwd": { "version": "3.0.0", "dev": true, @@ -22940,23 +23667,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "react-app/node_modules/sanitize-html/node_modules/htmlparser2": { - "version": "6.1.0", - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "MIT", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "domutils": "^2.5.2", - "entities": "^2.0.0" - } - }, "react-app/node_modules/sanitize-html/node_modules/postcss": { "version": "8.3.6", "license": "MIT", @@ -23007,14 +23717,6 @@ "node": ">=10" } }, - "react-app/node_modules/scheduler": { - "version": "0.20.2", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1" - } - }, "react-app/node_modules/schema-utils": { "version": "2.7.1", "dev": true, @@ -26880,6 +27582,15 @@ "source-map": "^0.5.0" } }, + "@babel/helper-annotate-as-pure": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.16.0.tgz", + "integrity": "sha512-ItmYF9vR4zA8cByDocY05o0LGUkp1zhbTQOH1NFyl5xXEqlTJQCEJjieriw+aFpxo16swMxUnUiKS7a/r4vtHg==", + "dev": true, + "requires": { + "@babel/types": "^7.16.0" + } + }, "@babel/helper-compilation-targets": { "version": "7.15.4", "resolved": 
"https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.15.4.tgz", @@ -26939,12 +27650,12 @@ } }, "@babel/helper-module-imports": { - "version": "7.15.4", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.15.4.tgz", - "integrity": "sha512-jeAHZbzUwdW/xHgHQ3QmWR4Jg6j15q4w/gCfwZvtqOxoo5DKtLHk8Bsf4c5RZRC7NmLEs+ohkdq8jFefuvIxAA==", + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.16.0.tgz", + "integrity": "sha512-kkH7sWzKPq0xt3H1n+ghb4xEMP8k0U7XV3kkB+ZGy69kDk2ySFW1qPi06sjKzFY3t1j6XbJSqr4mF9L7CYVyhg==", "dev": true, "requires": { - "@babel/types": "^7.15.4" + "@babel/types": "^7.16.0" } }, "@babel/helper-module-transforms": { @@ -26972,6 +27683,12 @@ "@babel/types": "^7.15.4" } }, + "@babel/helper-plugin-utils": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.14.5.tgz", + "integrity": "sha512-/37qQCE3K0vvZKwoK4XU/irIJQdIfCJuhU5eKnNxpFDsOkgFaUAwbv+RYw6eYgsC0E4hS7r5KqGULUogqui0fQ==", + "dev": true + }, "@babel/helper-replace-supers": { "version": "7.15.4", "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.15.4.tgz", @@ -27003,9 +27720,9 @@ } }, "@babel/helper-validator-identifier": { - "version": "7.14.9", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.9.tgz", - "integrity": "sha512-pQYxPY0UP6IHISRitNe8bsijHex4TWZXi2HwKVsjPiltzlhse2znVcm9Ace510VT1kxIHjGJCZZQBX2gJDbo0g==", + "version": "7.15.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.15.7.tgz", + "integrity": "sha512-K4JvCtQqad9OY2+yTU8w+E82ywk/fe+ELNlt1G8z3bVGlZfn/hOcQQsUhGhW/N+tb3fxK800wLtKOE/aM0m72w==", "dev": true }, "@babel/helper-validator-option": { @@ -27100,6 +27817,37 @@ "integrity": 
"sha512-S/TSCcsRuCkmpUuoWijua0Snt+f3ewU/8spLo+4AXJCZfT0bVCzLD5MuOKdrx0mlAptbKzn5AdgEIIKXxXkz9Q==", "dev": true }, + "@babel/plugin-syntax-flow": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-flow/-/plugin-syntax-flow-7.16.0.tgz", + "integrity": "sha512-dH91yCo0RyqfzWgoM5Ji9ir8fQ+uFbt9KHM3d2x4jZOuHS6wNA+CRmRUP/BWCsHG2bjc7A2Way6AvH1eQk0wig==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.14.5" + } + }, + "@babel/plugin-syntax-jsx": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.16.0.tgz", + "integrity": "sha512-8zv2+xiPHwly31RK4RmnEYY5zziuF3O7W2kIDW+07ewWDh6Oi0dRq8kwvulRkFgt6DB97RlKs5c1y068iPlCUg==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.14.5" + } + }, + "@babel/plugin-transform-react-jsx": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.16.0.tgz", + "integrity": "sha512-rqDgIbukZ44pqq7NIRPGPGNklshPkvlmvqjdx3OZcGPk4zGIenYkxDTvl3LsSL8gqcc3ZzGmXPE6hR/u/voNOw==", + "dev": true, + "requires": { + "@babel/helper-annotate-as-pure": "^7.16.0", + "@babel/helper-module-imports": "^7.16.0", + "@babel/helper-plugin-utils": "^7.14.5", + "@babel/plugin-syntax-jsx": "^7.16.0", + "@babel/types": "^7.16.0" + } + }, "@babel/template": { "version": "7.15.4", "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.15.4.tgz", @@ -27157,12 +27905,12 @@ } }, "@babel/types": { - "version": "7.15.6", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.15.6.tgz", - "integrity": "sha512-BPU+7QhqNjmWyDO0/vitH/CuhpV8ZmK1wpKva8nuyNF5MJfuRNWMc+hc14+u9xT93kvykMdncrJT19h74uB1Ig==", + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.16.0.tgz", + "integrity": "sha512-PJgg/k3SdLsGb3hhisFvtLOw5ts113klrpLuIPtCJIU+BB24fqq6lf8RWqKJEjzqXR9AEH1rIb5XTqwBHB+kQg==", "dev": true, "requires": { - 
"@babel/helper-validator-identifier": "^7.14.9", + "@babel/helper-validator-identifier": "^7.15.7", "to-fast-properties": "^2.0.0" } }, @@ -27262,9 +28010,9 @@ } }, "@codemirror/highlight": { - "version": "0.19.5", - "resolved": "https://registry.npmjs.org/@codemirror/highlight/-/highlight-0.19.5.tgz", - "integrity": "sha512-JDGEH/l/DGpxG2k+mgqMKcuFURIs42eoTB4H4tN7QmzyW/z/MlNKiHHv7pWXyN+H5QvftK5yctsxUu77EZikmw==", + "version": "0.19.6", + "resolved": "https://registry.npmjs.org/@codemirror/highlight/-/highlight-0.19.6.tgz", + "integrity": "sha512-+eibu6on9quY8uN3xJ/n3rH+YIDLlpX7YulVmFvqAIz/ukRQ5tWaBmB7fMixHmnmRIRBRZgB8rNtonuMwZSAHQ==", "requires": { "@codemirror/language": "^0.19.0", "@codemirror/rangeset": "^0.19.0", @@ -27284,9 +28032,9 @@ } }, "@codemirror/language": { - "version": "0.19.3", - "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-0.19.3.tgz", - "integrity": "sha512-6vjkRYHRJg/z9wdAk75nU2fQwCJBsh2HpkIjKXIHfzISSgLt5qSDxVhPd8Uu8PD5WMmFFP8tX7I9kdIt873o0A==", + "version": "0.19.4", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-0.19.4.tgz", + "integrity": "sha512-yLnLDUkK00BlRVXpPkoJMYEssYKuRLOmK+DdJJ8zOOD4D62T7bSQ05NPyWzWr3PQX1k7sxGICGKR7INzfv9Snw==", "requires": { "@codemirror/state": "^0.19.0", "@codemirror/text": "^0.19.0", @@ -27360,9 +28108,9 @@ } }, "@codemirror/state": { - "version": "0.19.2", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-0.19.2.tgz", - "integrity": "sha512-dDqCrtkb0c/LYUlvQBLyLfkISEskbZnhvBbcVOF4j2AusJ1ptJ3EGMxBL9G16GP1TOdC1T613gA1J1qc3pbfGQ==", + "version": "0.19.5", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-0.19.5.tgz", + "integrity": "sha512-a3bJnkFuh4Z36nuOzAYobWViQ9eq5ux2wOb/46jUl+0Sj2BGrdz+pY1L+y2NUZhwPyWGcIrBtranr5P0rEEq8A==", "requires": { "@codemirror/text": "^0.19.0" } @@ -27504,14 +28252,14 @@ "dev": true }, "@lezer/common": { - "version": "0.15.5", - "resolved": 
"https://registry.npmjs.org/@lezer/common/-/common-0.15.5.tgz", - "integrity": "sha512-ufcjclusHXGdhp4gSPbPD7sUd38SgOej7m5tAEuG2tNPzqzV0d1vwwLh57R6IwW79ml2mb3tUjAoDfqI7v1HEw==" + "version": "0.15.8", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-0.15.8.tgz", + "integrity": "sha512-zpS/xty48huX4uBidupmWDYCRBYpVtoTiFhzYhd6GsQwU67WsdSImdWzZJDrF/DhcQ462wyrZahHlo2grFB5ig==" }, "@lezer/generator": { - "version": "0.15.1", - "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-0.15.1.tgz", - "integrity": "sha512-OlG6ogwrTUeCsKVzPjXX5cFLT3XGESZY75Ust7DLMwmEgH1Awu/E4PGMFQZeTfI5lBWVo10reqXowiOhNKwOYQ==", + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-0.15.2.tgz", + "integrity": "sha512-nxY6TTj0ZAcAvg1zEeaZnt1xODdyPhD0lTaPOgcGOVFHhwwx0Oz7CxZB7Rh+xRCXFr5kJWDtM1uXPp80UZjhAg==", "dev": true, "requires": { "@lezer/common": "^0.15.0", @@ -27552,6 +28300,24 @@ "fastq": "^1.6.0" } }, + "@sinonjs/commons": { + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-1.8.3.tgz", + "integrity": "sha512-xkNcLAn/wZaX14RPlwizcKicDk9G3F8m2nU3L7Ukm5zBgTwiT0wsoFAHx9Jq56fJA1z/7uKGtCRu16sOUCLIHQ==", + "dev": true, + "requires": { + "type-detect": "4.0.8" + } + }, + "@sinonjs/fake-timers": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-7.1.2.tgz", + "integrity": "sha512-iQADsW4LBMISqZ6Ci1dupJL9pprqwcVFTcOsEmQOEhW+KLCVn/Y4Jrvg2k19fIHCp+iFprriYPTdRcQR8NbUPg==", + "dev": true, + "requires": { + "@sinonjs/commons": "^1.7.0" + } + }, "@tootallnate/once": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-1.1.2.tgz", @@ -27559,9 +28325,9 @@ "dev": true }, "@types/chai": { - "version": "4.2.21", - "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.2.21.tgz", - "integrity": "sha512-yd+9qKmJxm496BOV9CMNaey8TWsikaZOwMRwPHQIjcOJM9oV+fi9ZMNw3JsVnbEEbo2gRTDnGEBv8pjyn67hNg==", + "version": 
"4.2.22", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.2.22.tgz", + "integrity": "sha512-tFfcE+DSTzWAgifkjik9AySNqIyNoYwmR+uecPwwD/XRNfvOjmC/FjCxpiUGDkDVDphPfCUecSQVFw+lN3M3kQ==", "dev": true }, "@types/flot": { @@ -27573,10 +28339,16 @@ "@types/jquery": "*" } }, + "@types/history": { + "version": "4.7.9", + "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.9.tgz", + "integrity": "sha512-MUc6zSmU3tEVnkQ78q0peeEjKWPUADMlC/t++2bI8WnAG2tvYRPIgHG8lWkXwqc8MsUF6Z2MOf+Mh5sazOmhiQ==", + "dev": true + }, "@types/jquery": { - "version": "3.5.6", - "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.6.tgz", - "integrity": "sha512-SmgCQRzGPId4MZQKDj9Hqc6kSXFNWZFHpELkyK8AQhf8Zr6HKfCzFv9ZC1Fv3FyQttJZOlap3qYb12h61iZAIg==", + "version": "3.5.8", + "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.8.tgz", + "integrity": "sha512-cXk6NwqjDYg+UI9p2l3x0YmPa4m7RrXqmbK4IpVVpRJiYXU/QTo+UZrn54qfE1+9Gao4qpYqUnxm5ZCy2FTXAw==", "dev": true, "requires": { "@types/sizzle": "*" @@ -27601,17 +28373,102 @@ "dev": true }, "@types/mocha": { - "version": "8.2.3", - "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-8.2.3.tgz", - "integrity": "sha512-ekGvFhFgrc2zYQoX4JeZPmVzZxw6Dtllga7iGHzfbYIYkAMUx/sAFP2GdFpLff+vdHXu5fl7WX9AT+TtqYcsyw==", + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-9.0.0.tgz", + "integrity": "sha512-scN0hAWyLVAvLR9AyW7HoFF5sJZglyBsbPuHO4fv7JRvfmPBMfp1ozWqOf/e4wwPNxezBZXRfWzMb6iFLgEVRA==", "dev": true }, "@types/node": { - "version": "16.9.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-16.9.1.tgz", - "integrity": "sha512-QpLcX9ZSsq3YYUUnD3nFDY8H7wctAhQj/TFKL8Ya8v5fMm3CFXxo8zStsLAl780ltoYoo1WvKUVGBQK+1ifr7g==", + "version": "16.11.7", + "resolved": "https://registry.npmjs.org/@types/node/-/node-16.11.7.tgz", + "integrity": "sha512-QB5D2sqfSjCmTuWcBWyJ+/44bcjO7VbjSbOE0ucoVbAsSNQc4Lt6QkgkVXkTDwkL4z/beecZNDvVX15D4P8Jbw==", "dev": true }, + 
"@types/prop-types": { + "version": "15.7.4", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.4.tgz", + "integrity": "sha512-rZ5drC/jWjrArrS8BR6SIr4cWpW09RNTYt9AMZo3Jwwif+iacXAqgVjm0B0Bv/S1jhDXKHqRVNCbACkJ89RAnQ==", + "dev": true + }, + "@types/react": { + "version": "17.0.34", + "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.34.tgz", + "integrity": "sha512-46FEGrMjc2+8XhHXILr+3+/sTe3OfzSPU9YGKILLrUYbQ1CLQC9Daqo1KzENGXAWwrFwiY0l4ZbF20gRvgpWTg==", + "dev": true, + "requires": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + }, + "@types/react-copy-to-clipboard": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/@types/react-copy-to-clipboard/-/react-copy-to-clipboard-5.0.2.tgz", + "integrity": "sha512-O29AThfxrkUFRsZXjfSWR2yaWo0ppB1yLEnHA+Oh24oNetjBAwTDu1PmolIqdJKzsZiO4J1jn6R6TmO96uBvGg==", + "dev": true, + "requires": { + "@types/react": "*" + } + }, + "@types/react-resize-detector": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@types/react-resize-detector/-/react-resize-detector-6.1.0.tgz", + "integrity": "sha512-runvF8/keQK3ipyjb7Ez2RKtaOZgrpqEN2PVCp93B/WavgFEeogFMnplMu4OuhpQHwpcu9UbqFiT2cPWoCWmWQ==", + "dev": true, + "requires": { + "react-resize-detector": "*" + } + }, + "@types/react-router": { + "version": "5.1.17", + "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.17.tgz", + "integrity": "sha512-RNSXOyb3VyRs/EOGmjBhhGKTbnN6fHWvy5FNLzWfOWOGjgVUKqJZXfpKzLmgoU8h6Hj8mpALj/mbXQASOb92wQ==", + "dev": true, + "requires": { + "@types/history": "*", + "@types/react": "*" + } + }, + "@types/react-router-dom": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.3.2.tgz", + "integrity": "sha512-ELEYRUie2czuJzaZ5+ziIp9Hhw+juEw8b7C11YNA4QdLCVbQ3qLi2l4aq8XnlqM7V31LZX8dxUuFUCrzHm6sqQ==", + "dev": true, + "requires": { + "@types/history": "*", + "@types/react": "*", + 
"@types/react-router": "*" + } + }, + "@types/resize-observer-browser": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/@types/resize-observer-browser/-/resize-observer-browser-0.1.6.tgz", + "integrity": "sha512-61IfTac0s9jvNtBCpyo86QeaN8qqpMGHdK0uGKCCIy2dt5/Yk84VduHIdWAcmkC5QvdkPL0p5eWYgUZtHKKUVg==" + }, + "@types/sanitize-html": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.5.0.tgz", + "integrity": "sha512-PeFIEZsO9m1+ACJlXUaimgrR+5DEDiIXhz7Hso307jmq5Yz0lb5kDp8LiTr5dMMMliC/jNNx/qds7Zoxa4zexw==", + "dev": true, + "requires": { + "htmlparser2": "^6.0.0" + } + }, + "@types/scheduler": { + "version": "0.16.2", + "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.2.tgz", + "integrity": "sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew==", + "dev": true + }, + "@types/sinon": { + "version": "10.0.6", + "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.6.tgz", + "integrity": "sha512-6EF+wzMWvBNeGrfP3Nx60hhx+FfwSg1JJBLAAP/IdIUq0EYkqCYf70VT3PhuhPX9eLD+Dp+lNdpb/ZeHG8Yezg==", + "dev": true, + "requires": { + "@sinonjs/fake-timers": "^7.1.0" + } + }, "@types/sizzle": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/@types/sizzle/-/sizzle-2.3.3.tgz", @@ -27619,32 +28476,110 @@ "dev": true }, "@typescript-eslint/eslint-plugin": { - "version": "4.31.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-4.31.1.tgz", - "integrity": "sha512-UDqhWmd5i0TvPLmbK5xY3UZB0zEGseF+DHPghZ37Sb83Qd3p8ujhvAtkU4OF46Ka5Pm5kWvFIx0cCTBFKo0alA==", + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-4.33.0.tgz", + "integrity": "sha512-aINiAxGVdOl1eJyVjaWn/YcVAq4Gi/Yo35qHGCnqbWVz61g39D0h23veY/MA0rFFGfxK7TySg2uwDeNv+JgVpg==", "dev": true, "requires": { - "@typescript-eslint/experimental-utils": "4.31.1", - 
"@typescript-eslint/scope-manager": "4.31.1", + "@typescript-eslint/experimental-utils": "4.33.0", + "@typescript-eslint/scope-manager": "4.33.0", "debug": "^4.3.1", "functional-red-black-tree": "^1.0.1", + "ignore": "^5.1.8", "regexpp": "^3.1.0", "semver": "^7.3.5", "tsutils": "^3.21.0" + }, + "dependencies": { + "@typescript-eslint/scope-manager": { + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-4.33.0.tgz", + "integrity": "sha512-5IfJHpgTsTZuONKbODctL4kKuQje/bzBRkwHE8UOZ4f89Zeddg+EGZs8PD8NcN4LdM3ygHWYB3ukPAYjvl/qbQ==", + "dev": true, + "requires": { + "@typescript-eslint/types": "4.33.0", + "@typescript-eslint/visitor-keys": "4.33.0" + } + }, + "@typescript-eslint/types": { + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-4.33.0.tgz", + "integrity": "sha512-zKp7CjQzLQImXEpLt2BUw1tvOMPfNoTAfb8l51evhYbOEEzdWyQNmHWWGPR6hwKJDAi+1VXSBmnhL9kyVTTOuQ==", + "dev": true + }, + "@typescript-eslint/visitor-keys": { + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-4.33.0.tgz", + "integrity": "sha512-uqi/2aSz9g2ftcHWf8uLPJA70rUv6yuMW5Bohw+bwcuzaxQIHaKFZCKGoGXIrc9vkTJ3+0txM73K0Hq3d5wgIg==", + "dev": true, + "requires": { + "@typescript-eslint/types": "4.33.0", + "eslint-visitor-keys": "^2.0.0" + } + }, + "ignore": { + "version": "5.1.9", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.9.tgz", + "integrity": "sha512-2zeMQpbKz5dhZ9IwL0gbxSW5w0NK/MSAMtNuhgIHEPmaU3vPdKPL0UdvUCXs5SS4JAwsBxysK5sFMW8ocFiVjQ==", + "dev": true + } } }, "@typescript-eslint/experimental-utils": { - "version": "4.31.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-4.31.1.tgz", - "integrity": "sha512-NtoPsqmcSsWty0mcL5nTZXMf7Ei0Xr2MT8jWjXMVgRK0/1qeQ2jZzLFUh4QtyJ4+/lPUyMw5cSfeeME+Zrtp9Q==", + "version": "4.33.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-4.33.0.tgz", + "integrity": "sha512-zeQjOoES5JFjTnAhI5QY7ZviczMzDptls15GFsI6jyUOq0kOf9+WonkhtlIhh0RgHRnqj5gdNxW5j1EvAyYg6Q==", "dev": true, "requires": { "@types/json-schema": "^7.0.7", - "@typescript-eslint/scope-manager": "4.31.1", - "@typescript-eslint/types": "4.31.1", - "@typescript-eslint/typescript-estree": "4.31.1", + "@typescript-eslint/scope-manager": "4.33.0", + "@typescript-eslint/types": "4.33.0", + "@typescript-eslint/typescript-estree": "4.33.0", "eslint-scope": "^5.1.1", "eslint-utils": "^3.0.0" + }, + "dependencies": { + "@typescript-eslint/scope-manager": { + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-4.33.0.tgz", + "integrity": "sha512-5IfJHpgTsTZuONKbODctL4kKuQje/bzBRkwHE8UOZ4f89Zeddg+EGZs8PD8NcN4LdM3ygHWYB3ukPAYjvl/qbQ==", + "dev": true, + "requires": { + "@typescript-eslint/types": "4.33.0", + "@typescript-eslint/visitor-keys": "4.33.0" + } + }, + "@typescript-eslint/types": { + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-4.33.0.tgz", + "integrity": "sha512-zKp7CjQzLQImXEpLt2BUw1tvOMPfNoTAfb8l51evhYbOEEzdWyQNmHWWGPR6hwKJDAi+1VXSBmnhL9kyVTTOuQ==", + "dev": true + }, + "@typescript-eslint/typescript-estree": { + "version": "4.33.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-4.33.0.tgz", + "integrity": "sha512-rkWRY1MPFzjwnEVHsxGemDzqqddw2QbTJlICPD9p9I9LfsO8fdmfQPOX3uKfUaGRDFJbfrtm/sXhVXN4E+bzCA==", + "dev": true, + "requires": { + "@typescript-eslint/types": "4.33.0", + "@typescript-eslint/visitor-keys": "4.33.0", + "debug": "^4.3.1", + "globby": "^11.0.3", + "is-glob": "^4.0.1", + "semver": "^7.3.5", + "tsutils": "^3.21.0" + } + }, + "@typescript-eslint/visitor-keys": { + "version": "4.33.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-4.33.0.tgz", + "integrity": "sha512-uqi/2aSz9g2ftcHWf8uLPJA70rUv6yuMW5Bohw+bwcuzaxQIHaKFZCKGoGXIrc9vkTJ3+0txM73K0Hq3d5wgIg==", + "dev": true, + "requires": { + "@typescript-eslint/types": "4.33.0", + "eslint-visitor-keys": "^2.0.0" + } + } } }, "@typescript-eslint/parser": { @@ -27817,16 +28752,16 @@ "dev": true }, "array-includes": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.3.tgz", - "integrity": "sha512-gcem1KlBU7c9rB+Rq8/3PPKsK2kjqeEBa3bD5kkQo4nYlOHQCJqIJFqBXDEfwaRuYTT4E+FxA9xez7Gf/e3Q7A==", + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.4.tgz", + "integrity": "sha512-ZTNSQkmWumEbiHO2GF4GmWxYVTiQyJy2XOTa15sdQSrvKn7l+180egQMqlrMOUMCyLMD7pmyQe4mMDUT6Behrw==", "dev": true, "requires": { "call-bind": "^1.0.2", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", + "es-abstract": "^1.19.1", "get-intrinsic": "^1.1.1", - "is-string": "^1.0.5" + "is-string": "^1.0.7" } }, "array-union": { @@ -27836,14 +28771,14 @@ "dev": true }, "array.prototype.flat": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.2.4.tgz", - "integrity": "sha512-4470Xi3GAPAjZqFcljX2xzckv1qeKPizoNkiS0+O4IoPR2ZNpcjE0pkhdihlDouK+x6QOast26B4Q/O9DJnwSg==", + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.2.5.tgz", + "integrity": "sha512-KaYU+S+ndVqyUnignHftkwc58o3uVU1jzczILJ1tN2YaIZpFIKBiP/x/j97E5MVPsaCloPbqWLB/8qCTVvT2qg==", "dev": true, "requires": { - "call-bind": "^1.0.0", + "call-bind": "^1.0.2", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.1" + "es-abstract": "^1.19.0" } }, "arrify": { @@ -27998,6 +28933,7 @@ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.1.tgz", "integrity": 
"sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw==", "dev": true, + "peer": true, "requires": { "anymatch": "~3.1.1", "braces": "~3.0.2", @@ -28044,36 +28980,474 @@ "requires": { "@codemirror/autocomplete": "^0.19.3", "@codemirror/basic-setup": "^0.19.0", - "@codemirror/highlight": "^0.19.5", - "@codemirror/language": "^0.19.3", + "@codemirror/highlight": "^0.19.6", + "@codemirror/language": "^0.19.4", "@codemirror/lint": "^0.19.1", - "@codemirror/state": "^0.19.2", + "@codemirror/state": "^0.19.5", "@codemirror/view": "^0.19.7", - "@lezer/common": "^0.15.5", - "@lezer/generator": "^0.15.1", - "@types/chai": "^4.2.12", + "@lezer/common": "^0.15.8", + "@lezer/generator": "^0.15.2", + "@types/chai": "^4.2.22", "@types/lru-cache": "^5.1.0", - "@types/mocha": "^8.0.3", - "@types/node": "^16.7.6", - "@typescript-eslint/eslint-plugin": "^4.31.0", - "@typescript-eslint/parser": "^4.31.0", + "@types/mocha": "^9.0.0", + "@types/node": "^16.11.7", + "@typescript-eslint/eslint-plugin": "^5.3.1", + "@typescript-eslint/parser": "^5.3.1", "chai": "^4.2.0", "codecov": "^3.8.1", - "eslint": "^7.32.0", + "eslint": "^8.2.0", "eslint-config-prettier": "^8.3.0", - "eslint-plugin-flowtype": "^5.9.2", - "eslint-plugin-import": "^2.24.2", + "eslint-plugin-flowtype": "^8.0.3", + "eslint-plugin-import": "^2.25.3", "eslint-plugin-prettier": "^4.0.0", "isomorphic-fetch": "^3.0.0", "lru-cache": "^6.0.0", - "mocha": "^8.1.2", + "mocha": "^9.1.3", "nock": "^13.0.11", "nyc": "^15.1.0", - "prettier": "^2.3.2", + "prettier": "^2.4.1", "ts-loader": "^7.0.4", "ts-mocha": "^8.0.0", "ts-node": "^9.0.0", "typescript": "^4.2.3" + }, + "dependencies": { + "@eslint/eslintrc": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-1.0.4.tgz", + "integrity": "sha512-h8Vx6MdxwWI2WM8/zREHMoqdgLNXEL4QX3MWSVMdyNJGvXVOs+6lp+m2hc3FnuMHDc4poxFNI20vCk0OmI4G0Q==", + "dev": true, + "requires": { + "ajv": "^6.12.4", + "debug": 
"^4.3.2", + "espree": "^9.0.0", + "globals": "^13.9.0", + "ignore": "^4.0.6", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.0.4", + "strip-json-comments": "^3.1.1" + } + }, + "@humanwhocodes/config-array": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.6.0.tgz", + "integrity": "sha512-JQlEKbcgEUjBFhLIF4iqM7u/9lwgHRBcpHrmUNCALK0Q3amXN6lxdoXLnF0sm11E9VqTmBALR87IlUg1bZ8A9A==", + "dev": true, + "requires": { + "@humanwhocodes/object-schema": "^1.2.0", + "debug": "^4.1.1", + "minimatch": "^3.0.4" + } + }, + "@typescript-eslint/eslint-plugin": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.4.0.tgz", + "integrity": "sha512-9/yPSBlwzsetCsGEn9j24D8vGQgJkOTr4oMLas/w886ZtzKIs1iyoqFrwsX2fqYEeUwsdBpC21gcjRGo57u0eg==", + "dev": true, + "requires": { + "@typescript-eslint/experimental-utils": "5.4.0", + "@typescript-eslint/scope-manager": "5.4.0", + "debug": "^4.3.2", + "functional-red-black-tree": "^1.0.1", + "ignore": "^5.1.8", + "regexpp": "^3.2.0", + "semver": "^7.3.5", + "tsutils": "^3.21.0" + }, + "dependencies": { + "ignore": { + "version": "5.1.9", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.9.tgz", + "integrity": "sha512-2zeMQpbKz5dhZ9IwL0gbxSW5w0NK/MSAMtNuhgIHEPmaU3vPdKPL0UdvUCXs5SS4JAwsBxysK5sFMW8ocFiVjQ==", + "dev": true + } + } + }, + "@typescript-eslint/experimental-utils": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-5.4.0.tgz", + "integrity": "sha512-Nz2JDIQUdmIGd6p33A+naQmwfkU5KVTLb/5lTk+tLVTDacZKoGQisj8UCxk7onJcrgjIvr8xWqkYI+DbI3TfXg==", + "dev": true, + "requires": { + "@types/json-schema": "^7.0.9", + "@typescript-eslint/scope-manager": "5.4.0", + "@typescript-eslint/types": "5.4.0", + "@typescript-eslint/typescript-estree": "5.4.0", + "eslint-scope": "^5.1.1", + "eslint-utils": "^3.0.0" + }, + 
"dependencies": { + "eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dev": true, + "requires": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + } + }, + "estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "dev": true + } + } + }, + "@typescript-eslint/parser": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.4.0.tgz", + "integrity": "sha512-JoB41EmxiYpaEsRwpZEYAJ9XQURPFer8hpkIW9GiaspVLX8oqbqNM8P4EP8HOZg96yaALiLEVWllA2E8vwsIKw==", + "dev": true, + "requires": { + "@typescript-eslint/scope-manager": "5.4.0", + "@typescript-eslint/types": "5.4.0", + "@typescript-eslint/typescript-estree": "5.4.0", + "debug": "^4.3.2" + } + }, + "@typescript-eslint/scope-manager": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.4.0.tgz", + "integrity": "sha512-pRxFjYwoi8R+n+sibjgF9iUiAELU9ihPBtHzocyW8v8D8G8KeQvXTsW7+CBYIyTYsmhtNk50QPGLE3vrvhM5KA==", + "dev": true, + "requires": { + "@typescript-eslint/types": "5.4.0", + "@typescript-eslint/visitor-keys": "5.4.0" + } + }, + "@typescript-eslint/types": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.4.0.tgz", + "integrity": "sha512-GjXNpmn+n1LvnttarX+sPD6+S7giO+9LxDIGlRl4wK3a7qMWALOHYuVSZpPTfEIklYjaWuMtfKdeByx0AcaThA==", + "dev": true + }, + "@typescript-eslint/typescript-estree": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.4.0.tgz", + "integrity": 
"sha512-nhlNoBdhKuwiLMx6GrybPT3SFILm5Gij2YBdPEPFlYNFAXUJWX6QRgvi/lwVoadaQEFsizohs6aFRMqsXI2ewA==", + "dev": true, + "requires": { + "@typescript-eslint/types": "5.4.0", + "@typescript-eslint/visitor-keys": "5.4.0", + "debug": "^4.3.2", + "globby": "^11.0.4", + "is-glob": "^4.0.3", + "semver": "^7.3.5", + "tsutils": "^3.21.0" + } + }, + "@typescript-eslint/visitor-keys": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.4.0.tgz", + "integrity": "sha512-PVbax7MeE7tdLfW5SA0fs8NGVVr+buMPrcj+CWYWPXsZCH8qZ1THufDzbXm1xrZ2b2PA1iENJ0sRq5fuUtvsJg==", + "dev": true, + "requires": { + "@typescript-eslint/types": "5.4.0", + "eslint-visitor-keys": "^3.0.0" + } + }, + "acorn": { + "version": "8.5.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.5.0.tgz", + "integrity": "sha512-yXbYeFy+jUuYd3/CDcg2NkIYE991XYX/bje7LmjJigUciaeO1JR4XxXgCIV1/Zc/dRuFEyw1L0pbA+qynJkW5Q==", + "dev": true + }, + "argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true + }, + "chokidar": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.2.tgz", + "integrity": "sha512-ekGhOnNVPgT77r4K/U3GDhu+FQ2S8TnK/s2KbIGXi0SZWuwkZ2QNyfWdZW+TVfn84DpEP7rLeCt2UI6bJ8GwbQ==", + "dev": true, + "requires": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "fsevents": "~2.3.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "dependencies": { + "glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "requires": { + "is-glob": "^4.0.1" + } + } + } + }, + 
"eslint": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.2.0.tgz", + "integrity": "sha512-erw7XmM+CLxTOickrimJ1SiF55jiNlVSp2qqm0NuBWPtHYQCegD5ZMaW0c3i5ytPqL+SSLaCxdvQXFPLJn+ABw==", + "dev": true, + "requires": { + "@eslint/eslintrc": "^1.0.4", + "@humanwhocodes/config-array": "^0.6.0", + "ajv": "^6.10.0", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "enquirer": "^2.3.5", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^6.0.0", + "eslint-utils": "^3.0.0", + "eslint-visitor-keys": "^3.0.0", + "espree": "^9.0.0", + "esquery": "^1.4.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "functional-red-black-tree": "^1.0.1", + "glob-parent": "^6.0.1", + "globals": "^13.6.0", + "ignore": "^4.0.6", + "import-fresh": "^3.0.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.0.4", + "natural-compare": "^1.4.0", + "optionator": "^0.9.1", + "progress": "^2.0.0", + "regexpp": "^3.2.0", + "semver": "^7.2.1", + "strip-ansi": "^6.0.1", + "strip-json-comments": "^3.1.0", + "text-table": "^0.2.0", + "v8-compile-cache": "^2.0.3" + } + }, + "eslint-plugin-flowtype": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/eslint-plugin-flowtype/-/eslint-plugin-flowtype-8.0.3.tgz", + "integrity": "sha512-dX8l6qUL6O+fYPtpNRideCFSpmWOUVx5QcaGLVqe/vlDiBSe4vYljDWDETwnyFzpl7By/WVIu6rcrniCgH9BqQ==", + "dev": true, + "requires": { + "lodash": "^4.17.21", + "string-natural-compare": "^3.0.1" + } + }, + "eslint-scope": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-6.0.0.tgz", + "integrity": "sha512-uRDL9MWmQCkaFus8RF5K9/L/2fn+80yoW3jkD53l4shjCh26fCtvJGasxjUqP5OT87SYTxCVA3BwTUzuELx9kA==", + "dev": true, + "requires": { + "esrecurse": "^4.3.0", + "estraverse": 
"^5.2.0" + } + }, + "eslint-visitor-keys": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.1.0.tgz", + "integrity": "sha512-yWJFpu4DtjsWKkt5GeNBBuZMlNcYVs6vRCLoCVEJrTjaSB6LC98gFipNK/erM2Heg/E8mIK+hXG/pJMLK+eRZA==", + "dev": true + }, + "espree": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.0.0.tgz", + "integrity": "sha512-r5EQJcYZ2oaGbeR0jR0fFVijGOcwai07/690YRXLINuhmVeRY4UKSAsQPe/0BNuDgwP7Ophoc1PRsr2E3tkbdQ==", + "dev": true, + "requires": { + "acorn": "^8.5.0", + "acorn-jsx": "^5.3.1", + "eslint-visitor-keys": "^3.0.0" + } + }, + "estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true + }, + "find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "requires": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + } + }, + "glob": { + "version": "7.1.7", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", + "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "requires": { + "is-glob": "^4.0.3" + } + }, + "js-yaml": { + "version": "4.1.0", + "resolved": 
"https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "requires": { + "argparse": "^2.0.1" + } + }, + "locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "requires": { + "p-locate": "^5.0.0" + } + }, + "log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "requires": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + } + }, + "mocha": { + "version": "9.1.3", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-9.1.3.tgz", + "integrity": "sha512-Xcpl9FqXOAYqI3j79pEtHBBnQgVXIhpULjGQa7DVb0Po+VzmSIK9kanAiWLHoRR/dbZ2qpdPshuXr8l1VaHCzw==", + "dev": true, + "requires": { + "@ungap/promise-all-settled": "1.1.2", + "ansi-colors": "4.1.1", + "browser-stdout": "1.3.1", + "chokidar": "3.5.2", + "debug": "4.3.2", + "diff": "5.0.0", + "escape-string-regexp": "4.0.0", + "find-up": "5.0.0", + "glob": "7.1.7", + "growl": "1.10.5", + "he": "1.2.0", + "js-yaml": "4.1.0", + "log-symbols": "4.1.0", + "minimatch": "3.0.4", + "ms": "2.1.3", + "nanoid": "3.1.25", + "serialize-javascript": "6.0.0", + "strip-json-comments": "3.1.1", + "supports-color": "8.1.1", + "which": "2.0.2", + "workerpool": "6.1.5", + "yargs": "16.2.0", + "yargs-parser": "20.2.4", + "yargs-unparser": "2.0.0" + } + }, + "ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true + }, + "nanoid": { + "version": "3.1.25", + 
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.25.tgz", + "integrity": "sha512-rdwtIXaXCLFAQbnfqDRnI6jaRHp9fTcYBjtFKE8eezcZ7LuLjhUaQGNeMXf1HmRoCH32CLz6XwX0TtxEOS/A3Q==", + "dev": true + }, + "p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "requires": { + "yocto-queue": "^0.1.0" + } + }, + "p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "requires": { + "p-limit": "^3.0.2" + } + }, + "path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true + }, + "readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "requires": { + "picomatch": "^2.2.1" + } + }, + "serialize-javascript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", + "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", + "dev": true, + "requires": { + "randombytes": "^2.1.0" + } + }, + "strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "requires": { + "ansi-regex": "^5.0.1" + } + }, + "supports-color": { + "version": "8.1.1", + 
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + }, + "workerpool": { + "version": "6.1.5", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.1.5.tgz", + "integrity": "sha512-XdKkCK0Zqc6w3iTxLckiuJ81tiD/o5rBE/m+nXpRCB+/Sq4DqkfXZ/x0jW02DG1tGsfUGXbTJyZDP+eu67haSw==", + "dev": true + } } }, "color-convert": { @@ -28145,6 +29519,12 @@ "which": "^2.0.1" } }, + "csstype": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.0.10.tgz", + "integrity": "sha512-2u44ZG2OcNUO9HDp/Jl8C07x6pU/eTR3ncV91SiK3dhG9TWvRVsCoJw14Ckx5DgWkzGA3waZWO3d7pgqpUI/XA==", + "dev": true + }, "debug": { "version": "4.3.2", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz", @@ -28216,6 +29596,39 @@ "esutils": "^2.0.2" } }, + "dom-serializer": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.3.2.tgz", + "integrity": "sha512-5c54Bk5Dw4qAxNOI1pFEizPSjVsx5+bpJKmL2kPn8JhBUq2q09tTCa3mjijun2NfK78NMouDYNMBkOrPZiS+ig==", + "requires": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + } + }, + "domelementtype": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", + "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==" + }, + "domhandler": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.2.2.tgz", + "integrity": "sha512-PzE9aBMsdZO8TK4BnuJwH0QT41wgMbRzuZrHUcpYncEjmQazq8QEaBWgLG7ZyC/DAZKEgglpIA6j4Qn/HmxS3w==", + "requires": { + "domelementtype": "^2.2.0" + } + }, + "domutils": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", + "integrity": 
"sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "requires": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + } + }, "electron-to-chromium": { "version": "1.3.838", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.838.tgz", @@ -28254,6 +29667,11 @@ "ansi-colors": "^4.1.1" } }, + "entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==" + }, "errno": { "version": "0.1.8", "resolved": "https://registry.npmjs.org/errno/-/errno-0.1.8.tgz", @@ -28273,9 +29691,9 @@ } }, "es-abstract": { - "version": "1.18.6", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.6.tgz", - "integrity": "sha512-kAeIT4cku5eNLNuUKhlmtuk1/TRZvQoYccn6TO0cSVdf1kzB0T7+dYuVK9MWM7l+/53W2Q8M7N2c6MQvhXFcUQ==", + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.19.1.tgz", + "integrity": "sha512-2vJ6tjA/UfqLm2MPs7jxVybLoB8i1t1Jd9R3kISld20sIxPcTbLuggQOUxeWeAvIUkduv/CfMjuh4WmiXr2v9w==", "dev": true, "requires": { "call-bind": "^1.0.2", @@ -28289,7 +29707,9 @@ "is-callable": "^1.2.4", "is-negative-zero": "^2.0.1", "is-regex": "^1.1.4", + "is-shared-array-buffer": "^1.0.1", "is-string": "^1.0.7", + "is-weakref": "^1.0.1", "object-inspect": "^1.11.0", "object-keys": "^1.1.1", "object.assign": "^4.1.2", @@ -28423,12 +29843,13 @@ } }, "eslint-module-utils": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.6.2.tgz", - "integrity": "sha512-QG8pcgThYOuqxupd06oYTZoNOGaUdTY1PqK+oS6ElF6vs4pBdk/aYxFVQQXzcrAqp9m7cl7lb2ubazX+g16k2Q==", + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.7.1.tgz", + "integrity": 
"sha512-fjoetBXQZq2tSTWZ9yWVl2KuFrTZZH3V+9iD1V1RfpDgxzJR+mPd/KZmMiA8gbPqdBzpNiEHOuT7IYEWxrH0zQ==", "dev": true, "requires": { "debug": "^3.2.7", + "find-up": "^2.1.0", "pkg-dir": "^2.0.0" }, "dependencies": { @@ -28454,24 +29875,22 @@ } }, "eslint-plugin-import": { - "version": "2.24.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.24.2.tgz", - "integrity": "sha512-hNVtyhiEtZmpsabL4neEj+6M5DCLgpYyG9nzJY8lZQeQXEn5UPW1DpUdsMHMXsq98dbNm7nt1w9ZMSVpfJdi8Q==", + "version": "2.25.3", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.25.3.tgz", + "integrity": "sha512-RzAVbby+72IB3iOEL8clzPLzL3wpDrlwjsTBAQXgyp5SeTqqY+0bFubwuo+y/HLhNZcXV4XqTBO4LGsfyHIDXg==", "dev": true, "requires": { - "array-includes": "^3.1.3", - "array.prototype.flat": "^1.2.4", + "array-includes": "^3.1.4", + "array.prototype.flat": "^1.2.5", "debug": "^2.6.9", "doctrine": "^2.1.0", "eslint-import-resolver-node": "^0.3.6", - "eslint-module-utils": "^2.6.2", - "find-up": "^2.0.0", + "eslint-module-utils": "^2.7.1", "has": "^1.0.3", - "is-core-module": "^2.6.0", + "is-core-module": "^2.8.0", + "is-glob": "^4.0.3", "minimatch": "^3.0.4", - "object.values": "^1.1.4", - "pkg-up": "^2.0.0", - "read-pkg-up": "^3.0.0", + "object.values": "^1.1.5", "resolve": "^1.20.0", "tsconfig-paths": "^3.11.0" }, @@ -28938,13 +30357,13 @@ "@codemirror/closebrackets": "^0.19.0", "@codemirror/commands": "^0.19.4", "@codemirror/comment": "^0.19.0", - "@codemirror/highlight": "^0.19.5", + "@codemirror/highlight": "^0.19.6", "@codemirror/history": "^0.19.0", - "@codemirror/language": "^0.19.3", + "@codemirror/language": "^0.19.4", "@codemirror/lint": "^0.19.1", "@codemirror/matchbrackets": "^0.19.1", "@codemirror/search": "^0.19.2", - "@codemirror/state": "^0.19.2", + "@codemirror/state": "^0.19.5", "@codemirror/view": "^0.19.7", "@forevolve/bootstrap-dark": "^1.0.0", "@fortawesome/fontawesome-svg-core": "^1.2.14", @@ -28955,17 +30374,17 @@ 
"@types/enzyme": "^3.10.9", "@types/flot": "0.0.32", "@types/jest": "^27.0.1", - "@types/jquery": "^3.5.1", - "@types/node": "^16.7.6", + "@types/jquery": "^3.5.8", + "@types/node": "^16.11.7", "@types/react": "^17.0.19", - "@types/react-copy-to-clipboard": "^5.0.1", + "@types/react-copy-to-clipboard": "^5.0.2", "@types/react-dom": "^17.0.9", - "@types/react-resize-detector": "^5.0.0", - "@types/react-router-dom": "^5.1.8", - "@types/sanitize-html": "^1.20.2", - "@types/sinon": "^10.0.2", + "@types/react-resize-detector": "^6.1.0", + "@types/react-router-dom": "^5.3.2", + "@types/sanitize-html": "^2.5.0", + "@types/sinon": "^10.0.6", "@wojtekmaj/enzyme-adapter-react-17": "^0.6.3", - "bootstrap": "^4.6.0", + "bootstrap": "^5.1.3", "codemirror-promql": "0.18.0", "css.escape": "^1.5.1", "downshift": "^3.4.8", @@ -28975,7 +30394,7 @@ "eslint-config-react-app": "^6.0.0", "eslint-plugin-prettier": "^4.0.0", "fsevents": "^2.3.2", - "i": "^0.3.6", + "i": "^0.3.7", "jest-canvas-mock": "^2.3.1", "jest-fetch-mock": "^3.0.3", "jquery": "^3.5.1", @@ -28984,7 +30403,7 @@ "moment-timezone": "^0.5.23", "mutationobserver-shim": "^0.3.7", "popper.js": "^1.14.3", - "prettier": "^2.3.2", + "prettier": "^2.4.1", "react": "^17.0.2", "react-copy-to-clipboard": "^5.0.4", "react-dom": "^17.0.2", @@ -29008,13 +30427,6 @@ "@babel/highlight": "^7.14.5" } }, - "@babel/helper-annotate-as-pure": { - "version": "7.15.4", - "dev": true, - "requires": { - "@babel/types": "^7.15.4" - } - }, "@babel/helper-builder-binary-assignment-operator-visitor": { "version": "7.15.4", "dev": true, @@ -29070,10 +30482,6 @@ "@babel/types": "^7.15.4" } }, - "@babel/helper-plugin-utils": { - "version": "7.14.5", - "dev": true - }, "@babel/helper-remap-async-to-generator": { "version": "7.15.4", "dev": true, @@ -29295,13 +30703,6 @@ "@babel/helper-plugin-utils": "^7.8.3" } }, - "@babel/plugin-syntax-flow": { - "version": "7.14.5", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.14.5" - } - }, 
"@babel/plugin-syntax-import-meta": { "version": "7.10.4", "dev": true, @@ -29316,13 +30717,6 @@ "@babel/helper-plugin-utils": "^7.8.0" } }, - "@babel/plugin-syntax-jsx": { - "version": "7.14.5", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.14.5" - } - }, "@babel/plugin-syntax-logical-assignment-operators": { "version": "7.10.4", "dev": true, @@ -29591,17 +30985,6 @@ "@babel/helper-plugin-utils": "^7.14.5" } }, - "@babel/plugin-transform-react-jsx": { - "version": "7.14.9", - "dev": true, - "requires": { - "@babel/helper-annotate-as-pure": "^7.14.5", - "@babel/helper-module-imports": "^7.14.5", - "@babel/helper-plugin-utils": "^7.14.5", - "@babel/plugin-syntax-jsx": "^7.14.5", - "@babel/types": "^7.14.9" - } - }, "@babel/plugin-transform-react-jsx-development": { "version": "7.14.5", "dev": true, @@ -30849,20 +32232,6 @@ } } }, - "@sinonjs/commons": { - "version": "1.8.3", - "dev": true, - "requires": { - "type-detect": "4.0.8" - } - }, - "@sinonjs/fake-timers": { - "version": "7.1.2", - "dev": true, - "requires": { - "@sinonjs/commons": "^1.7.0" - } - }, "@sinonjs/samsam": { "version": "6.0.2", "dev": true, @@ -31065,10 +32434,6 @@ "@types/node": "*" } }, - "@types/history": { - "version": "4.7.9", - "dev": true - }, "@types/html-minifier-terser": { "version": "5.1.2", "dev": true @@ -31115,30 +32480,10 @@ "version": "2.3.2", "dev": true }, - "@types/prop-types": { - "version": "15.7.4", - "dev": true - }, "@types/q": { "version": "1.5.5", "dev": true }, - "@types/react": { - "version": "17.0.20", - "dev": true, - "requires": { - "@types/prop-types": "*", - "@types/scheduler": "*", - "csstype": "^3.0.2" - } - }, - "@types/react-copy-to-clipboard": { - "version": "5.0.1", - "dev": true, - "requires": { - "@types/react": "*" - } - }, "@types/react-dom": { "version": "17.0.9", "dev": true, @@ -31146,30 +32491,6 @@ "@types/react": "*" } }, - "@types/react-resize-detector": { - "version": "5.0.0", - "dev": true, - "requires": { - "@types/react": 
"*" - } - }, - "@types/react-router": { - "version": "5.1.16", - "dev": true, - "requires": { - "@types/history": "*", - "@types/react": "*" - } - }, - "@types/react-router-dom": { - "version": "5.1.8", - "dev": true, - "requires": { - "@types/history": "*", - "@types/react": "*", - "@types/react-router": "*" - } - }, "@types/react-test-renderer": { "version": "17.0.1", "dev": true, @@ -31177,9 +32498,6 @@ "@types/react": "*" } }, - "@types/resize-observer-browser": { - "version": "0.1.6" - }, "@types/resolve": { "version": "0.0.8", "dev": true, @@ -31187,24 +32505,6 @@ "@types/node": "*" } }, - "@types/sanitize-html": { - "version": "1.27.2", - "dev": true, - "requires": { - "htmlparser2": "^4.1.0" - } - }, - "@types/scheduler": { - "version": "0.16.2", - "dev": true - }, - "@types/sinon": { - "version": "10.0.2", - "dev": true, - "requires": { - "@sinonjs/fake-timers": "^7.1.0" - } - }, "@types/source-list-map": { "version": "0.1.2", "dev": true @@ -32508,18 +33808,6 @@ "parse5": "^6.0.1", "parse5-htmlparser2-tree-adapter": "^6.0.1", "tslib": "^2.2.0" - }, - "dependencies": { - "htmlparser2": { - "version": "6.1.0", - "dev": true, - "requires": { - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "domutils": "^2.5.2", - "entities": "^2.0.0" - } - } } }, "cheerio-select": { @@ -33217,10 +34505,6 @@ } } }, - "csstype": { - "version": "3.0.9", - "dev": true - }, "cyclist": { "version": "1.0.1", "dev": true @@ -33512,21 +34796,10 @@ "@babel/runtime": "^7.1.2" } }, - "dom-serializer": { - "version": "1.3.2", - "requires": { - "domelementtype": "^2.0.1", - "domhandler": "^4.2.0", - "entities": "^2.0.0" - } - }, "domain-browser": { "version": "1.2.0", "dev": true }, - "domelementtype": { - "version": "2.2.0" - }, "domexception": { "version": "2.0.1", "dev": true, @@ -33540,20 +34813,6 @@ } } }, - "domhandler": { - "version": "4.2.2", - "requires": { - "domelementtype": "^2.2.0" - } - }, - "domutils": { - "version": "2.8.0", - "requires": { - "dom-serializer": 
"^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.2.0" - } - }, "dot-case": { "version": "3.0.4", "dev": true, @@ -33657,9 +34916,6 @@ "once": "^1.4.0" } }, - "entities": { - "version": "2.2.0" - }, "enzyme": { "version": "3.11.0", "dev": true, @@ -34894,25 +36150,6 @@ } } }, - "htmlparser2": { - "version": "4.1.0", - "dev": true, - "requires": { - "domelementtype": "^2.0.1", - "domhandler": "^3.0.0", - "domutils": "^2.0.0", - "entities": "^2.0.0" - }, - "dependencies": { - "domhandler": { - "version": "3.3.0", - "dev": true, - "requires": { - "domelementtype": "^2.0.1" - } - } - } - }, "http-deceiver": { "version": "1.2.7", "dev": true @@ -35058,9 +36295,6 @@ "version": "1.1.1", "dev": true }, - "i": { - "version": "0.3.6" - }, "iconv-lite": { "version": "0.4.24", "dev": true, @@ -37726,9 +38960,6 @@ "version": "3.0.0", "dev": true }, - "lodash.debounce": { - "version": "4.0.8" - }, "lodash.escape": { "version": "4.0.1", "dev": true @@ -37760,9 +38991,6 @@ "lodash._reinterpolate": "^3.0.0" } }, - "lodash.throttle": { - "version": "4.1.1" - }, "lodash.uniq": { "version": "4.5.0", "dev": true @@ -37771,12 +38999,6 @@ "version": "1.7.1", "dev": true }, - "loose-envify": { - "version": "1.4.0", - "requires": { - "js-tokens": "^3.0.0 || ^4.0.0" - } - }, "lower-case": { "version": "2.0.2", "dev": true, @@ -38255,9 +39477,6 @@ "version": "2.2.0", "dev": true }, - "object-assign": { - "version": "4.1.1" - }, "object-copy": { "version": "0.1.0", "dev": true, @@ -39689,13 +40908,6 @@ } } }, - "react": { - "version": "17.0.2", - "requires": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1" - } - }, "react-app-polyfill": { "version": "2.0.0", "dev": true, @@ -39856,14 +41068,6 @@ } } }, - "react-dom": { - "version": "17.0.2", - "requires": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1", - "scheduler": "^0.20.2" - } - }, "react-error-boundary": { "version": "3.1.3", "dev": true, @@ -39897,15 +41101,6 @@ "version": "0.8.3", "dev": true }, - 
"react-resize-detector": { - "version": "6.7.6", - "requires": { - "@types/resize-observer-browser": "^0.1.6", - "lodash.debounce": "^4.0.8", - "lodash.throttle": "^4.1.1", - "resize-observer-polyfill": "^1.5.1" - } - }, "react-router": { "version": "5.2.1", "requires": { @@ -40205,16 +41400,6 @@ "version": "2.1.1", "dev": true }, - "htmlparser2": { - "version": "6.1.0", - "dev": true, - "requires": { - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "domutils": "^2.5.2", - "entities": "^2.0.0" - } - }, "strip-ansi": { "version": "3.0.1", "dev": true, @@ -40236,9 +41421,6 @@ "version": "1.0.0", "dev": true }, - "resize-observer-polyfill": { - "version": "1.5.1" - }, "resolve-cwd": { "version": "3.0.0", "dev": true, @@ -40655,15 +41837,6 @@ "escape-string-regexp": { "version": "4.0.0" }, - "htmlparser2": { - "version": "6.1.0", - "requires": { - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "domutils": "^2.5.2", - "entities": "^2.0.0" - } - }, "postcss": { "version": "8.3.6", "requires": { @@ -40695,13 +41868,6 @@ "xmlchars": "^2.2.0" } }, - "scheduler": { - "version": "0.20.2", - "requires": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1" - } - }, "schema-utils": { "version": "2.7.1", "dev": true, @@ -43466,6 +44632,17 @@ "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", "dev": true }, + "htmlparser2": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", + "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", + "requires": { + "domelementtype": "^2.0.1", + "domhandler": "^4.0.0", + "domutils": "^2.5.2", + "entities": "^2.0.0" + } + }, "http-proxy-agent": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz", @@ -43487,6 +44664,11 @@ "debug": "4" } }, + "i": { + "version": "0.3.7", + "resolved": 
"https://registry.npmjs.org/i/-/i-0.3.7.tgz", + "integrity": "sha512-FYz4wlXgkQwIPqhzC5TdNMLSE5+GS1IIDJZY/1ZiEPCT2S3COUVZeT5OW4BmW4r5LHLQuOosSwsvnroG9GR59Q==" + }, "ignore": { "version": "4.0.6", "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", @@ -43591,9 +44773,9 @@ "dev": true }, "is-core-module": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.6.0.tgz", - "integrity": "sha512-wShG8vs60jKfPWpF2KZRaAtvt3a20OAn7+IJ6hLPECpSABLcKtFKTTI4ZtH5QcBruBHlq+WsdHWyz0BCZW7svQ==", + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.8.0.tgz", + "integrity": "sha512-vd15qHsaqrRL7dtH6QNuy0ndJmRDrS9HAM1CAiSifNUFv4x1a0CCVsj18hJ1mShxIG6T2i1sO78MkP56r0nYRw==", "dev": true, "requires": { "has": "^1.0.3" @@ -43619,9 +44801,9 @@ "dev": true }, "is-glob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", - "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "requires": { "is-extglob": "^2.1.1" } @@ -43661,6 +44843,12 @@ "has-tostringtag": "^1.0.0" } }, + "is-shared-array-buffer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.1.tgz", + "integrity": "sha512-IU0NmyknYZN0rChcKhRO1X8LYz5Isj/Fsqh8NJOSf+N/hCOTwy29F32Ik7a+QszE63IdvmwdTPDd6cZ5pg4cwA==", + "dev": true + }, "is-stream": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", @@ -43691,6 +44879,21 @@ "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", "dev": true }, + "is-unicode-supported": { + "version": "0.1.0", + "resolved": 
"https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true + }, + "is-weakref": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.1.tgz", + "integrity": "sha512-b2jKc2pQZjaeFYWEf7ScFj+Be1I+PXmlu572Q8coTXZ+LD/QQZ7ShPMst8h16riVgyXTQwUsFEl74mDvc/3MHQ==", + "dev": true, + "requires": { + "call-bind": "^1.0.0" + } + }, "is-windows": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", @@ -43873,26 +45076,6 @@ "type-check": "~0.4.0" } }, - "load-json-file": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-4.0.0.tgz", - "integrity": "sha1-L19Fq5HjMhYjT9U62rZo607AmTs=", - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "parse-json": "^4.0.0", - "pify": "^3.0.0", - "strip-bom": "^3.0.0" - }, - "dependencies": { - "strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", - "dev": true - } - } - }, "loader-utils": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-1.4.0.tgz", @@ -43937,6 +45120,11 @@ "integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8=", "dev": true }, + "lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168=" + }, "lodash.flattendeep": { "version": "4.4.0", "resolved": "https://registry.npmjs.org/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz", @@ -43955,6 +45143,11 @@ "integrity": "sha1-2HV7HagH3eJIFrDWqEvqGnYjCyM=", "dev": true }, + "lodash.throttle": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.throttle/-/lodash.throttle-4.1.1.tgz", + "integrity": 
"sha1-wj6RtxAkKscMN/HhzaknTMOb8vQ=" + }, "lodash.truncate": { "version": "4.4.2", "resolved": "https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz", @@ -43966,10 +45159,19 @@ "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.0.0.tgz", "integrity": "sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA==", "dev": true, + "peer": true, "requires": { "chalk": "^4.0.0" } }, + "loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "requires": { + "js-tokens": "^3.0.0 || ^4.0.0" + } + }, "lru-cache": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", @@ -44056,6 +45258,7 @@ "resolved": "https://registry.npmjs.org/mocha/-/mocha-8.4.0.tgz", "integrity": "sha512-hJaO0mwDXmZS4ghXsvPVriOhsxQ7ofcpQdm8dE+jISUOKopitvnXFQmpRR7jd2K6VBG6E26gU3IAbXXGIbu4sQ==", "dev": true, + "peer": true, "requires": { "@ungap/promise-all-settled": "1.1.2", "ansi-colors": "4.1.1", @@ -44088,13 +45291,15 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true + "dev": true, + "peer": true }, "debug": { "version": "4.3.1", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", "dev": true, + "peer": true, "requires": { "ms": "2.1.2" }, @@ -44103,7 +45308,8 @@ "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true + "dev": true, + "peer": true } } }, @@ -44112,6 +45318,7 
@@ "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", "dev": true, + "peer": true, "requires": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" @@ -44122,6 +45329,7 @@ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.0.0.tgz", "integrity": "sha512-pqon0s+4ScYUvX30wxQi3PogGFAlUyH0awepWvwkj4jD4v+ova3RiYw8bmA6x2rDrEaj8i/oWKoRxpVNW+Re8Q==", "dev": true, + "peer": true, "requires": { "argparse": "^2.0.1" } @@ -44131,6 +45339,7 @@ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", "dev": true, + "peer": true, "requires": { "p-locate": "^5.0.0" } @@ -44139,13 +45348,15 @@ "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true + "dev": true, + "peer": true }, "p-limit": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "dev": true, + "peer": true, "requires": { "yocto-queue": "^0.1.0" } @@ -44155,6 +45366,7 @@ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", "dev": true, + "peer": true, "requires": { "p-limit": "^3.0.2" } @@ -44163,13 +45375,15 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "dev": true + "dev": true, + "peer": true }, "supports-color": { "version": "8.1.1", "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", "dev": true, + "peer": true, "requires": { "has-flag": "^4.0.0" } @@ -44186,7 +45400,8 @@ "version": "3.1.20", "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.20.tgz", "integrity": "sha512-a1cQNyczgKbLX9jwbS/+d7W8fX/RfgYR7lVWwWOGIPNgK2m0MWvrGF6/m4kk6U3QcFMnZf3RIhL0v2Jgh/0Uxw==", - "dev": true + "dev": true, + "peer": true }, "natural-compare": { "version": "1.4.0", @@ -44401,6 +45616,11 @@ } } }, + "object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=" + }, "object-inspect": { "version": "1.11.0", "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.11.0.tgz", @@ -44425,14 +45645,14 @@ } }, "object.values": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.4.tgz", - "integrity": "sha512-TnGo7j4XSnKQoK3MfvkzqKCi0nVe/D9I9IjwTNYdb/fxYHpjrluHVOgw0AF6jrRFGMPHdfuidR09tIDiIvnaSg==", + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.5.tgz", + "integrity": "sha512-QUZRW0ilQ3PnPpbNtgdNV1PDbEqLIiSFB3l+EnGtBQ/8SUTLj1PZwtQHABZtLgwpJZTSZhuGLOGk57Drx2IvYg==", "dev": true, "requires": { "call-bind": "^1.0.2", "define-properties": "^1.1.3", - "es-abstract": "^1.18.2" + "es-abstract": "^1.19.1" } }, "once": { @@ -44512,16 +45732,6 @@ "callsites": "^3.0.0" } }, - "parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", - "dev": true, - "requires": { - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1" - } - }, "path-exists": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", @@ -44563,12 
+45773,6 @@ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz", "integrity": "sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==" }, - "pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", - "dev": true - }, "pkg-dir": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-2.0.0.tgz", @@ -44578,15 +45782,6 @@ "find-up": "^2.1.0" } }, - "pkg-up": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-2.0.0.tgz", - "integrity": "sha1-yBmscoBZpGHKscOImivjxJoATX8=", - "dev": true, - "requires": { - "find-up": "^2.1.0" - } - }, "prelude-ls": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", @@ -44594,9 +45789,9 @@ "dev": true }, "prettier": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.4.0.tgz", - "integrity": "sha512-DsEPLY1dE5HF3BxCRBmD4uYZ+5DCbvatnolqTqcxEgKVZnL2kUfyu7b8pPQ5+hTBkdhU9SLUmK0/pHb07RE4WQ==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.4.1.tgz", + "integrity": "sha512-9fbDAXSBcc6Bs1mZrDYb3XKzDLm4EXXL9sC1LqKP5rZkT6KRr/rf9amVUcODVXgguK/isJz0d0hP72WeaKWsvA==", "dev": true }, "prettier-linter-helpers": { @@ -44662,36 +45857,34 @@ "safe-buffer": "^5.1.0" } }, - "read-pkg": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz", - "integrity": "sha1-nLxoaXj+5l0WwA4rGcI3/Pbjg4k=", - "dev": true, + "react": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react/-/react-17.0.2.tgz", + "integrity": "sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA==", "requires": { - "load-json-file": "^4.0.0", - "normalize-package-data": "^2.3.2", - "path-type": "^3.0.0" - }, - "dependencies": { - "path-type": { - "version": "3.0.0", - 
"resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", - "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", - "dev": true, - "requires": { - "pify": "^3.0.0" - } - } + "loose-envify": "^1.1.0", + "object-assign": "^4.1.1" } }, - "read-pkg-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-3.0.0.tgz", - "integrity": "sha1-PtSWaF26D4/hGNBpHcUfSh/5bwc=", - "dev": true, + "react-dom": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-17.0.2.tgz", + "integrity": "sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA==", "requires": { - "find-up": "^2.0.0", - "read-pkg": "^3.0.0" + "loose-envify": "^1.1.0", + "object-assign": "^4.1.1", + "scheduler": "^0.20.2" + } + }, + "react-resize-detector": { + "version": "6.7.6", + "resolved": "https://registry.npmjs.org/react-resize-detector/-/react-resize-detector-6.7.6.tgz", + "integrity": "sha512-/6RZlul1yePSoYJxWxmmgjO320moeLC/khrwpEVIL+D2EjLKhqOwzFv+H8laMbImVj7Zu4FlMa0oA7au3/ChjQ==", + "requires": { + "@types/resize-observer-browser": "^0.1.6", + "lodash.debounce": "^4.0.8", + "lodash.throttle": "^4.1.1", + "resize-observer-polyfill": "^1.5.1" } }, "readable-stream": { @@ -44714,6 +45907,7 @@ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==", "dev": true, + "peer": true, "requires": { "picomatch": "^2.2.1" } @@ -44751,6 +45945,11 @@ "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", "dev": true }, + "resize-observer-polyfill": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz", + "integrity": 
"sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg==" + }, "resolve": { "version": "1.20.0", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz", @@ -44797,6 +45996,15 @@ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", "dev": true }, + "scheduler": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.20.2.tgz", + "integrity": "sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ==", + "requires": { + "loose-envify": "^1.1.0", + "object-assign": "^4.1.1" + } + }, "semver": { "version": "7.3.5", "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz", @@ -45455,6 +46663,7 @@ "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==", "dev": true, + "peer": true, "requires": { "string-width": "^1.0.2 || 2" }, @@ -45463,19 +46672,22 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", - "dev": true + "dev": true, + "peer": true }, "is-fullwidth-code-point": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true + "dev": true, + "peer": true }, "string-width": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", "dev": true, + "peer": true, "requires": { "is-fullwidth-code-point": "^2.0.0", "strip-ansi": "^4.0.0" @@ -45486,6 +46698,7 @@ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", "integrity": 
"sha1-qEeQIusaw2iocTibY1JixQXuNo8=", "dev": true, + "peer": true, "requires": { "ansi-regex": "^3.0.0" } @@ -45502,7 +46715,8 @@ "version": "6.1.0", "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.1.0.tgz", "integrity": "sha512-toV7q9rWNYha963Pl/qyeZ6wG+3nnsyvolaNUS8+R5Wtw6qJPTxIlOP1ZSvcGhEJw+l3HMMmtiNo9Gl61G4GVg==", - "dev": true + "dev": true, + "peer": true }, "wrap-ansi": { "version": "7.0.0", diff --git a/web/ui/react-app/.env b/web/ui/react-app/.env new file mode 100644 index 000000000..b4cad0c00 --- /dev/null +++ b/web/ui/react-app/.env @@ -0,0 +1,3 @@ +# This ensures that all links in the generated asset bundle will be relative, +# so that assets are loaded correctly even when a path prefix is used. +PUBLIC_URL=. diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 28a83358a..8e9e8f02e 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -7,24 +7,24 @@ "@codemirror/closebrackets": "^0.19.0", "@codemirror/commands": "^0.19.4", "@codemirror/comment": "^0.19.0", - "@codemirror/highlight": "^0.19.5", + "@codemirror/highlight": "^0.19.6", "@codemirror/history": "^0.19.0", - "@codemirror/language": "^0.19.3", + "@codemirror/language": "^0.19.4", "@codemirror/lint": "^0.19.1", "@codemirror/matchbrackets": "^0.19.1", "@codemirror/search": "^0.19.2", - "@codemirror/state": "^0.19.2", + "@codemirror/state": "^0.19.5", "@codemirror/view": "^0.19.7", "@forevolve/bootstrap-dark": "^1.0.0", "@fortawesome/fontawesome-svg-core": "^1.2.14", "@fortawesome/free-solid-svg-icons": "^5.7.1", "@fortawesome/react-fontawesome": "^0.1.4", "@nexucis/fuzzy": "^0.3.0", - "bootstrap": "^4.6.0", + "bootstrap": "^5.1.3", "codemirror-promql": "0.18.0", "css.escape": "^1.5.1", "downshift": "^3.4.8", - "i": "^0.3.6", + "i": "^0.3.7", "jquery": "^3.5.1", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.24.0", @@ -68,15 +68,15 @@ "@types/enzyme": "^3.10.9", "@types/flot": "0.0.32", "@types/jest": "^27.0.1", - 
"@types/jquery": "^3.5.1", - "@types/node": "^16.7.6", + "@types/jquery": "^3.5.8", + "@types/node": "^16.11.7", "@types/react": "^17.0.19", - "@types/react-copy-to-clipboard": "^5.0.1", + "@types/react-copy-to-clipboard": "^5.0.2", "@types/react-dom": "^17.0.9", - "@types/react-resize-detector": "^5.0.0", - "@types/react-router-dom": "^5.1.8", - "@types/sanitize-html": "^1.20.2", - "@types/sinon": "^10.0.2", + "@types/react-resize-detector": "^6.1.0", + "@types/react-router-dom": "^5.3.2", + "@types/sanitize-html": "^2.5.0", + "@types/sinon": "^10.0.6", "@wojtekmaj/enzyme-adapter-react-17": "^0.6.3", "enzyme": "^3.11.0", "enzyme-to-json": "^3.6.2", @@ -86,7 +86,7 @@ "jest-canvas-mock": "^2.3.1", "jest-fetch-mock": "^3.0.3", "mutationobserver-shim": "^0.3.7", - "prettier": "^2.3.2", + "prettier": "^2.4.1", "react-scripts": "4.0.3", "sinon": "^11.1.2", "typescript": "^4.4.2" diff --git a/web/ui/react-app/public/index.html b/web/ui/react-app/public/index.html index 2671dc6cd..eac493853 100755 --- a/web/ui/react-app/public/index.html +++ b/web/ui/react-app/public/index.html @@ -10,12 +10,15 @@