Merge branch 'main' into alexg/settable-user-agent

Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
Alex Greenbank 2024-11-20 16:13:30 +00:00 committed by GitHub
commit 033eaea415
208 changed files with 7083 additions and 2996 deletions

View file

@@ -12,7 +12,7 @@ jobs:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}

View file

@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'prometheus'
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}

View file

@@ -13,12 +13,12 @@ jobs:
# should also be updated.
image: quay.io/prometheus/golang-builder:1.23-base
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- uses: ./.github/promci/actions/setup_environment
with:
enable_npm: true
- run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 test-flags=""
- run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
- run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
- run: make -C documentation/examples/remote_storage
- run: make -C documentation/examples
@@ -29,8 +29,8 @@ jobs:
container:
image: quay.io/prometheus/golang-builder:1.23-base
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- uses: ./.github/promci/actions/setup_environment
- run: go test --tags=dedupelabels ./...
- run: GOARCH=386 go test ./cmd/prometheus
@@ -48,7 +48,7 @@ jobs:
# The go version in this image should be N-1 wrt test_go.
image: quay.io/prometheus/golang-builder:1.22-base
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- run: make build
# Don't run NPM build; don't run race-detector.
- run: make test GO_ONLY=1 test-flags=""
@@ -62,8 +62,8 @@ jobs:
image: quay.io/prometheus/golang-builder:1.23-base
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- uses: ./.github/promci/actions/setup_environment
with:
enable_go: false
@@ -79,7 +79,7 @@ jobs:
name: Go tests on Windows
runs-on: windows-latest
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: 1.23.x
@@ -96,7 +96,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder:1.23-base
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- run: go install ./cmd/promtool/.
- run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
- run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
@@ -121,8 +121,8 @@ jobs:
matrix:
thread: [ 0, 1, 2 ]
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- uses: ./.github/promci/actions/build
with:
promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
@@ -146,8 +146,8 @@ jobs:
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- uses: ./.github/promci/actions/build
with:
parallelism: 12
@@ -169,7 +169,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Go
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
@@ -182,7 +182,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Go
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
@@ -191,11 +191,11 @@ jobs:
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
with:
args: --verbose
# Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
version: v1.60.2
version: v1.61.0
fuzzing:
uses: ./.github/workflows/fuzzing.yml
if: github.event_name == 'pull_request'
@@ -208,8 +208,8 @@ jobs:
needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- uses: ./.github/promci/actions/publish_main
with:
docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -225,8 +225,8 @@ jobs:
||
(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- uses: ./.github/promci/actions/publish_release
with:
docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -240,10 +240,10 @@ jobs:
needs: [test_ui, codeql]
steps:
- name: Checkout
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- name: Install nodejs
uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4
uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0
with:
node-version-file: "web/ui/.nvmrc"
registry-url: "https://registry.npmjs.org"

View file

@@ -24,7 +24,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Initialize CodeQL
uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10

View file

@@ -18,7 +18,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set docker hub repo name
run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
- name: Push README to Dockerhub
@@ -40,7 +40,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set quay.io org name
run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
- name: Set quay.io repo name

View file

@@ -21,7 +21,7 @@ jobs:
fuzz-seconds: 600
dry-run: false
- name: Upload Crash
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
if: failure() && steps.build.outcome == 'success'
with:
name: artifacts

View file

@@ -13,7 +13,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- run: ./scripts/sync_repo_files.sh
env:
GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}

View file

@@ -21,7 +21,7 @@ jobs:
steps:
- name: "Checkout code"
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # tag=v4.2.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2
with:
persist-credentials: false
@@ -37,7 +37,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # tag=v4.4.0
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # tag=v4.4.3
with:
name: SARIF file
path: results.sarif

View file

@@ -109,7 +109,7 @@ linters-settings:
extra-rules: true
perfsprint:
# Optimizes `fmt.Errorf`.
errorf: false
errorf: true
revive:
# By default, revive will enable only the linting rules that are named in the configuration file.
# So, it's needed to explicitly enable all required rules here.
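With `errorf: true`, perfsprint now flags `fmt.Errorf` calls that contain no format verbs; the Go diffs later in this commit apply exactly that rewrite. A minimal sketch of what the linter enforces (illustrative file, not part of this diff):

```go
package example

import (
	"errors"
	"fmt"
)

// Flagged by perfsprint's errorf rule: no format verbs, so fmt.Errorf's
// formatting machinery is wasted here.
var errBefore = fmt.Errorf("not enough data")

// The suggested replacement: errors.New is equivalent and cheaper.
var errAfter = errors.New("not enough data")
```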

View file

@@ -1,7 +1,7 @@
---
extends: default
ignore: |
ui/react-app/node_modules
**/node_modules
rules:
braces:

View file

@@ -3,45 +3,27 @@
## unreleased
* [CHANGE] Remote-Write: Ability to set the User-Agent header. #15201
* [ENHANCEMENT] OTLP receiver: Also convert metric metadata. #15416
## 3.0.0 / 2024-11-14
This release includes new features such as a brand new UI and UTF-8 support enabled by default. As this marks the first new major version in seven years, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. For users who want to upgrade, we recommend reading through our [migration guide](https://prometheus.io/docs/prometheus/3.0/migration/).
* [CHANGE] Set the `GOMAXPROCS` variable automatically to match the Linux CPU quota. Use `--no-auto-gomaxprocs` to disable it. The `auto-gomaxprocs` feature flag was removed. #15376
* [CHANGE] Set the `GOMEMLIMIT` variable automatically to match the Linux container memory limit. Use `--no-auto-gomemlimit` to disable it. The `auto-gomemlimit` feature flag was removed. #15373
* [CHANGE] Scraping: Remove implicit fallback to the Prometheus text format in case of invalid/missing Content-Type and fail the scrape instead. Add ability to specify a `fallback_scrape_protocol` in the scrape config. #15136
* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941
* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941
## 3.0.0-beta.1 / 2024-10-09
* [CHANGE] Remote-write: default enable_http2 to false. #15219
* [CHANGE] Scraping: normalize "le" and "quantile" label values upon ingestion. #15164
* [CHANGE] Scraping: config `scrape_classic_histograms` was renamed to `always_scrape_classic_histograms`. #15178
* [CHANGE] Config: remove expand-external-labels flag, expand external labels env vars by default. #14657
* [CHANGE] Disallow configuring Alertmanager with the v1 API. #13883
* [CHANGE] regexp `.` now matches all characters (performance improvement). #14505
* [CHANGE] `holt_winters` is now called `double_exponential_smoothing` and moves behind the [experimental-promql-functions feature flag](https://prometheus.io/docs/prometheus/latest/feature_flags/#experimental-promql-functions). #14930
* [CHANGE] API: The OTLP receiver endpoint can now be enabled using `--web.enable-otlp-receiver` instead of `--enable-feature=otlp-write-receiver`. #14894
* [CHANGE] Prometheus will not add or remove port numbers from the target address. `no-default-scrape-port` feature flag removed. #14160
* [CHANGE] Logging: the format of log lines has changed a little, along with the adoption of Go's Structured Logging package. #14906
* [CHANGE] Don't create extra `_created` timeseries if feature-flag `created-timestamp-zero-ingestion' is enabled. #14738
* [CHANGE] Don't create extra `_created` timeseries if feature-flag `created-timestamp-zero-ingestion` is enabled. #14738
* [CHANGE] Float literals and time durations being the same is now a stable feature. #15111
* [ENHANCEMENT] UI: Many fixes and improvements. #14898, #14899, #14907, #14908, #14912, #14913, #14914, #14931, #14940, #14945, #14946, #14972, #14981, #14982, #14994, #15096
* [ENHANCEMENT] UI: Web UI now displays notifications, e.g. when starting up and shutting down. #15082
* [ENHANCEMENT] PromQL: Introduce exponential interpolation for native histograms. #14677
* [ENHANCEMENT] TSDB: Add support for ingestion of out-of-order native histogram samples. #14850, #14546
* [ENHANCEMENT] Alerts: remove metrics for removed Alertmanagers. #13909
* [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694
* [ENHANCEMENT] Kubernetes SD: Support sidecar containers in endpoint discovery. #14929
* [ENHANCEMENT] Consul SD: Support catalog filters. #11224
* [PERF] TSDB: Parallelize deletion of postings after head compaction. #14975
* [PERF] TSDB: Chunk encoding: shorten some write sequences. #14932
* [PERF] TSDB: Grow postings by doubling. #14721
* [PERF] Relabeling: Optimize adding a constant label pair. #12180
* [BUGFIX] Scraping: Unit was missing when using protobuf format. #15095
* [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910
* [BUGFIX] TSDB: Chunks could have one unnecessary zero byte at the end. #14854
* [BUGFIX] "superfluous response.WriteHeader call" messages in log. #14884
* [BUGFIX] PromQL: Unary negation of native histograms. #14821
* [BUGFIX] PromQL: Handle stale marker in native histogram series (e.g. if series goes away and comes back). #15025
* [BUGFIX] Autoreload: Reload invalid yaml files. #14947
## 3.0.0-beta.0 / 2024-09-05
Release 3.0.0-beta.0 includes new features such as a brand new UI and UTF-8 support enabled by default. As this is a new major version, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. Most users should be able to try this release out of the box without any configuration changes.
As is traditional with a beta release, we do **not** recommend users install 3.0.0-beta on critical production systems, but we do want everyone to test it out and find bugs.
* [CHANGE] UI: The old web UI has been replaced by a completely new one that is less cluttered and adds a few new features (PromLens-style tree view, better metrics explorer, "Explain" tab). However, it is still missing some features of the old UI (notably, exemplar display and heatmaps). To switch back to the old UI, you can use the feature flag `--enable-feature=old-ui` for the time being. #14872
* [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. #13904
* [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365
@@ -52,12 +34,55 @@ As is traditional with a beta release, we do **not** recommend users install 3.0
* [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747
* [CHANGE] Remove deprecated `remote-write-receiver`,`promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526
* [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643
* [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875
* [FEATURE] OTLP receiver: Ability to skip UTF-8 normalization using `otlp.translation_strategy = NoUTF8EscapingWithSuffixes` configuration option. #15384
* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769
* [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710
* [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing. #15196
* [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694
* [ENHANCEMENT] UI: Many fixes and improvements. #14898, #14899, #14907, #14908, #14912, #14913, #14914, #14931, #14940, #14945, #14946, #14972, #14981, #14982, #14994, #15096
* [ENHANCEMENT] UI: Web UI now displays notifications, e.g. when starting up and shutting down. #15082
* [ENHANCEMENT] PromQL: Introduce exponential interpolation for native histograms. #14677
* [ENHANCEMENT] TSDB: Add support for ingestion of out-of-order native histogram samples. #14850, #14546
* [ENHANCEMENT] Alerts: remove metrics for removed Alertmanagers. #13909
* [ENHANCEMENT] Kubernetes SD: Support sidecar containers in endpoint discovery. #14929
* [ENHANCEMENT] Consul SD: Support catalog filters. #11224
* [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875
* [PERF] TSDB: Parallelize deletion of postings after head compaction. #14975
* [PERF] TSDB: Chunk encoding: shorten some write sequences. #14932
* [PERF] TSDB: Grow postings by doubling. #14721
* [PERF] Relabeling: Optimize adding a constant label pair. #12180
* [BUGFIX] Scraping: Don't log errors on empty scrapes. #15357
* [BUGFIX] UI: fix selector / series formatting for empty metric names. #15341
* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941
* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941
* [BUGFIX] OTLP receiver: Preserve colons when generating metric names in suffix adding mode (this mode is always enabled, unless one uses Prometheus as a library). #15251
* [BUGFIX] Scraping: Unit was missing when using protobuf format. #15095
* [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910
* [BUGFIX] TSDB: Chunks could have one unnecessary zero byte at the end. #14854
* [BUGFIX] "superfluous response.WriteHeader call" messages in log. #14884
* [BUGFIX] PromQL: Unary negation of native histograms. #14821
* [BUGFIX] PromQL: Handle stale marker in native histogram series (e.g. if series goes away and comes back). #15025
* [BUGFIX] Autoreload: Reload invalid yaml files. #14947
* [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029
## 2.55.0-rc.0 / 2024-09-20
## 2.53.3 / 2024-11-04
* [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps. #14685, #14740
## 2.53.2 / 2024-08-09
Fix a bug where Prometheus would crash with a segmentation fault if a remote-read
request accessed a block on disk at about the same time as TSDB created a new block.
* [BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515, #14523
## 2.55.1 / 2024-11-04
* [BUGFIX] `round()` function did not remove `__name__` label. #15250
## 2.55.0 / 2024-10-22
* [FEATURE] PromQL: Add experimental `info` function. #14495
* [FEATURE] Support UTF-8 characters in label names - feature flag `utf8-names`. #14482, #14880, #14736, #14727
* [FEATURE] Scraping: Add the ability to set custom `http_headers` in config. #14817
* [FEATURE] Scraping: Support feature flag `created-timestamp-zero-ingestion` in OpenMetrics. #14356, #14815
@@ -66,6 +91,7 @@ As is traditional with a beta release, we do **not** recommend users install 3.0
* [FEATURE] Remote-Write: Support Google Cloud Monitoring authorization. #14346
* [FEATURE] Promtool: `tsdb create-blocks` new option to add labels. #14403
* [FEATURE] Promtool: `promtool test` adds `--junit` flag to format results. #14506
* [FEATURE] TSDB: Add `delayed-compaction` feature flag, for people running many Prometheus instances to randomize timing. #12532
* [ENHANCEMENT] OTLP receiver: Warn on exponential histograms with zero count and non-zero sum. #14706
* [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612
* [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379
@@ -77,8 +103,9 @@ As is traditional with a beta release, we do **not** recommend users install 3.0
* [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816
* [ENHANCEMENT] API: Support multiple listening addresses. #14665
* [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934
* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948
* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948, #15120
* [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729
* [BUGFIX] PromQL: make sort_by_label stable. #14985
* [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147
* [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622
* [BUGFIX] Scraping: exemplars could be dropped in protobuf scraping. #14810

View file

@@ -2,6 +2,7 @@ ARG ARCH="amd64"
ARG OS="linux"
FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest
LABEL maintainer="The Prometheus Authors <prometheus-developers@googlegroups.com>"
LABEL org.opencontainers.image.source="https://github.com/prometheus/prometheus"
ARG ARCH="amd64"
ARG OS="linux"

View file

@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.60.2
GOLANGCI_LINT_VERSION ?= v1.61.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))

View file

@@ -158,8 +158,19 @@ This is experimental.
### Prometheus code base
In order to comply with [go mod](https://go.dev/ref/mod#versions) rules,
Prometheus release numbers do not exactly match Go module releases. For the
Prometheus v2.y.z releases, we are publishing equivalent v0.y.z tags.
Prometheus release numbers do not exactly match Go module releases.
For the
Prometheus v3.y.z releases, we are publishing equivalent v0.3y.z tags. The y in v0.3y.z is always padded to two digits, with a leading zero if needed.
Therefore, a user who wants to use Prometheus v3.0.0 as a library could do:
```shell
go get github.com/prometheus/prometheus@v0.300.0
```
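A tiny consumer of that tag could then look like the following sketch (hypothetical program; it assumes only the public `model/labels` package):

```go
package main

import (
	"fmt"

	// Resolves via the v0.300.0 tag when fetched as shown above.
	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	fmt.Println(labels.FromStrings("job", "demo", "instance", "localhost:9090"))
}
```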
For the
Prometheus v2.y.z releases, we published the equivalent v0.y.z tags.
Therefore, a user who wants to use Prometheus v2.35.0 as a library could do:
@@ -177,7 +188,7 @@ For more information on building, running, and developing on the React-based UI,
## More information
* Godoc documentation is available via [pkg.go.dev](https://pkg.go.dev/github.com/prometheus/prometheus). Due to peculiarities of Go Modules, v2.x.y will be displayed as v0.x.y.
* Godoc documentation is available via [pkg.go.dev](https://pkg.go.dev/github.com/prometheus/prometheus). Due to peculiarities of Go Modules, v3.y.z will be displayed as v0.3y.z (the y in v0.3y.z is always padded to two digits, with a leading zero if needed), while v2.y.z will be displayed as v0.y.z.
* See the [Community page](https://prometheus.io/community) for how to reach the Prometheus developers and users on various communication channels.
## Contributing

View file

@@ -1 +1 @@
3.0.0-beta.1
3.0.0

View file

@@ -27,6 +27,7 @@ import (
"os"
"os/signal"
"path/filepath"
goregexp "regexp" //nolint:depguard // The Prometheus client library requires us to pass a regexp from this package.
"runtime"
"runtime/debug"
"strconv"
@@ -190,19 +191,19 @@ type flagConfig struct {
queryConcurrency int
queryMaxSamples int
RemoteFlushDeadline model.Duration
nameEscapingScheme string
maxNotificationsSubscribers int
enableAutoReload bool
autoReloadInterval model.Duration
featureList []string
memlimitRatio float64
maxprocsEnable bool
memlimitEnable bool
memlimitRatio float64
featureList []string
// These options are extracted from featureList
// for ease of use.
enablePerStepStats bool
enableAutoGOMAXPROCS bool
enableAutoGOMEMLIMIT bool
enableConcurrentRuleEval bool
prometheusURL string
@@ -234,18 +235,12 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
case "promql-per-step-stats":
c.enablePerStepStats = true
logger.Info("Experimental per-step statistics reporting")
case "auto-gomaxprocs":
c.enableAutoGOMAXPROCS = true
logger.Info("Automatically set GOMAXPROCS to match Linux container CPU quota")
case "auto-reload-config":
c.enableAutoReload = true
if s := time.Duration(c.autoReloadInterval).Seconds(); s > 0 && s < 1 {
c.autoReloadInterval, _ = model.ParseDuration("1s")
}
logger.Info("Enabled automatic configuration file reloading. Checking for configuration changes every", "interval", c.autoReloadInterval)
case "auto-gomemlimit":
c.enableAutoGOMEMLIMIT = true
logger.Info("Automatically set GOMEMLIMIT to match Linux container or system memory limit")
case "concurrent-rule-eval":
c.enableConcurrentRuleEval = true
logger.Info("Experimental concurrent rule evaluation enabled.")
@@ -301,6 +296,7 @@ func main() {
collectors.WithGoCollectorRuntimeMetrics(
collectors.MetricsGC,
collectors.MetricsScheduler,
collectors.GoRuntimeMetricsRule{Matcher: goregexp.MustCompile(`^/sync/mutex/wait/total:seconds$`)},
),
),
)
@@ -332,6 +328,10 @@ func main() {
a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry. Can be repeated.").
Default("0.0.0.0:9090").StringsVar(&cfg.web.ListenAddresses)
a.Flag("auto-gomaxprocs", "Automatically set GOMAXPROCS to match Linux container CPU quota").
Default("true").BoolVar(&cfg.maxprocsEnable)
a.Flag("auto-gomemlimit", "Automatically set GOMEMLIMIT to match Linux container or system memory limit").
Default("true").BoolVar(&cfg.memlimitEnable)
a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory").
Default("0.9").FloatVar(&cfg.memlimitRatio)
@@ -433,6 +433,9 @@ func main() {
serverOnlyFlag(a, "storage.tsdb.samples-per-chunk", "Target number of samples per chunk.").
Default("120").Hidden().IntVar(&cfg.tsdb.SamplesPerChunk)
serverOnlyFlag(a, "storage.tsdb.delayed-compaction.max-percent", "Sets the upper limit for the random compaction delay, specified as a percentage of the head chunk range. 100 means the compaction can be delayed by up to the entire head chunk range. Only effective when the delayed-compaction feature flag is enabled.").
Default("10").Hidden().IntVar(&cfg.tsdb.CompactionDelayMaxPercent)
agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage.").
Default("data-agent/").StringVar(&cfg.agentStoragePath)
@@ -512,7 +515,7 @@ func main() {
a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList)
a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)
@@ -548,15 +551,6 @@ func main() {
os.Exit(1)
}
if cfg.nameEscapingScheme != "" {
scheme, err := model.ToEscapingScheme(cfg.nameEscapingScheme)
if err != nil {
fmt.Fprintf(os.Stderr, `Invalid name escaping scheme: %q; Needs to be one of "values", "underscores", or "dots"`, cfg.nameEscapingScheme)
os.Exit(1)
}
model.NameEscapingScheme = scheme
}
if agentMode && len(serverOnlyFlags) > 0 {
fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags)
os.Exit(3)
@@ -663,6 +657,12 @@ func main() {
cfg.tsdb.MaxBlockDuration = maxBlockDuration
}
// Delayed compaction checks
if cfg.tsdb.EnableDelayedCompaction && (cfg.tsdb.CompactionDelayMaxPercent > 100 || cfg.tsdb.CompactionDelayMaxPercent <= 0) {
logger.Warn("The --storage.tsdb.delayed-compaction.max-percent should have a value between 1 and 100. Using default", "default", tsdb.DefaultCompactionDelayMaxPercent)
cfg.tsdb.CompactionDelayMaxPercent = tsdb.DefaultCompactionDelayMaxPercent
}
}
noStepSubqueryInterval := &safePromQLNoStepSubqueryInterval{}
@@ -757,7 +757,7 @@ func main() {
ruleManager *rules.Manager
)
if cfg.enableAutoGOMAXPROCS {
if cfg.maxprocsEnable {
l := func(format string, a ...interface{}) {
logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs")
}
@@ -766,7 +766,7 @@ func main() {
}
}
if cfg.enableAutoGOMEMLIMIT {
if cfg.memlimitEnable {
if _, err := memlimit.SetGoMemLimitWithOpts(
memlimit.WithRatio(cfg.memlimitRatio),
memlimit.WithProvider(
@@ -1144,9 +1144,8 @@ func main() {
if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
logger.Error("Error reloading config", "err", err)
} else if cfg.enableAutoReload {
if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil {
checksum = currentChecksum
} else {
checksum, err = config.GenerateChecksum(cfg.configFile)
if err != nil {
logger.Error("Failed to generate checksum during configuration reload", "err", err)
}
}
@@ -1157,9 +1156,8 @@ func main() {
} else {
rc <- nil
if cfg.enableAutoReload {
if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil {
checksum = currentChecksum
} else {
checksum, err = config.GenerateChecksum(cfg.configFile)
if err != nil {
logger.Error("Failed to generate checksum during configuration reload", "err", err)
}
}
@@ -1170,6 +1168,7 @@ func main() {
}
currentChecksum, err := config.GenerateChecksum(cfg.configFile)
if err != nil {
checksum = currentChecksum
logger.Error("Failed to generate checksum during configuration reload", "err", err)
} else if currentChecksum == checksum {
continue
@@ -1639,6 +1638,9 @@ func (s *readyStorage) Appender(ctx context.Context) storage.Appender {
type notReadyAppender struct{}
// SetOptions does nothing in this appender implementation.
func (n notReadyAppender) SetOptions(opts *storage.AppendOptions) {}
func (n notReadyAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
return 0, tsdb.ErrNotReady
}
@@ -1793,6 +1795,7 @@ type tsdbOptions struct {
EnableMemorySnapshotOnShutdown bool
EnableNativeHistograms bool
EnableDelayedCompaction bool
CompactionDelayMaxPercent int
EnableOverlappingCompaction bool
EnableOOONativeHistograms bool
}
@@ -1817,6 +1820,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
EnableOOONativeHistograms: opts.EnableOOONativeHistograms,
OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow,
EnableDelayedCompaction: opts.EnableDelayedCompaction,
CompactionDelayMaxPercent: opts.CompactionDelayMaxPercent,
EnableOverlappingCompaction: opts.EnableOverlappingCompaction,
}
}
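The `auto-gomaxprocs` and `auto-gomemlimit` feature flags removed above come back as first-class boolean flags defaulting to true. Prometheus declares its flags with kingpin, which derives a `--no-` negation for boolean flags automatically, so `--no-auto-gomaxprocs` and `--no-auto-gomemlimit` become the opt-outs (as the CHANGELOG entries note). A self-contained sketch of that mechanism (demo program, not Prometheus code):

```go
package main

import (
	"fmt"
	"os"

	"github.com/alecthomas/kingpin/v2"
)

func main() {
	app := kingpin.New("demo", "kingpin boolean-flag demo")
	auto := app.Flag("auto-gomaxprocs", "Automatically set GOMAXPROCS to match the CPU quota.").
		Default("true").Bool()
	kingpin.MustParse(app.Parse(os.Args[1:]))
	// `demo` prints true; `demo --no-auto-gomaxprocs` prints false.
	fmt.Println("auto-gomaxprocs:", *auto)
}
```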

View file

@@ -0,0 +1,229 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bufio"
"encoding/json"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/util/testutil"
)
const configReloadMetric = "prometheus_config_last_reload_successful"
func TestAutoReloadConfig_ValidToValid(t *testing.T) {
steps := []struct {
configText string
expectedInterval string
expectedMetric float64
}{
{
configText: `
global:
scrape_interval: 30s
`,
expectedInterval: "30s",
expectedMetric: 1,
},
{
configText: `
global:
scrape_interval: 15s
`,
expectedInterval: "15s",
expectedMetric: 1,
},
{
configText: `
global:
scrape_interval: 30s
`,
expectedInterval: "30s",
expectedMetric: 1,
},
}
runTestSteps(t, steps)
}
func TestAutoReloadConfig_ValidToInvalidToValid(t *testing.T) {
steps := []struct {
configText string
expectedInterval string
expectedMetric float64
}{
{
configText: `
global:
scrape_interval: 30s
`,
expectedInterval: "30s",
expectedMetric: 1,
},
{
configText: `
global:
scrape_interval: 15s
invalid_syntax
`,
expectedInterval: "30s",
expectedMetric: 0,
},
{
configText: `
global:
scrape_interval: 30s
`,
expectedInterval: "30s",
expectedMetric: 1,
},
}
runTestSteps(t, steps)
}
func runTestSteps(t *testing.T, steps []struct {
configText string
expectedInterval string
expectedMetric float64
},
) {
configDir := t.TempDir()
configFilePath := filepath.Join(configDir, "prometheus.yml")
t.Logf("Config file path: %s", configFilePath)
require.NoError(t, os.WriteFile(configFilePath, []byte(steps[0].configText), 0o644), "Failed to write initial config file")
port := testutil.RandomUnprivilegedPort(t)
runPrometheusWithLogging(t, configFilePath, port)
baseURL := "http://localhost:" + strconv.Itoa(port)
require.Eventually(t, func() bool {
resp, err := http.Get(baseURL + "/-/ready")
if err != nil {
return false
}
defer resp.Body.Close()
return resp.StatusCode == http.StatusOK
}, 5*time.Second, 100*time.Millisecond, "Prometheus didn't become ready in time")
for i, step := range steps {
t.Logf("Step %d", i)
require.NoError(t, os.WriteFile(configFilePath, []byte(step.configText), 0o644), "Failed to write config file for step")
require.Eventually(t, func() bool {
return verifyScrapeInterval(t, baseURL, step.expectedInterval) &&
verifyConfigReloadMetric(t, baseURL, step.expectedMetric)
}, 10*time.Second, 500*time.Millisecond, "Prometheus config reload didn't happen in time")
}
}
func verifyScrapeInterval(t *testing.T, baseURL, expectedInterval string) bool {
resp, err := http.Get(baseURL + "/api/v1/status/config")
require.NoError(t, err)
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
config := struct {
Data struct {
YAML string `json:"yaml"`
} `json:"data"`
}{}
require.NoError(t, json.Unmarshal(body, &config))
return strings.Contains(config.Data.YAML, "scrape_interval: "+expectedInterval)
}
func verifyConfigReloadMetric(t *testing.T, baseURL string, expectedValue float64) bool {
resp, err := http.Get(baseURL + "/metrics")
require.NoError(t, err)
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
lines := string(body)
var actualValue float64
found := false
for _, line := range strings.Split(lines, "\n") {
if strings.HasPrefix(line, configReloadMetric) {
parts := strings.Fields(line)
if len(parts) >= 2 {
actualValue, err = strconv.ParseFloat(parts[1], 64)
require.NoError(t, err)
found = true
break
}
}
}
return found && actualValue == expectedValue
}
func captureLogsToTLog(t *testing.T, r io.Reader) {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
t.Log(scanner.Text())
}
if err := scanner.Err(); err != nil {
t.Logf("Error reading logs: %v", err)
}
}
func runPrometheusWithLogging(t *testing.T, configFilePath string, port int) {
stdoutPipe, stdoutWriter := io.Pipe()
stderrPipe, stderrWriter := io.Pipe()
var wg sync.WaitGroup
wg.Add(2)
prom := exec.Command(promPath, "-test.main", "--enable-feature=auto-reload-config", "--config.file="+configFilePath, "--config.auto-reload-interval=1s", "--web.listen-address=0.0.0.0:"+strconv.Itoa(port))
prom.Stdout = stdoutWriter
prom.Stderr = stderrWriter
go func() {
defer wg.Done()
captureLogsToTLog(t, stdoutPipe)
}()
go func() {
defer wg.Done()
captureLogsToTLog(t, stderrPipe)
}()
t.Cleanup(func() {
prom.Process.Kill()
prom.Wait()
stdoutWriter.Close()
stderrWriter.Close()
wg.Wait()
})
require.NoError(t, prom.Start())
}

View file

@@ -34,8 +34,8 @@ import (
)
var (
errNotNativeHistogram = fmt.Errorf("not a native histogram")
errNotEnoughData = fmt.Errorf("not enough data")
errNotNativeHistogram = errors.New("not a native histogram")
errNotEnoughData = errors.New("not enough data")
outputHeader = `Bucket stats for each histogram series over time
------------------------------------------------
@@ -169,7 +169,7 @@ func querySamples(ctx context.Context, api v1.API, query string, end time.Time)
matrix, ok := values.(model.Matrix)
if !ok {
return nil, fmt.Errorf("query of buckets resulted in non-Matrix")
return nil, errors.New("query of buckets resulted in non-Matrix")
}
return matrix, nil
@@ -259,7 +259,7 @@ func getBucketCountsAtTime(matrix model.Matrix, numBuckets, timeIdx int) ([]int,
prev := matrix[i].Values[timeIdx]
// Assume the results are nicely aligned.
if curr.Timestamp != prev.Timestamp {
return counts, fmt.Errorf("matrix result is not time aligned")
return counts, errors.New("matrix result is not time aligned")
}
counts[i+1] = int(curr.Value - prev.Value)
}

View file

@@ -109,6 +109,7 @@ func init() {
}
func TestGetBucketCountsAtTime(t *testing.T) {
t.Parallel()
cases := []struct {
matrix model.Matrix
length int
@@ -137,6 +138,7 @@ func TestGetBucketCountsAtTime(t *testing.T) {
for _, c := range cases {
t.Run(fmt.Sprintf("exampleMatrix@%d", c.timeIdx), func(t *testing.T) {
t.Parallel()
res, err := getBucketCountsAtTime(c.matrix, c.length, c.timeIdx)
require.NoError(t, err)
require.Equal(t, c.expected, res)
@@ -145,6 +147,7 @@ func TestGetBucketCountsAtTime(t *testing.T) {
}
func TestCalcClassicBucketStatistics(t *testing.T) {
t.Parallel()
cases := []struct {
matrix model.Matrix
expected *statistics
@@ -162,6 +165,7 @@ func TestCalcClassicBucketStatistics(t *testing.T) {
for i, c := range cases {
t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
t.Parallel()
res, err := calcClassicBucketStatistics(c.matrix)
require.NoError(t, err)
require.Equal(t, c.expected, res)

View file

@@ -49,7 +49,7 @@ func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) {
_, ts, _ := p.Series()
if ts == nil {
return 0, 0, fmt.Errorf("expected timestamp for series got none")
return 0, 0, errors.New("expected timestamp for series got none")
}
if *ts > maxt {

View file

@@ -86,6 +86,7 @@ func testBlocks(t *testing.T, db *tsdb.DB, expectedMinTime, expectedMaxTime, exp
}
func TestBackfill(t *testing.T) {
t.Parallel()
tests := []struct {
ToParse string
IsOk bool
@@ -729,6 +730,7 @@ after_eof 1 2
}
for _, test := range tests {
t.Run(test.Description, func(t *testing.T) {
t.Parallel()
t.Logf("Test:%s", test.Description)
outputDir := t.TempDir()

View file

@@ -444,7 +444,7 @@ func checkExperimental(f bool) {
}
}
var errLint = fmt.Errorf("lint error")
var errLint = errors.New("lint error")
type lintConfig struct {
all bool

View file

@@ -60,6 +60,7 @@ func TestMain(m *testing.M) {
}
func TestQueryRange(t *testing.T) {
t.Parallel()
s, getRequest := mockServer(200, `{"status": "success", "data": {"resultType": "matrix", "result": []}}`)
defer s.Close()
@@ -83,6 +84,7 @@ }
}
func TestQueryInstant(t *testing.T) {
t.Parallel()
s, getRequest := mockServer(200, `{"status": "success", "data": {"resultType": "vector", "result": []}}`)
defer s.Close()
@@ -114,6 +116,7 @@ func mockServer(code int, body string) (*httptest.Server, func() *http.Request)
}
func TestCheckSDFile(t *testing.T) {
t.Parallel()
cases := []struct {
name string
file string
@@ -144,6 +147,7 @@ func TestCheckSDFile(t *testing.T) {
}
for _, test := range cases {
t.Run(test.name, func(t *testing.T) {
t.Parallel()
_, err := checkSDFile(test.file)
if test.err != "" {
require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error())
@@ -155,6 +159,7 @@ func TestCheckSDFile(t *testing.T) {
}
func TestCheckDuplicates(t *testing.T) {
t.Parallel()
cases := []struct {
name string
ruleFile string
@@ -179,6 +184,7 @@ func TestCheckDuplicates(t *testing.T) {
for _, test := range cases {
c := test
t.Run(c.name, func(t *testing.T) {
t.Parallel()
rgs, err := rulefmt.ParseFile(c.ruleFile)
require.Empty(t, err)
dups := checkDuplicates(rgs.Groups)
@@ -198,6 +204,7 @@ func BenchmarkCheckDuplicates(b *testing.B) {
}
func TestCheckTargetConfig(t *testing.T) {
t.Parallel()
cases := []struct {
name string
file string
@@ -226,6 +233,7 @@ func TestCheckTargetConfig(t *testing.T) {
}
for _, test := range cases {
t.Run(test.name, func(t *testing.T) {
t.Parallel()
_, err := checkConfig(false, "testdata/"+test.file, false)
if test.err != "" {
require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error())
@@ -237,6 +245,7 @@ func TestCheckTargetConfig(t *testing.T) {
}
func TestCheckConfigSyntax(t *testing.T) {
t.Parallel()
cases := []struct {
name string
file string
@@ -309,6 +318,7 @@ func TestCheckConfigSyntax(t *testing.T) {
}
for _, test := range cases {
t.Run(test.name, func(t *testing.T) {
t.Parallel()
_, err := checkConfig(false, "testdata/"+test.file, test.syntaxOnly)
expectedErrMsg := test.err
if strings.Contains(runtime.GOOS, "windows") {
@@ -324,6 +334,7 @@ func TestCheckConfigSyntax(t *testing.T) {
}
func TestAuthorizationConfig(t *testing.T) {
t.Parallel()
cases := []struct {
name string
file string
@@ -343,6 +354,7 @@ func TestAuthorizationConfig(t *testing.T) {
for _, test := range cases {
t.Run(test.name, func(t *testing.T) {
t.Parallel()
_, err := checkConfig(false, "testdata/"+test.file, false)
if test.err != "" {
require.ErrorContains(t, err, test.err, "Expected error to contain %q, got %q", test.err, err.Error())
@@ -357,6 +369,7 @@ func TestCheckMetricsExtended(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Skipping on windows")
}
t.Parallel()
f, err := os.Open("testdata/metrics-test.prom")
require.NoError(t, err)
@@ -393,6 +406,7 @@ func TestExitCodes(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
t.Parallel()
for _, c := range []struct {
file string
@@ -417,8 +431,10 @@ func TestExitCodes(t *testing.T) {
},
} {
t.Run(c.file, func(t *testing.T) {
t.Parallel()
for _, lintFatal := range []bool{true, false} {
t.Run(strconv.FormatBool(lintFatal), func(t *testing.T) {
t.Parallel()
args := []string{"-test.main", "check", "config", "testdata/" + c.file}
if lintFatal {
args = append(args, "--lint-fatal")
@@ -449,6 +465,7 @@ func TestDocumentation(t *testing.T) {
if runtime.GOOS == "windows" {
t.SkipNow()
}
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
@@ -542,16 +559,19 @@ func TestCheckRules(t *testing.T) {
func TestCheckRulesWithRuleFiles(t *testing.T) {
t.Run("rules-good", func(t *testing.T) {
t.Parallel()
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules.yml")
require.Equal(t, successExitCode, exitCode, "")
})
t.Run("rules-bad", func(t *testing.T) {
t.Parallel()
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules-bad.yml")
require.Equal(t, failureExitCode, exitCode, "")
})
t.Run("rules-lint-fatal", func(t *testing.T) {
t.Parallel()
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true), "./testdata/prometheus-rules.lint.yml")
require.Equal(t, lintErrExitCode, exitCode, "")
})
@@ -561,6 +581,7 @@ func TestTSDBDumpCommand(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
t.Parallel()
storage := promqltest.LoadedStorage(t, `
load 1m
@@ -593,6 +614,7 @@ func TestTSDBDumpCommand(t *testing.T) {
},
} {
t.Run(c.name, func(t *testing.T) {
t.Parallel()
args := []string{"-test.main", "tsdb", c.subCmd, storage.Dir()}
cmd := exec.Command(promtoolPath, args...)
require.NoError(t, cmd.Run())
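A recurring change across these test files is opting tests and their subtests into Go's parallel test scheduler. A minimal sketch of the pattern (note the `c := test` capture seen above, which is needed before Go 1.22 so parallel subtests do not share the loop variable):

```go
package example

import "testing"

func TestPattern(t *testing.T) {
	t.Parallel() // run this test in parallel with other parallel tests
	for _, tc := range []string{"a", "b"} {
		tc := tc // capture the loop variable for the parallel subtest (pre-Go 1.22)
		t.Run(tc, func(t *testing.T) {
			t.Parallel() // parallel subtests run alongside each other
			_ = tc
		})
	}
}
```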

View file

@@ -43,6 +43,7 @@ const defaultBlockDuration = time.Duration(tsdb.DefaultBlockDuration) * time.Mil
// TestBackfillRuleIntegration is an integration test that runs all the rule importer code to confirm the parts work together.
func TestBackfillRuleIntegration(t *testing.T) {
t.Parallel()
const (
testMaxSampleCount = 50
testValue = 123
@@ -72,6 +73,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
tmpDir := t.TempDir()
ctx := context.Background()
@@ -210,6 +212,7 @@ func createMultiRuleTestFiles(path string) error {
// TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics
// received from Prometheus Query API, including the __name__ label.
func TestBackfillLabels(t *testing.T) {
t.Parallel()
tmpDir := t.TempDir()
ctx := context.Background()
@@ -251,6 +254,7 @@ func TestBackfillLabels(t *testing.T) {
require.NoError(t, err)
t.Run("correct-labels", func(t *testing.T) {
t.Parallel()
selectedSeries := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
for selectedSeries.Next() {
series := selectedSeries.At()

View file

@@ -29,6 +29,7 @@ import (
)
func TestSDCheckResult(t *testing.T) {
t.Parallel()
targetGroups := []*targetgroup.Group{{
Targets: []model.LabelSet{
map[model.LabelName]model.LabelValue{"__address__": "localhost:8080", "foo": "bar"},

View file

@@ -405,7 +405,7 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
}
}
b, err := db.Block(blockID)
b, err := db.Block(blockID, tsdb.DefaultPostingsDecoderFactory)
if err != nil {
return nil, nil, err
}
@@ -662,7 +662,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
fhchk, ok := chk.(*chunkenc.FloatHistogramChunk)
if !ok {
return fmt.Errorf("chunk is not FloatHistogramChunk")
return errors.New("chunk is not FloatHistogramChunk")
}
it := fhchk.Iterator(nil)
bucketCount := 0
@@ -677,7 +677,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
hchk, ok := chk.(*chunkenc.HistogramChunk)
if !ok {
return fmt.Errorf("chunk is not HistogramChunk")
return errors.New("chunk is not HistogramChunk")
}
it := hchk.Iterator(nil)
bucketCount := 0
@@ -733,7 +733,7 @@ func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt i
for _, mset := range matcherSets {
sets = append(sets, q.Select(ctx, true, nil, mset...))
}
ss = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge)
ss = storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge)
} else {
ss = q.Select(ctx, false, nil, matcherSets[0]...)
}

View file

@@ -32,6 +32,7 @@ import (
)
func TestGenerateBucket(t *testing.T) {
t.Parallel()
tcs := []struct {
min, max int
start, end, step int

View file

@@ -26,6 +26,7 @@ import (
)
func TestRulesUnitTest(t *testing.T) {
t.Parallel()
type args struct {
files []string
}
@@ -141,12 +142,14 @@ func TestRulesUnitTest(t *testing.T) {
reuseCount[tt.want] += len(tt.args.files)
}
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
if got := RulesUnitTest(tt.queryOpts, nil, false, false, tt.args.files...); got != tt.want {
t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
}
})
}
t.Run("Junit xml output ", func(t *testing.T) {
t.Parallel()
var buf bytes.Buffer
if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, false, reuseFiles...); got != 1 {
t.Errorf("RulesUnitTestResults() = %v, want 1", got)
@@ -185,6 +188,7 @@ func TestRulesUnitTest(t *testing.T) {
}
func TestRulesUnitTestRun(t *testing.T) {
t.Parallel()
type args struct {
run []string
files []string
@@ -230,6 +234,7 @@ func TestRulesUnitTestRun(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got := RulesUnitTest(tt.queryOpts, tt.args.run, false, false, tt.args.files...)
require.Equal(t, tt.want, got)
})

View file

@@ -30,7 +30,7 @@ import (
"github.com/grafana/regexp"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/sigv4"
"github.com/prometheus/sigv4"
"gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/discovery"
@@ -106,6 +106,18 @@ func Load(s string, logger *slog.Logger) (*Config, error) {
if !b.Labels().IsEmpty() {
cfg.GlobalConfig.ExternalLabels = b.Labels()
}
switch cfg.OTLPConfig.TranslationStrategy {
case UnderscoreEscapingWithSuffixes:
case "":
case NoUTF8EscapingWithSuffixes:
if cfg.GlobalConfig.MetricNameValidationScheme == LegacyValidationConfig {
return nil, errors.New("OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled")
}
default:
return nil, fmt.Errorf("unsupported OTLP translation strategy %q", cfg.OTLPConfig.TranslationStrategy)
}
return cfg, nil
}
@@ -181,13 +193,18 @@ var (
HTTPClientConfig: config.DefaultHTTPClientConfig,
}
DefaultRemoteWriteHTTPClientConfig = config.HTTPClientConfig{
FollowRedirects: true,
EnableHTTP2: false,
}
// DefaultRemoteWriteConfig is the default remote write configuration.
DefaultRemoteWriteConfig = RemoteWriteConfig{
RemoteTimeout: model.Duration(30 * time.Second),
ProtobufMessage: RemoteWriteProtoMsgV1,
QueueConfig: DefaultQueueConfig,
MetadataConfig: DefaultMetadataConfig,
HTTPClientConfig: config.DefaultHTTPClientConfig,
HTTPClientConfig: DefaultRemoteWriteHTTPClientConfig,
}
// DefaultQueueConfig is the default remote queue configuration.
@@ -234,7 +251,9 @@ var (
}
// DefaultOTLPConfig is the default OTLP configuration.
DefaultOTLPConfig = OTLPConfig{}
DefaultOTLPConfig = OTLPConfig{
TranslationStrategy: UnderscoreEscapingWithSuffixes,
}
)
// Config is the top-level configuration for Prometheus's config files.
@@ -1067,7 +1086,7 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
if httpClientConfigAuthEnabled && c.SigV4Config != nil {
return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
return errors.New("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
}
// Check for users putting URLs in target groups.
@@ -1176,6 +1195,7 @@ type RemoteWriteConfig struct {
Name string `yaml:"name,omitempty"`
SendExemplars bool `yaml:"send_exemplars,omitempty"`
SendNativeHistograms bool `yaml:"send_native_histograms,omitempty"`
RoundRobinDNS bool `yaml:"round_robin_dns,omitempty"`
// ProtobufMessage specifies the protobuf message to use against the remote
// receiver as specified in https://prometheus.io/docs/specs/remote_write_spec_2_0/
ProtobufMessage RemoteWriteProtoMsg `yaml:"protobuf_message,omitempty"`
@@ -1397,9 +1417,20 @@ func getGoGCEnv() int {
return DefaultRuntimeConfig.GoGC
}
type translationStrategyOption string
var (
// NoUTF8EscapingWithSuffixes will keep UTF-8 characters as they are, units and type suffixes will still be added.
NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes"
// UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus.
// This option will translate all UTF-8 characters to underscores, while adding units and type suffixes.
UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes"
)
// OTLPConfig is the configuration for writing to the OTLP endpoint.
type OTLPConfig struct {
PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"`
PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"`
TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -1415,7 +1446,7 @@ func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
for i, attr := range c.PromoteResourceAttributes {
attr = strings.TrimSpace(attr)
if attr == "" {
err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute"))
err = errors.Join(err, errors.New("empty promoted OTel resource attribute"))
continue
}
if _, exists := seen[attr]; exists {

View file

@@ -142,7 +142,7 @@ var expectedConf = &Config{
},
},
FollowRedirects: true,
EnableHTTP2: true,
EnableHTTP2: false,
},
},
{
@@ -158,7 +158,7 @@ var expectedConf = &Config{
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
FollowRedirects: true,
EnableHTTP2: true,
EnableHTTP2: false,
},
Headers: map[string]string{"name": "value"},
},
@@ -168,6 +168,7 @@ var expectedConf = &Config{
PromoteResourceAttributes: []string{
"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name",
},
TranslationStrategy: UnderscoreEscapingWithSuffixes,
},
RemoteReadConfigs: []*RemoteReadConfig{
@ -1553,6 +1554,67 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) {
})
}
func TestOTLPAllowUTF8(t *testing.T) {
t.Run("good config", func(t *testing.T) {
fpath := filepath.Join("testdata", "otlp_allow_utf8.good.yml")
verify := func(t *testing.T, conf *Config, err error) {
t.Helper()
require.NoError(t, err)
require.Equal(t, NoUTF8EscapingWithSuffixes, conf.OTLPConfig.TranslationStrategy)
}
t.Run("LoadFile", func(t *testing.T) {
conf, err := LoadFile(fpath, false, promslog.NewNopLogger())
verify(t, conf, err)
})
t.Run("Load", func(t *testing.T) {
content, err := os.ReadFile(fpath)
require.NoError(t, err)
conf, err := Load(string(content), promslog.NewNopLogger())
verify(t, conf, err)
})
})
t.Run("incompatible config", func(t *testing.T) {
fpath := filepath.Join("testdata", "otlp_allow_utf8.incompatible.yml")
verify := func(t *testing.T, err error) {
t.Helper()
require.ErrorContains(t, err, `OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled`)
}
t.Run("LoadFile", func(t *testing.T) {
_, err := LoadFile(fpath, false, promslog.NewNopLogger())
verify(t, err)
})
t.Run("Load", func(t *testing.T) {
content, err := os.ReadFile(fpath)
require.NoError(t, err)
_, err = Load(string(content), promslog.NewNopLogger())
t.Log("err", err)
verify(t, err)
})
})
t.Run("bad config", func(t *testing.T) {
fpath := filepath.Join("testdata", "otlp_allow_utf8.bad.yml")
verify := func(t *testing.T, err error) {
t.Helper()
require.ErrorContains(t, err, `unsupported OTLP translation strategy "Invalid"`)
}
t.Run("LoadFile", func(t *testing.T) {
_, err := LoadFile(fpath, false, promslog.NewNopLogger())
verify(t, err)
})
t.Run("Load", func(t *testing.T) {
content, err := os.ReadFile(fpath)
require.NoError(t, err)
_, err = Load(string(content), promslog.NewNopLogger())
verify(t, err)
})
})
}
func TestLoadConfig(t *testing.T) {
// Parse a valid file that sets a global scrape timeout. This tests whether parsing
// an overwritten default field in the global config permanently changes the default.

View file

@ -0,0 +1,4 @@
global:
metric_name_validation_scheme: legacy
otlp:
translation_strategy: Invalid

View file

@ -0,0 +1,2 @@
otlp:
translation_strategy: NoUTF8EscapingWithSuffixes

View file

@ -0,0 +1,4 @@
global:
metric_name_validation_scheme: legacy
otlp:
translation_strategy: NoUTF8EscapingWithSuffixes

View file

@ -161,7 +161,7 @@ type EC2Discovery struct {
func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
m, ok := metrics.(*ec2Metrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -134,7 +134,7 @@ type LightsailDiscovery struct {
func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
m, ok := metrics.(*lightsailMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -186,7 +186,7 @@ type Discovery struct {
func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*azureMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -189,7 +189,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*consulMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -15,6 +15,7 @@ package digitalocean
import (
"context"
"errors"
"fmt"
"log/slog"
"net"
@ -114,7 +115,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*digitaloceanMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

View file

@ -121,7 +121,7 @@ type Discovery struct {
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*dnsMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -15,7 +15,7 @@ package dns
import (
"context"
"fmt"
"errors"
"log/slog"
"net"
"testing"
@ -53,7 +53,7 @@ func TestDNS(t *testing.T) {
Type: "A",
},
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
return nil, fmt.Errorf("some error")
return nil, errors.New("some error")
},
expected: []*targetgroup.Group{},
},

View file

@ -16,7 +16,6 @@ package eureka
import (
"context"
"errors"
"fmt"
"log/slog"
"net"
"net/http"
@ -129,7 +128,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*eurekaMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd")

View file

@ -184,7 +184,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
fm, ok := metrics.(*fileMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -132,7 +132,7 @@ type Discovery struct {
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*gceMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

View file

@ -138,7 +138,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*hetznerMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
r, err := newRefresher(conf, logger)

View file

@ -41,8 +41,8 @@ import (
var (
// DefaultSDConfig is the default HTTP SD configuration.
DefaultSDConfig = SDConfig{
RefreshInterval: model.Duration(60 * time.Second),
HTTPClientConfig: config.DefaultHTTPClientConfig,
RefreshInterval: model.Duration(60 * time.Second),
}
userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`)
@ -86,17 +86,17 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
if c.URL == "" {
return fmt.Errorf("URL is missing")
return errors.New("URL is missing")
}
parsedURL, err := url.Parse(c.URL)
if err != nil {
return err
}
if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
return fmt.Errorf("URL scheme must be 'http' or 'https'")
return errors.New("URL scheme must be 'http' or 'https'")
}
if parsedURL.Host == "" {
return fmt.Errorf("host is missing in URL")
return errors.New("host is missing in URL")
}
return c.HTTPClientConfig.Validate()
}
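For reference, a minimal `http_sd_configs` entry that passes the validation above might look like the following sketch (the URL is a hypothetical placeholder):

```yaml
scrape_configs:
  - job_name: http-sd-example
    http_sd_configs:
      # Must be an http or https URL with a non-empty host,
      # otherwise Validate rejects the configuration.
      - url: https://sd.example.com/targets
        refresh_interval: 60s
```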
@ -118,7 +118,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*httpMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -15,7 +15,6 @@ package ionos
import (
"errors"
"fmt"
"log/slog"
"time"
@ -46,7 +45,7 @@ type Discovery struct{}
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*ionosMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if conf.ionosEndpoint == "" {

View file

@ -102,10 +102,7 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node
return
}
ep := &apiv1.Endpoints{}
ep.Namespace = svc.Namespace
ep.Name = svc.Name
obj, exists, err := e.endpointsStore.Get(ep)
obj, exists, err := e.endpointsStore.GetByKey(namespacedName(svc.Namespace, svc.Name))
if exists && err == nil {
e.enqueue(obj.(*apiv1.Endpoints))
}
@ -457,11 +454,8 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
if ref == nil || ref.Kind != "Pod" {
return nil
}
p := &apiv1.Pod{}
p.Namespace = ref.Namespace
p.Name = ref.Name
obj, exists, err := e.podStore.Get(p)
obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name))
if err != nil {
e.logger.Error("resolving pod ref failed", "err", err)
return nil
@ -473,11 +467,7 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
}
func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
svc := &apiv1.Service{}
svc.Namespace = ns
svc.Name = name
obj, exists, err := e.serviceStore.Get(svc)
obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name))
if err != nil {
e.logger.Error("retrieving service failed", "err", err)
return
@ -485,7 +475,7 @@ func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
if !exists {
return
}
svc = obj.(*apiv1.Service)
svc := obj.(*apiv1.Service)
tg.Labels = tg.Labels.Merge(serviceLabels(svc))
}
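The `GetByKey` calls above rely on a `namespacedName` helper that is outside this hunk. Assuming it produces the standard `<namespace>/<name>` key format that client-go's cache stores expect, a sketch would be:

```go
// namespacedName builds the "<namespace>/<name>" key format produced by
// cache.MetaNamespaceKeyFunc and expected by GetByKey. Sketch only; the
// actual helper is defined elsewhere in this package.
func namespacedName(namespace, name string) string {
	return namespace + "/" + name
}
```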

View file

@ -18,10 +18,12 @@ import (
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
@ -1257,3 +1259,22 @@ func TestEndpointsDiscoverySidecarContainer(t *testing.T) {
},
}.Run(t)
}
func BenchmarkResolvePodRef(b *testing.B) {
indexer := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, nil)
e := &Endpoints{
podStore: indexer,
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
p := e.resolvePodRef(&v1.ObjectReference{
Kind: "Pod",
Name: "testpod",
Namespace: "foo",
})
require.Nil(b, p)
}
}

View file

@ -467,11 +467,8 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
if ref == nil || ref.Kind != "Pod" {
return nil
}
p := &apiv1.Pod{}
p.Namespace = ref.Namespace
p.Name = ref.Name
obj, exists, err := e.podStore.Get(p)
obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name))
if err != nil {
e.logger.Error("resolving pod ref failed", "err", err)
return nil
@ -484,19 +481,19 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgroup.Group) {
var (
svc = &apiv1.Service{}
found bool
name string
)
svc.Namespace = esa.namespace()
ns := esa.namespace()
// Every EndpointSlice object has the Service they belong to in the
// kubernetes.io/service-name label.
svc.Name, found = esa.labels()[esa.labelServiceName()]
name, found = esa.labels()[esa.labelServiceName()]
if !found {
return
}
obj, exists, err := e.serviceStore.Get(svc)
obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name))
if err != nil {
e.logger.Error("retrieving service failed", "err", err)
return
@ -504,7 +501,7 @@ func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgro
if !exists {
return
}
svc = obj.(*apiv1.Service)
svc := obj.(*apiv1.Service)
tg.Labels = tg.Labels.Merge(serviceLabels(svc))
}

View file

@ -173,7 +173,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
if c.Role == "" {
return fmt.Errorf("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
return errors.New("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
}
err = c.HTTPClientConfig.Validate()
if err != nil {
@ -181,20 +181,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
if c.APIServer.URL != nil && c.KubeConfig != "" {
// Api-server and kubeconfig_file are mutually exclusive
return fmt.Errorf("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
return errors.New("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
}
if c.KubeConfig != "" && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
// Kubeconfig_file and custom http config are mutually exclusive
return fmt.Errorf("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
return errors.New("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
}
if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
return fmt.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
return errors.New("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
}
if c.APIServer.URL != nil && c.NamespaceDiscovery.IncludeOwnNamespace {
return fmt.Errorf("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
return errors.New("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
}
if c.KubeConfig != "" && c.NamespaceDiscovery.IncludeOwnNamespace {
return fmt.Errorf("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
return errors.New("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
}
foundSelectorRoles := make(map[Role]struct{})
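A hedged example of a configuration that satisfies these mutual-exclusivity rules (the kubeconfig path is a hypothetical placeholder):

```yaml
scrape_configs:
  - job_name: kubernetes-pods
    kubernetes_sd_configs:
      # kubeconfig_file and api_server are mutually exclusive:
      # set at most one of them.
      - role: pod
        kubeconfig_file: /etc/prometheus/kubeconfig
```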
@ -288,7 +288,7 @@ func (d *Discovery) getNamespaces() []string {
func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
m, ok := metrics.(*kubernetesMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if l == nil {
@ -672,7 +672,7 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
pod, ok := obj.(*apiv1.Pod)
if !ok {
return nil, fmt.Errorf("object is not a pod")
return nil, errors.New("object is not a pod")
}
return []string{pod.Spec.NodeName}, nil
}
@ -686,7 +686,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
indexers[podIndex] = func(obj interface{}) ([]string, error) {
e, ok := obj.(*apiv1.Endpoints)
if !ok {
return nil, fmt.Errorf("object is not endpoints")
return nil, errors.New("object is not endpoints")
}
var pods []string
for _, target := range e.Subsets {
@ -705,7 +705,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
e, ok := obj.(*apiv1.Endpoints)
if !ok {
return nil, fmt.Errorf("object is not endpoints")
return nil, errors.New("object is not endpoints")
}
var nodes []string
for _, target := range e.Subsets {
@ -751,7 +751,7 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
}
}
default:
return nil, fmt.Errorf("object is not an endpointslice")
return nil, errors.New("object is not an endpointslice")
}
return nodes, nil

View file

@ -141,7 +141,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*linodeMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

View file

@ -15,6 +15,7 @@ package discovery
import (
"context"
"errors"
"fmt"
"sort"
"strconv"
@ -1209,9 +1210,9 @@ func TestGaugeFailedConfigs(t *testing.T) {
c := map[string]Configs{
"prometheus": {
errorConfig{fmt.Errorf("tests error 0")},
errorConfig{fmt.Errorf("tests error 1")},
errorConfig{fmt.Errorf("tests error 2")},
errorConfig{errors.New("tests error 0")},
errorConfig{errors.New("tests error 1")},
errorConfig{errors.New("tests error 2")},
},
}
discoveryManager.ApplyConfig(c)

View file

@ -143,7 +143,7 @@ type Discovery struct {
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*marathonMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd")

View file

@ -15,6 +15,7 @@ package moby
import (
"context"
"errors"
"fmt"
"log/slog"
"net"
@ -110,7 +111,7 @@ func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error
return err
}
if c.Host == "" {
return fmt.Errorf("host missing")
return errors.New("host missing")
}
if _, err = url.Parse(c.Host); err != nil {
return err
@ -131,7 +132,7 @@ type DockerDiscovery struct {
func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
m, ok := metrics.(*dockerMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
d := &DockerDiscovery{

View file

@ -15,6 +15,7 @@ package moby
import (
"context"
"errors"
"fmt"
"log/slog"
"net/http"
@ -99,7 +100,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e
return err
}
if c.Host == "" {
return fmt.Errorf("host missing")
return errors.New("host missing")
}
if _, err = url.Parse(c.Host); err != nil {
return err
@ -107,7 +108,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e
switch c.Role {
case "services", "nodes", "tasks":
case "":
return fmt.Errorf("role missing (one of: tasks, services, nodes)")
return errors.New("role missing (one of: tasks, services, nodes)")
default:
return fmt.Errorf("invalid role %s, expected tasks, services, or nodes", c.Role)
}
@ -128,7 +129,7 @@ type Discovery struct {
func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*dockerswarmMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

View file

@ -124,7 +124,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*nomadMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

View file

@ -145,7 +145,7 @@ type refresher interface {
func NewDiscovery(conf *SDConfig, l *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*openstackMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
r, err := newRefresher(conf, l)

View file

@ -151,7 +151,7 @@ func newRefresher(conf *SDConfig, logger *slog.Logger) (refresher, error) {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*ovhcloudMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
r, err := newRefresher(conf, logger)

View file

@ -17,6 +17,7 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log/slog"
@ -109,20 +110,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
if c.URL == "" {
return fmt.Errorf("URL is missing")
return errors.New("URL is missing")
}
parsedURL, err := url.Parse(c.URL)
if err != nil {
return err
}
if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
return fmt.Errorf("URL scheme must be 'http' or 'https'")
return errors.New("URL scheme must be 'http' or 'https'")
}
if parsedURL.Host == "" {
return fmt.Errorf("host is missing in URL")
return errors.New("host is missing in URL")
}
if c.Query == "" {
return fmt.Errorf("query missing")
return errors.New("query missing")
}
return c.HTTPClientConfig.Validate()
}
@ -142,7 +143,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*puppetdbMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -15,7 +15,7 @@ package refresh
import (
"context"
"fmt"
"errors"
"testing"
"time"
@ -64,7 +64,7 @@ func TestRefresh(t *testing.T) {
case 2:
return tg2, nil
}
return nil, fmt.Errorf("some error")
return nil, errors.New("some error")
}
interval := time.Millisecond

View file

@ -267,7 +267,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManager) (map[string]DiscovererMetrics, error) {
err := rmm.Register()
if err != nil {
return nil, fmt.Errorf("failed to create service discovery refresh metrics")
return nil, errors.New("failed to create service discovery refresh metrics")
}
metrics := make(map[string]DiscovererMetrics)
@ -275,7 +275,7 @@ func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManag
currentSdMetrics := conf.NewDiscovererMetrics(registerer, rmm)
err = currentSdMetrics.Register()
if err != nil {
return nil, fmt.Errorf("failed to create service discovery metrics")
return nil, errors.New("failed to create service discovery metrics")
}
metrics[conf.Name()] = currentSdMetrics
}

View file

@ -188,7 +188,7 @@ type Discovery struct{}
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*scalewayMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
r, err := newRefresher(conf)

View file

@ -149,7 +149,7 @@ type Discovery struct {
func New(logger *slog.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*tritonMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
tls, err := config.NewTLSConfig(&conf.TLSConfig)

View file

@ -215,7 +215,7 @@ func getEndpointInfoForSystems(
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*uyuniMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
apiURL, err := url.Parse(conf.Server)

View file

@ -15,6 +15,7 @@ package vultr
import (
"context"
"errors"
"fmt"
"log/slog"
"net"
@ -117,7 +118,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*vultrMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

View file

@ -14,6 +14,7 @@
package xds
import (
"errors"
"fmt"
"log/slog"
"net/url"
@ -161,7 +162,7 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L
func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) {
m, ok := metrics.(*xdsMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
// Default to "prometheus" if hostname is unavailable.

View file

@ -17,6 +17,8 @@ The Prometheus monitoring server
| <code class="text-nowrap">--config.file</code> | Prometheus configuration file path. | `prometheus.yml` |
| <code class="text-nowrap">--config.auto-reload-interval</code> | Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes. | `30s` |
| <code class="text-nowrap">--web.listen-address</code> <code class="text-nowrap">...<code class="text-nowrap"> | Address to listen on for UI, API, and telemetry. Can be repeated. | `0.0.0.0:9090` |
| <code class="text-nowrap">--auto-gomaxprocs</code> | Automatically set GOMAXPROCS to match Linux container CPU quota | `true` |
| <code class="text-nowrap">--auto-gomemlimit</code> | Automatically set GOMEMLIMIT to match Linux container or system memory limit | `true` |
| <code class="text-nowrap">--auto-gomemlimit.ratio</code> | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` |
| <code class="text-nowrap">--web.config.file</code> | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | |
| <code class="text-nowrap">--web.read-timeout</code> | Maximum duration before timing out read of the request, and closing idle connections. | `5m` |
@ -58,7 +60,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--agent</code> | Run Prometheus in 'Agent mode'. | |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |

View file

@ -171,8 +171,17 @@ remote_write:
[ - <remote_write> ... ]
# Settings related to the OTLP receiver feature.
# See https://prometheus.io/docs/guides/opentelemetry/ for best practices.
otlp:
[ promote_resource_attributes: [<string>, ...] | default = [ ] ]
# Configures translation of OTLP metrics when received through the OTLP metrics
# endpoint. Available values:
# - "UnderscoreEscapingWithSuffixes" refers to commonly agreed normalization used
# by OpenTelemetry in https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheus
# - "NoUTF8EscapingWithSuffixes" is a mode that relies on UTF-8 support in Prometheus.
# It preserves all special characters like dots, but it still add required suffixes
# for units and _total like in UnderscoreEscapingWithSuffixes.
[ translation_strategy: <string> | default = "UnderscoreEscapingWithSuffixes" ]
# Settings related to the remote read feature.
remote_read:
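As a short, hedged example, an `otlp` section using the settings documented above (for a deployment that wants to keep UTF-8 names) might look like:

```yaml
otlp:
  promote_resource_attributes:
    - service.name
    - k8s.namespace.name
  # Requires UTF-8 metric name validation to be enabled.
  translation_strategy: NoUTF8EscapingWithSuffixes
```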
@ -2797,6 +2806,12 @@ write_relabel_configs:
# For the `io.prometheus.write.v2.Request` message, this option is noop (always true).
[ send_native_histograms: <boolean> | default = false ]
# When enabled, remote-write will resolve the URL host name via DNS, choose one of the IP addresses at random, and connect to it.
# When disabled, remote-write relies on Go's standard behavior, which is to try to connect to each address in turn.
# The connection timeout applies to the whole operation, i.e. in the latter case it is spread over all attempts.
# This is an experimental feature, and its behavior might still change, or even get removed.
[ round_robin_dns: <boolean> | default = false ]
# Optionally configures AWS's Signature Verification 4 signing process to
# sign requests. Cannot be set at the same time as basic_auth, authorization, oauth2, or azuread.
# To use the default credentials from the AWS SDK, use `sigv4: {}`.
@ -2889,6 +2904,7 @@ metadata_config:
# HTTP client settings, including authentication methods (such as basic auth and
# authorization), proxy configurations, TLS options, custom HTTP headers, etc.
# enable_http2 defaults to false for remote-write.
[ <http_config> ]
```
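Putting the new options together, a sketch of a `remote_write` entry using the experimental DNS round-robin behavior (the endpoint URL is a hypothetical placeholder):

```yaml
remote_write:
  - url: https://remote.example.com/api/v1/write
    # Experimental: resolve the host via DNS and connect to one
    # of the returned IP addresses at random.
    round_robin_dns: true
    # HTTP client options are embedded directly in the entry;
    # enable_http2 now defaults to false for remote-write.
    enable_http2: false
```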
@ -2940,8 +2956,6 @@ with this feature.
`tsdb` lets you configure the runtime-reloadable configuration settings of the TSDB.
NOTE: Out-of-order ingestion is an experimental feature, but you do not need any additional flag to enable it. Setting `out_of_order_time_window` to a positive duration enables it.
```yaml
# Configures how old an out-of-order/out-of-bounds sample can be w.r.t. the TSDB max time.
# An out-of-order/out-of-bounds sample is ingested into the TSDB as long as the timestamp

View file

@ -23,9 +23,8 @@ Exemplar storage is implemented as a fixed size circular buffer that stores exem
`--enable-feature=memory-snapshot-on-shutdown`
This takes the snapshot of the chunks that are in memory along with the series information when shutting down and stores
it on disk. This will reduce the startup time since the memory state can be restored with this snapshot and m-mapped
chunks without the need of WAL replay.
This takes a snapshot of the chunks that are in memory along with the series information when shutting down and stores it on disk. This will reduce the startup time since the memory state can now be restored with this snapshot
and m-mapped chunks, while a WAL replay from disk is only needed for the parts of the WAL that are not part of the snapshot.
## Extra scrape metrics
@ -48,20 +47,6 @@ statistics. Currently this is limited to totalQueryableSamples.
When disabled in either the engine or the query, per-step statistics are not
computed at all.
## Auto GOMAXPROCS
`--enable-feature=auto-gomaxprocs`
When enabled, GOMAXPROCS variable is automatically set to match Linux container CPU quota.
## Auto GOMEMLIMIT
`--enable-feature=auto-gomemlimit`
When enabled, the GOMEMLIMIT variable is automatically set to match the Linux container memory limit. If there is no container limit, or the process is running outside of containers, the system memory total is used.
There is also an additional tuning flag, `--auto-gomemlimit.ratio`, which allows controlling how much of the memory is used for Prometheus. The remainder is reserved for memory outside the process. For example, kernel page cache. Page cache is important for Prometheus TSDB query performance. The default is `0.9`, which means 90% of the memory limit will be used for Prometheus.
## Native Histograms
`--enable-feature=native-histograms`

View file

@ -3,198 +3,216 @@ title: Migration
sort_rank: 10
---
# Prometheus 2.0 migration guide
# Prometheus 3.0 migration guide
In line with our [stability promise](https://prometheus.io/blog/2016/07/18/prometheus-1-0-released/#fine-print),
the Prometheus 2.0 release contains a number of backwards incompatible changes.
This document offers guidance on migrating from Prometheus 1.8 to Prometheus 2.0 and newer versions.
In line with our [stability promise](https://prometheus.io/docs/prometheus/latest/stability/),
the Prometheus 3.0 release contains a number of backwards incompatible changes.
This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0 and newer versions.
## Flags
The format of Prometheus command line flags has changed. Instead of a
single dash, all flags now use a double dash. Common flags (`--config.file`,
`--web.listen-address` and `--web.external-url`) remain but
almost all storage-related flags have been removed.
- The following feature flags have been removed and they have been added to the
default behavior of Prometheus v3:
- `promql-at-modifier`
- `promql-negative-offset`
- `remote-write-receiver`
- `new-service-discovery-manager`
- `expand-external-labels`
- Environment variable references `${var}` or `$var` in external label values
are replaced according to the values of the current environment variables.
- References to undefined variables are replaced by the empty string.
The `$` character can be escaped by using `$$`.
- `no-default-scrape-port`
- Prometheus v3 will no longer add ports to scrape targets according to the
specified scheme. Targets will now appear in labels as configured.
- If you rely on scrape targets like
`https://example.com/metrics` or `http://example.com/metrics` to be
represented as `https://example.com/metrics:443` and
`http://example.com/metrics:80` respectively, add the ports to your target
URLs explicitly.
- `agent`
- Instead use the dedicated `--agent` CLI flag.
- `auto-gomemlimit`
- Prometheus v3 will automatically set `GOMEMLIMIT` to match the Linux
container memory limit. If there is no container limit, or the process is
running outside of containers, the system memory total is used. To disable
this, `--no-auto-gomemlimit` is available.
- `auto-gomaxprocs`
- Prometheus v3 will automatically set `GOMAXPROCS` to match the Linux
container CPU quota. To disable this, `--no-auto-gomaxprocs` is available.
Some notable flags which have been removed:
Prometheus v3 will log a warning if you continue to pass these to
`--enable-feature`.
- `-alertmanager.url` In Prometheus 2.0, the command line flags for configuring
a static Alertmanager URL have been removed. Alertmanager must now be
discovered via service discovery, see [Alertmanager service discovery](#alertmanager-service-discovery).
## Configuration
- `-log.format` In Prometheus 2.0 logs can only be streamed to standard error.
- `-query.staleness-delta` has been renamed to `--query.lookback-delta`; Prometheus
2.0 introduces a new mechanism for handling staleness, see [staleness](querying/basics.md#staleness).
- `-storage.local.*` Prometheus 2.0 introduces a new storage engine; as such all
flags relating to the old engine have been removed. For information on the
new engine, see [Storage](#storage).
- `-storage.remote.*` Prometheus 2.0 has removed the deprecated remote
storage flags, and will fail to start if they are supplied. To write to
InfluxDB, Graphite, or OpenTSDB use the relevant storage adapter.
## Alertmanager service discovery
Alertmanager service discovery was introduced in Prometheus 1.4, allowing Prometheus
to dynamically discover Alertmanager replicas using the same mechanism as scrape
targets. In Prometheus 2.0, the command line flags for static Alertmanager config
have been removed, so the following command line flag:
```
./prometheus -alertmanager.url=http://alertmanager:9093/
```
Would be replaced with the following in the `prometheus.yml` config file:
```yaml
alerting:
alertmanagers:
- static_configs:
- targets:
- alertmanager:9093
```
You can also use all the usual Prometheus service discovery integrations and
relabeling in your Alertmanager configuration. This snippet instructs
Prometheus to search for Kubernetes pods, in the `default` namespace, with the
label `name: alertmanager` and with a non-empty port.
```yaml
alerting:
alertmanagers:
- kubernetes_sd_configs:
- role: pod
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels: [__meta_kubernetes_pod_label_name]
regex: alertmanager
action: keep
- source_labels: [__meta_kubernetes_namespace]
regex: default
action: keep
- source_labels: [__meta_kubernetes_pod_container_port_number]
regex:
action: drop
```
## Recording rules and alerts
The format for configuring alerting and recording rules has been changed to YAML.
An example of a recording rule and alert in the old format:
```
job:request_duration_seconds:histogram_quantile99 =
histogram_quantile(0.99, sum by (le, job) (rate(request_duration_seconds_bucket[1m])))
ALERT FrontendRequestLatency
IF job:request_duration_seconds:histogram_quantile99{job="frontend"} > 0.1
FOR 5m
ANNOTATIONS {
summary = "High frontend request latency",
}
```
Would look like this:
```yaml
groups:
- name: example.rules
rules:
- record: job:request_duration_seconds:histogram_quantile99
expr: histogram_quantile(0.99, sum by (le, job) (rate(request_duration_seconds_bucket[1m])))
- alert: FrontendRequestLatency
expr: job:request_duration_seconds:histogram_quantile99{job="frontend"} > 0.1
for: 5m
annotations:
summary: High frontend request latency
```
To help with the change, the `promtool` tool has a mode to automate the rules conversion. Given a `.rules` file, it will output a `.rules.yml` file in the
new format. For example:
```
$ promtool update rules example.rules
```
You will need to use `promtool` from [Prometheus 2.5](https://github.com/prometheus/prometheus/releases/tag/v2.5.0) as later versions no longer contain the above subcommand.
## Storage
The data format in Prometheus 2.0 has completely changed and is not backwards
compatible with 1.8 and older versions. To retain access to your historic monitoring data we
recommend you run a non-scraping Prometheus instance running at least version
1.8.1 in parallel with your Prometheus 2.0 instance, and have the new server
read existing data from the old one via the remote read protocol.
Your Prometheus 1.8 instance should be started with the following flags and an
config file containing only the `external_labels` setting (if any):
```
$ ./prometheus-1.8.1.linux-amd64/prometheus -web.listen-address ":9094" -config.file old.yml
```
Prometheus 2.0 can then be started (on the same machine) with the following flags:
```
$ ./prometheus-2.0.0.linux-amd64/prometheus --config.file prometheus.yml
```
Where `prometheus.yml` contains in addition to your full existing configuration, the stanza:
```yaml
remote_read:
- url: "http://localhost:9094/api/v1/read"
```
- The scrape job level configuration option `scrape_classic_histograms` has been
renamed to `always_scrape_classic_histograms`. If you use the
`--enable-feature=native-histograms` feature flag to ingest native histograms
and you also want to ingest classic histograms that an endpoint might expose
along with native histograms, be sure to add this configuration or change your
configuration from the old name.
- The default for `http_config.enable_http2` in `remote_write` items has been
changed to `false`. In Prometheus v2 the remote write HTTP client would
default to HTTP/2. In order to parallelize multiple remote write queues
across multiple sockets, it is preferable not to default to HTTP/2.
If you prefer to use HTTP/2 for remote write, you must now set
`http_config.enable_http2: true` in your `remote_write` configuration section,
as sketched below.
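For example, a minimal sketch of restoring the Prometheus v2 behavior for one endpoint (hypothetical URL; the HTTP client options are embedded directly in each `remote_write` entry):

```yaml
remote_write:
  - url: https://remote.example.com/api/v1/write
    # Restore the Prometheus v2 default of HTTP/2 for this endpoint.
    enable_http2: true
```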
## PromQL
The following changes and removals apply to PromQL:
- The `.` pattern in regular expressions in PromQL matches newline characters.
With this change, a regular expression like `.*` matches strings that include
`\n`. This applies to matchers in queries and relabel configs.
- For example, the following regular expressions now match the accompanying
strings, whereas in Prometheus v2 these combinations didn't match.
- `.*` additionally matches `foo\n` and `Foo\nBar`
- `foo.?bar` additionally matches `foo\nbar`
- `foo.+bar` additionally matches `foo\nbar`
- If you want Prometheus v3 to behave like v2, you will have to change your
regular expressions by replacing all `.` patterns with `[^\n]`, e.g.
`foo[^\n]*`.
- Lookback and range selectors are left open and right closed (previously left
closed and right closed). This change affects queries when the evaluation time
perfectly aligns with the sample timestamps. For example, assume querying a
time series with evenly spaced samples exactly 1 minute apart. Before Prometheus
v3, a range query with `5m` would usually return 5 samples. But if the query
evaluation aligns perfectly with a scrape, it would return 6 samples. In
Prometheus v3 queries like this will always return 5 samples.
This change likely has little effect on everyday use, except for some subquery
use cases.
Query front-ends that align queries usually align subqueries to multiples of
the step size. These subqueries will likely be affected.
Tests are more likely to be affected. To fix those, either adjust the expected
number of samples or extend the range by less than one sample interval.
- The `holt_winters` function has been renamed to `double_exponential_smoothing`
and is now guarded by the `promql-experimental-functions` feature flag.
If you want to keep using `holt_winters`, you have to do both of these things:
- Rename `holt_winters` to `double_exponential_smoothing` in your queries.
- Pass `--enable-feature=promql-experimental-functions` in your Prometheus
CLI invocation.
- `drop_common_labels` function - the `without` aggregation modifier should be used
instead.
- `keep_common` aggregation modifier - the `by` modifier should be used instead.
- `count_scalar` function - use cases are better handled by `absent()` or correct
propagation of labels in operations.
## Scrape protocols
Prometheus v3 is more strict concerning the Content-Type header received when
scraping. Prometheus v2 would default to the standard Prometheus text protocol
if the target being scraped did not specify a Content-Type header or if the
header was unparsable or unrecognised. This could lead to incorrect data being
parsed in the scrape. Prometheus v3 will now fail the scrape in such cases.
See [issue #3060](https://github.com/prometheus/prometheus/issues/3060) for more
details.
If a scrape target does not provide the correct Content-Type header, the
fallback protocol can be specified using the `fallback_scrape_protocol`
parameter. See [Prometheus scrape_config documentation.](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config)
This is a breaking change as scrapes that may have succeeded with Prometheus v2
may now fail if this fallback protocol is not specified.
## Miscellaneous
### Prometheus non-root user
### TSDB format and downgrade
The Prometheus Docker image is now built to [run Prometheus
as a non-root user](https://github.com/prometheus/prometheus/pull/2859). If you
want the Prometheus UI/API to listen on a low port number (say, port 80), you'll
need to override it. For Kubernetes, you would use the following YAML:
The TSDB format has been changed slightly in Prometheus v2.55 in preparation for changes
to the index format. Consequently, a Prometheus v3 TSDB can only be read by
Prometheus v2.55 or newer. Keep that in mind when upgrading to v3 -- you will only be
able to downgrade to v2.55, not lower, without losing your persistent TSDB data.
As an extra safety measure, you could optionally consider upgrading to v2.55 first and
confirming that Prometheus works as expected before upgrading to v3.
### TSDB storage contract
TSDB compatible storage is now expected to return results matching the specified
selectors. This might impact some third party implementations, most likely
implementing `remote_read`.
This contract is not explicitly enforced, but violating it can cause undefined behavior.
### UTF-8 names
Prometheus v3 supports UTF-8 in metric and label names. This means metric and
label names can change after upgrading according to what is exposed by
endpoints. Furthermore, metric and label names that would have previously been
flagged as invalid no longer will be.
Users wishing to preserve the original validation behavior can update their
Prometheus yaml configuration to specify the legacy validation scheme:
```
global:
metric_name_validation_scheme: legacy
```
Or on a per-scrape basis:
```
scrape_configs:
- job_name: job1
metric_name_validation_scheme: utf8
- job_name: job2
metric_name_validation_scheme: legacy
```
### Log message format
Prometheus v3 has adopted `log/slog` over the previous `go-kit/log`. This
results in a change of log message format. An example of the old log format is:
```
ts=2024-10-23T22:01:06.074Z caller=main.go:627 level=info msg="No time or size retention was set so using the default time retention" duration=15d
ts=2024-10-23T22:01:06.074Z caller=main.go:671 level=info msg="Starting Prometheus Server" mode=server version="(version=, branch=, revision=91d80252c3e528728b0f88d254dd720f6be07cb8-modified)"
ts=2024-10-23T22:01:06.074Z caller=main.go:676 level=info build_context="(go=go1.23.0, platform=linux/amd64, user=, date=, tags=unknown)"
ts=2024-10-23T22:01:06.074Z caller=main.go:677 level=info host_details="(Linux 5.15.0-124-generic #134-Ubuntu SMP Fri Sep 27 20:20:17 UTC 2024 x86_64 gigafips (none))"
```
a similar sequence in the new log format looks like this:
```
time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:640 msg="No time or size retention was set so using the default time retention" duration=15d
time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:681 msg="Starting Prometheus Server" mode=server version="(version=, branch=, revision=7c7116fea8343795cae6da42960cacd0207a2af8)"
time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:686 msg="operational information" build_context="(go=go1.23.0, platform=linux/amd64, user=, date=, tags=unknown)" host_details="(Linux 5.15.0-124-generic #134-Ubuntu SMP Fri Sep 27 20:20:17 UTC 2024 x86_64 gigafips (none))" fd_limits="(soft=1048576, hard=1048576)" vm_limits="(soft=unlimited, hard=unlimited)"
```
### `le` and `quantile` label values
In Prometheus v3, the values of the `le` label of classic histograms and the
`quantile` label of summaries are normalized upon ingestion. In Prometheus v2
the value of these labels depended on the scrape protocol (protobuf vs text
format) in some situations. This led to label values changing based on the
scrape protocol. E.g. a metric exposed as `my_classic_hist{le="1"}` would be
ingested as `my_classic_hist{le="1"}` via the text format, but as
`my_classic_hist{le="1.0"}` via protobuf. This changed the identity of the
metric and caused problems when querying the metric.
In Prometheus v3 these label values will always be normalized to a float-like
representation. I.e. the above example will always result in
`my_classic_hist{le="1.0"}` being ingested into Prometheus, regardless of the
protocol. The effect of this change is that alerts, recording rules and
dashboards that directly reference label values as whole numbers such as
`le="1"` will stop working.
Ways to deal with this change either globally or on a per metric basis:
- Fix references to integer `le`, `quantile` label values, but otherwise do
nothing and accept that some queries that span the transition time will produce
inaccurate or unexpected results.
_This is the recommended solution._
- Use `metric_relabel_config` to retain the old labels when scraping targets.
This should **only** be applied to metrics that currently produce such labels.
```yaml
apiVersion: v1
kind: Pod
metadata:
name: security-context-demo-2
spec:
securityContext:
runAsUser: 0
...
metric_relabel_configs:
- source_labels:
- quantile
target_label: quantile
regex: (\d+)\.0+
- source_labels:
- le
- __name__
target_label: le
regex: (\d+)\.0+;.*_bucket
```
See [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
for more details.
### Disallow configuring Alertmanager with the v1 API
Prometheus 3 no longer supports Alertmanager's v1 API. Effectively Prometheus 3
requires [Alertmanager 0.16.0](https://github.com/prometheus/alertmanager/releases/tag/v0.16.0) or later. Users with older Alertmanager
versions or configurations that use `alerting: alertmanagers: [api_version: v1]`
need to upgrade Alertmanager and change their configuration to use `api_version: v2`.
If you're using Docker, then the following snippet would be used:
# Prometheus 2.0 migration guide
```
docker run -p 9090:9090 prom/prometheus:latest
```
### Prometheus lifecycle
If you use the Prometheus `/-/reload` HTTP endpoint to [automatically reload your
Prometheus config when it changes](configuration/configuration.md),
these endpoints are disabled by default for security reasons in Prometheus 2.0.
To enable them, set the `--web.enable-lifecycle` flag.
For migrating from Prometheus 1.8 to 2.0, please refer to the [Prometheus v2.55 documentation](https://prometheus.io/docs/prometheus/2.55/migration/).

View file

@ -568,7 +568,7 @@ Instant vectors are returned as result type `vector`. The corresponding
Each series could have the `"value"` key, or the `"histogram"` key, but not both.
Series are not guaranteed to be returned in any particular order unless a function
such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label)`
such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label)
is used.
### Scalars
@ -905,7 +905,7 @@ curl -G http://localhost:9091/api/v1/targets/metadata \
```
The following example returns metadata for all metrics for all targets with
label `instance="127.0.0.1:9090`.
label `instance="127.0.0.1:9090"`.
```json
curl -G http://localhost:9091/api/v1/targets/metadata \
@ -1190,9 +1190,11 @@ The following endpoint returns various cardinality statistics about the Promethe
GET /api/v1/status/tsdb
```
URL query parameters:
- `limit=<number>`: Limit the number of returned items to a given number for each set of statistics. By default, 10 items are returned.
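As a hedged example, the `limit` parameter is supplied like any other query parameter:

```json
$ curl 'http://localhost:9090/api/v1/status/tsdb?limit=5'
```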
The `data` section of the query result consists of
The `data` section of the query result consists of:
- **headStats**: This provides the following data about the head block of the TSDB:
- **numSeries**: The number of series.
- **chunkCount**: The number of chunks.
@ -1268,13 +1270,13 @@ The following endpoint returns information about the WAL replay:
GET /api/v1/status/walreplay
```
**read**: The number of segments replayed so far.
**total**: The total number segments needed to be replayed.
**progress**: The progress of the replay (0 - 100%).
**state**: The state of the replay. Possible states:
- **waiting**: Waiting for the replay to start.
- **in progress**: The replay is in progress.
- **done**: The replay has finished.
- **read**: The number of segments replayed so far.
- **total**: The total number of segments that need to be replayed.
- **progress**: The progress of the replay (0 - 100%).
- **state**: The state of the replay. Possible states:
- **waiting**: Waiting for the replay to start.
- **in progress**: The replay is in progress.
- **done**: The replay has finished.
```json
$ curl http://localhost:9090/api/v1/status/walreplay

View file

@ -464,6 +464,97 @@ by the number of seconds under the specified time range window, and should be
used primarily for human readability. Use `rate` in recording rules so that
increases are tracked consistently on a per-second basis.
## `info()` (experimental)
_The `info` function is an experiment to improve UX
around including labels from [info metrics](https://grafana.com/blog/2021/08/04/how-to-use-promql-joins-for-more-effective-queries-of-prometheus-metrics-at-scale/#info-metrics).
The behavior of this function may change in future versions of Prometheus,
including its removal from PromQL. `info` has to be enabled via the
[feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`._
`info(v instant-vector, [data-label-selector instant-vector])` finds, for each time
series in `v`, all info series with matching _identifying_ labels (more on
this later), and adds the union of their _data_ (i.e., non-identifying) labels
to the time series. The second argument `data-label-selector` is optional.
It is not a real instant vector, but uses a subset of its syntax.
It must start and end with curly braces (`{ ... }`) and may only contain label matchers.
The label matchers are used to constrain which info series to consider
and which data labels to add to `v`.
Identifying labels of an info series are the subset of labels that uniquely
identify the info series. The remaining labels are considered
_data labels_ (also called non-identifying). (Note that Prometheus's concept
of time series identity always includes _all_ the labels. For the sake of the `info`
function, we “logically” define info series identity in a different way than
in the conventional Prometheus view.) The identifying labels of an info series
are used to join it to regular (non-info) series, i.e. those series that have
the same labels as the identifying labels of the info series. The data labels, which are
the ones added to the regular series by the `info` function, effectively encode
metadata key value pairs. (This implies that a change in the data labels
in the conventional Prometheus view constitutes the end of one info series and
the beginning of a new info series, while the “logical” view of the `info` function is
that the same info series continues to exist, just with different “data”.)
The conventional approach of adding data labels is sometimes called a “join query”,
as illustrated by the following example:
```
rate(http_server_request_duration_seconds_count[2m])
* on (job, instance) group_left (k8s_cluster_name)
target_info
```
The core of the query is the expression `rate(http_server_request_duration_seconds_count[2m])`.
But to add data labels from an info metric, the user has to use elaborate
(and not very obvious) syntax to specify which info metric to use (`target_info`), what the
identifying labels are (`on (job, instance)`), and which data labels to add
(`group_left (k8s_cluster_name)`).
This query is not only verbose and hard to write, it might also run into an “identity crisis”:
If any of the data labels of `target_info` changes, Prometheus sees that as a change of series
(as alluded to above, Prometheus just has no native concept of non-identifying labels).
If the old `target_info` series is not properly marked as stale (which can happen with certain ingestion paths),
the query above will fail for up to 5m (the lookback delta) because it will find a conflicting
match with both the old and the new version of `target_info`.
The `info` function not only resolves this conflict in favor of the newer series, it also simplifies the syntax
because it knows about the available info series and what their identifying labels are. The example query
looks like this with the `info` function:
```
info(
rate(http_server_request_duration_seconds_count[2m]),
{k8s_cluster_name=~".+"}
)
```
The common case of adding _all_ data labels can be achieved by
omitting the 2nd argument of the `info` function entirely, simplifying
the example even more:
```
info(rate(http_server_request_duration_seconds_count[2m]))
```
While `info` normally automatically finds all matching info series, it's possible to
restrict them by providing a `__name__` label matcher, e.g.
`{__name__="target_info"}`.
### Limitations
In its current iteration, `info` defaults to considering only info series with
the name `target_info`. It also assumes that the identifying info series labels are
`instance` and `job`. `info` does support other info series names however, through
`__name__` label matchers. E.g., one can explicitly say to consider both
`target_info` and `build_info` as follows:
`{__name__=~"(target|build)_info"}`. However, the identifying labels always
have to be `instance` and `job`.
These limitations partially defeat the purpose of the `info` function.
At the current stage, this is an experiment to find out how useful the approach
turns out to be in practice. A final version of the `info` function will indeed
consider all matching info series with their appropriate identifying labels.
## `irate()`
`irate(v range-vector)` calculates the per-second instant rate of increase of
the time series in the range vector. This is based on the last two data points.
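For example, the instant rate of HTTP requests, based on the last two samples
within the last 5 minutes (assuming the usual `http_requests_total` counter):
```
irate(http_requests_total{job="api-server"}[5m])
```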
@ -9,7 +9,7 @@ Prometheus promises API stability within a major version, and strives to avoid
breaking changes for key features. Some features, which are cosmetic, still
under development, or depend on 3rd party services, are not covered by this.
Things considered stable for 2.x:
Things considered stable for 3.x:
* The query language and data model
* Alerting and recording rules
@ -18,21 +18,25 @@ Things considered stable for 2.x:
* Configuration file format (minus the service discovery remote read/write, see below)
* Rule/alert file format
* Console template syntax and semantics
* Remote write sending, per the [1.0 specification](https://prometheus.io/docs/concepts/remote_write_spec/).
* Remote write sending and receiving, per the [1.0 specification](https://prometheus.io/docs/concepts/remote_write_spec/)
* Agent mode
* OTLP receiver endpoint
Things considered unstable for 2.x:
Things considered unstable for 3.x:
* Any feature listed as experimental or subject to change, including:
* The [`holt_winters` PromQL function](https://github.com/prometheus/prometheus/issues/2458)
* Remote write receiving, remote read and the remote read endpoint
* The [`double_exponential_smoothing` PromQL function](https://github.com/prometheus/prometheus/issues/2458)
* Remote read and the remote read endpoint
* Server-side HTTPS and basic authentication
* Service discovery integrations, with the exception of `static_configs` and `file_sd_configs`
* Service discovery integrations, with the exception of `static_configs`, `file_sd_configs` and `http_sd_config`
* Go APIs of packages that are part of the server
* HTML generated by the web UI
* The metrics in the /metrics endpoint of Prometheus itself
* Exact on-disk format. Potential changes, however, will be forward compatible and transparently handled by Prometheus
* The format of the logs
Prometheus 2.x stability guarantees can be found [in the 2.x documentation](https://prometheus.io/docs/prometheus/2.55/stability/).
As long as you are not using any features marked as experimental/unstable, an
upgrade within a major version can usually be performed without any operational
adjustments and very little risk that anything will break. Any breaking changes
@ -0,0 +1,31 @@
# my global config
global:
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
otlp:
# Recommended attributes to be promoted to labels.
promote_resource_attributes:
- service.instance.id
- service.name
- service.namespace
- cloud.availability_zone
- cloud.region
- container.name
- deployment.environment.name
- k8s.cluster.name
- k8s.container.name
- k8s.cronjob.name
- k8s.daemonset.name
- k8s.deployment.name
- k8s.job.name
- k8s.namespace.name
- k8s.pod.name
- k8s.replicaset.name
- k8s.statefulset.name
# Ingest OTLP data keeping UTF-8 characters in metric/label names.
translation_strategy: NoUTF8EscapingWithSuffixes
storage:
# OTLP is a push-based protocol; out-of-order samples are a common scenario.
tsdb:
out_of_order_time_window: 30m
@ -6,9 +6,9 @@ require (
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.11.6
github.com/prometheus/client_golang v1.20.4
github.com/prometheus/common v0.60.0
github.com/influxdata/influxdb v1.11.7
github.com/prometheus/client_golang v1.20.5
github.com/prometheus/common v0.60.1
github.com/prometheus/prometheus v0.53.1
github.com/stretchr/testify v1.9.0
)
@ -166,8 +166,8 @@ github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MI
github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/influxdata/influxdb v1.11.6 h1:zS5MRY+RQ5/XFTer5R8xQRnY17JYSbacvO6OaP164wU=
github.com/influxdata/influxdb v1.11.6/go.mod h1:F10NoQb9qa04lME3pTPWQrYt4JZ/ke1Eei+1ttgHHrg=
github.com/influxdata/influxdb v1.11.7 h1:C31A+S9YfjTCOuAv9Qs0ZdQufslOZZBtejjxiV8QNQw=
github.com/influxdata/influxdb v1.11.7/go.mod h1:zRTAuk/Ie/V1LGxJUv8jfDmfv+ypz22lxfhc1MxC3rI=
github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@ -253,8 +253,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -264,8 +264,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
go.mod
@ -5,8 +5,8 @@ go 1.22.0
toolchain go1.23.0
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
github.com/Code-Hex/go-generics-cache v1.5.1
@ -17,10 +17,10 @@ require (
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
github.com/digitalocean/godo v1.126.0
github.com/digitalocean/godo v1.128.0
github.com/docker/docker v27.3.1+incompatible
github.com/edsrzf/mmap-go v1.1.0
github.com/envoyproxy/go-control-plane v0.13.0
github.com/edsrzf/mmap-go v1.2.0
github.com/envoyproxy/go-control-plane v0.13.1
github.com/envoyproxy/protoc-gen-validate v1.1.0
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
github.com/fsnotify/fsnotify v1.7.0
@ -34,14 +34,14 @@ require (
github.com/gophercloud/gophercloud v1.14.1
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.29.4
github.com/hashicorp/consul/api v1.30.0
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3
github.com/hetznercloud/hcloud-go/v2 v2.13.1
github.com/hetznercloud/hcloud-go/v2 v2.15.0
github.com/ionos-cloud/sdk-go/v6 v6.2.1
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.10
github.com/klauspost/compress v1.17.11
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
github.com/linode/linodego v1.41.0
github.com/linode/linodego v1.42.0
github.com/miekg/dns v1.1.62
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
@ -52,23 +52,24 @@ require (
github.com/prometheus/alertmanager v0.27.0
github.com/prometheus/client_golang v1.20.5
github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.60.0
github.com/prometheus/common v0.60.1
github.com/prometheus/common/assets v0.2.0
github.com/prometheus/common/sigv4 v0.1.0
github.com/prometheus/exporter-toolkit v0.13.0
github.com/prometheus/exporter-toolkit v0.13.1
github.com/prometheus/sigv4 v0.1.0
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
github.com/stretchr/testify v1.9.0
github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/collector/pdata v1.16.0
go.opentelemetry.io/collector/semconv v0.110.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0
go.opentelemetry.io/otel v1.30.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0
go.opentelemetry.io/otel/sdk v1.30.0
go.opentelemetry.io/otel/trace v1.30.0
go.opentelemetry.io/collector/pdata v1.18.0
go.opentelemetry.io/collector/semconv v0.112.0
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0
go.opentelemetry.io/otel v1.31.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0
go.opentelemetry.io/otel/sdk v1.31.0
go.opentelemetry.io/otel/trace v1.31.0
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.6.0
go.uber.org/goleak v1.3.0
@ -78,10 +79,10 @@ require (
golang.org/x/sys v0.26.0
golang.org/x/text v0.19.0
golang.org/x/tools v0.26.0
google.golang.org/api v0.199.0
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1
google.golang.org/api v0.204.0
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53
google.golang.org/grpc v1.67.1
google.golang.org/protobuf v1.34.2
google.golang.org/protobuf v1.35.1
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.31.1
@ -92,8 +93,8 @@ require (
)
require (
cloud.google.com/go/auth v0.9.5 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
cloud.google.com/go/auth v0.10.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect
cloud.google.com/go/compute/metadata v0.5.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
@ -184,15 +185,15 @@ require (
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/metric v1.30.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
golang.org/x/crypto v0.28.0 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/term v0.25.0 // indirect
golang.org/x/time v0.6.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
golang.org/x/time v0.7.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
go.sum
@ -1,45 +1,17 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw=
cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM=
cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo=
cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI=
cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk=
cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=
cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
@ -52,10 +24,11 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
@ -69,7 +42,6 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg=
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
@ -81,7 +53,6 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
@ -97,9 +68,6 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y=
github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
@ -121,8 +89,10 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/digitalocean/godo v1.126.0 h1:+Znh7VMQj/E8ArbjWnc7OKGjWfzC+I8OCSRp7r1MdD8=
github.com/digitalocean/godo v1.126.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/digitalocean/godo v1.128.0 h1:cGn/ibMSRZ9+8etbzMv2MnnCEPTTGlEnx3HHTPwdk1U=
github.com/digitalocean/godo v1.128.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
@ -133,15 +103,15 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les=
github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE=
github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
@ -162,15 +132,10 @@ github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
@ -212,38 +177,26 @@ github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVI
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY=
github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
@ -252,13 +205,9 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@ -267,18 +216,8 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g=
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@ -286,8 +225,6 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw=
@ -300,10 +237,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg=
github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ=
github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
@ -342,7 +277,6 @@ github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
@ -353,9 +287,8 @@ github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1av
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ=
github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/hetznercloud/hcloud-go/v2 v2.15.0 h1:6mpMJ/RuX1woZj+MCJdyKNEX9129KDkEIDeeyfr4GD4=
github.com/hetznercloud/hcloud-go/v2 v2.15.0/go.mod h1:h8sHav+27Xa+48cVMAvAUMELov5h298Ilg2vflyTHgg=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY=
@ -372,23 +305,20 @@ github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2E
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@ -399,8 +329,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY=
github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY=
github.com/linode/linodego v1.42.0 h1:ZSbi4MtvwrfB9Y6bknesorvvueBGGilcmh2D5dq76RM=
github.com/linode/linodego v1.42.0/go.mod h1:2yzmY6pegPBDgx2HDllmt0eIk2IlzqcgK6NR0wFCFRY=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@ -498,8 +428,6 @@ github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwv
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@ -510,28 +438,24 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/exporter-toolkit v0.13.0 h1:lmA0Q+8IaXgmFRKw09RldZmZdnvu9wwcDLIXGmTPw1c=
github.com/prometheus/exporter-toolkit v0.13.0/go.mod h1:2uop99EZl80KdXhv/MxVI2181fMcwlsumFOqBecGkG0=
github.com/prometheus/exporter-toolkit v0.13.1 h1:Evsh0gWQo2bdOHlnz9+0Nm7/OFfIwhE2Ws4A2jIlR04=
github.com/prometheus/exporter-toolkit v0.13.1/go.mod h1:ujdv2YIOxtdFxxqtloLpbqmxd5J0Le6IITUvIRSWjj0=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/sigv4 v0.1.0 h1:FgxH+m1qf9dGQ4w8Dd6VkthmpFQfGTzUeavMoQeG1LA=
github.com/prometheus/sigv4 v0.1.0/go.mod h1:doosPW9dOitMzYe2I2BN0jZqUuBrGPbXrNsTScN18iU=
github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8=
@ -543,7 +467,6 @@ github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+Yg
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@ -573,40 +496,35 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/pdata v1.16.0 h1:g02K8jlRnmQ7TQDuXpdgVL6vIxIVqr5Gbb1qIR27rto=
go.opentelemetry.io/collector/pdata v1.16.0/go.mod h1:YZZJIt2ehxosYf/Y1pbvexjNWsIGNNrzzlCTO9jC1F4=
go.opentelemetry.io/collector/semconv v0.110.0 h1:KHQnOHe3gUz0zsxe8ph9kN5OTypCFD4V+06AiBTfeNk=
go.opentelemetry.io/collector/semconv v0.110.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8=
go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ=
go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE=
go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg=
go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc=
go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
go.opentelemetry.io/collector/pdata v1.18.0 h1:/yg2rO2dxqDM2p6GutsMCxXN6sKlXwyIz/ZYyUPONBg=
go.opentelemetry.io/collector/pdata v1.18.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
go.opentelemetry.io/collector/semconv v0.112.0 h1:JPQyvZhlNLVSuVI+FScONaiFygB7h7NTZceUEKIQUEc=
go.opentelemetry.io/collector/semconv v0.112.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4=
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@ -619,8 +537,6 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@ -631,35 +547,11 @@ golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
@ -673,32 +565,15 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
@ -708,23 +583,15 @@ golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -736,43 +603,20 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -799,9 +643,7 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@ -812,53 +654,18 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
@ -868,71 +675,22 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs=
google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28=
google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4=
google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U=
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
@ -945,18 +703,14 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
@ -969,7 +723,6 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@ -979,12 +732,7 @@ gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
@ -999,9 +747,6 @@ k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7F
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=

View file

@ -14,6 +14,7 @@
package histogram
import (
"errors"
"fmt"
"math"
"strings"
@ -784,16 +785,16 @@ func (h *FloatHistogram) Validate() error {
return fmt.Errorf("custom buckets: %w", err)
}
if h.ZeroCount != 0 {
return fmt.Errorf("custom buckets: must have zero count of 0")
return errors.New("custom buckets: must have zero count of 0")
}
if h.ZeroThreshold != 0 {
return fmt.Errorf("custom buckets: must have zero threshold of 0")
return errors.New("custom buckets: must have zero threshold of 0")
}
if len(h.NegativeSpans) > 0 {
return fmt.Errorf("custom buckets: must not have negative spans")
return errors.New("custom buckets: must not have negative spans")
}
if len(h.NegativeBuckets) > 0 {
return fmt.Errorf("custom buckets: must not have negative buckets")
return errors.New("custom buckets: must not have negative buckets")
}
} else {
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
@ -807,7 +808,7 @@ func (h *FloatHistogram) Validate() error {
return fmt.Errorf("negative side: %w", err)
}
if h.CustomValues != nil {
return fmt.Errorf("histogram with exponential schema must not have custom bounds")
return errors.New("histogram with exponential schema must not have custom bounds")
}
}
err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
@ -948,10 +949,10 @@ func (h *FloatHistogram) floatBucketIterator(
positive bool, absoluteStartValue float64, targetSchema int32,
) floatBucketIterator {
if h.UsesCustomBuckets() && targetSchema != h.Schema {
panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema"))
panic(errors.New("cannot merge from custom buckets schema to exponential schema"))
}
if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) {
panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema"))
panic(errors.New("cannot merge from exponential buckets schema to custom schema"))
}
if targetSchema > h.Schema {
panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))

View file

@ -14,6 +14,7 @@
package histogram
import (
"errors"
"fmt"
"math"
"slices"
@ -432,16 +433,16 @@ func (h *Histogram) Validate() error {
return fmt.Errorf("custom buckets: %w", err)
}
if h.ZeroCount != 0 {
return fmt.Errorf("custom buckets: must have zero count of 0")
return errors.New("custom buckets: must have zero count of 0")
}
if h.ZeroThreshold != 0 {
return fmt.Errorf("custom buckets: must have zero threshold of 0")
return errors.New("custom buckets: must have zero threshold of 0")
}
if len(h.NegativeSpans) > 0 {
return fmt.Errorf("custom buckets: must not have negative spans")
return errors.New("custom buckets: must not have negative spans")
}
if len(h.NegativeBuckets) > 0 {
return fmt.Errorf("custom buckets: must not have negative buckets")
return errors.New("custom buckets: must not have negative buckets")
}
} else {
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
@ -455,7 +456,7 @@ func (h *Histogram) Validate() error {
return fmt.Errorf("negative side: %w", err)
}
if h.CustomValues != nil {
return fmt.Errorf("histogram with exponential schema must not have custom bounds")
return errors.New("histogram with exponential schema must not have custom bounds")
}
}
err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)

View file

@ -802,7 +802,7 @@ type equalMultiStringMapMatcher struct {
func (m *equalMultiStringMapMatcher) add(s string) {
if !m.caseSensitive {
s = toNormalisedLower(s)
s = toNormalisedLower(s, nil) // Don't pass a stack buffer here - it will always escape to the heap.
}
m.values[s] = struct{}{}
@ -840,15 +840,24 @@ func (m *equalMultiStringMapMatcher) setMatches() []string {
}
func (m *equalMultiStringMapMatcher) Matches(s string) bool {
if !m.caseSensitive {
s = toNormalisedLower(s)
if len(m.values) > 0 {
sNorm := s
var a [32]byte
if !m.caseSensitive {
sNorm = toNormalisedLower(s, a[:])
}
if _, ok := m.values[sNorm]; ok {
return true
}
}
if _, ok := m.values[s]; ok {
return true
}
if m.minPrefixLen > 0 && len(s) >= m.minPrefixLen {
for _, matcher := range m.prefixes[s[:m.minPrefixLen]] {
prefix := s[:m.minPrefixLen]
var a [32]byte
if !m.caseSensitive {
prefix = toNormalisedLower(s[:m.minPrefixLen], a[:])
}
for _, matcher := range m.prefixes[prefix] {
if matcher.Matches(s) {
return true
}
@ -859,22 +868,37 @@ func (m *equalMultiStringMapMatcher) Matches(s string) bool {
// toNormalisedLower normalises the input string using "Unicode Normalization Form KD" and then converts
// it to lower case.
func toNormalisedLower(s string) string {
var buf []byte
func toNormalisedLower(s string, a []byte) string {
for i := 0; i < len(s); i++ {
c := s[i]
if c >= utf8.RuneSelf {
return strings.Map(unicode.ToLower, norm.NFKD.String(s))
}
if 'A' <= c && c <= 'Z' {
if buf == nil {
buf = []byte(s)
}
buf[i] = c + 'a' - 'A'
return toNormalisedLowerSlow(s, i, a)
}
}
if buf == nil {
return s
return s
}
// toNormalisedLowerSlow is split from toNormalisedLower because having a call
// to `copy` in the function slows down the fast path even when the call is not executed.
func toNormalisedLowerSlow(s string, i int, a []byte) string {
var buf []byte
if cap(a) > len(s) {
buf = a[:len(s)]
copy(buf, s)
} else {
buf = []byte(s)
}
for ; i < len(s); i++ {
c := s[i]
if c >= utf8.RuneSelf {
return strings.Map(unicode.ToLower, norm.NFKD.String(s))
}
if 'A' <= c && c <= 'Z' {
buf[i] = c + 'a' - 'A'
}
}
return yoloString(buf)
}
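
The change above threads a caller-owned scratch buffer into toNormalisedLower so the common ASCII-only path can lowercase without allocating, and splits the copy-bearing slow path into its own function so the fast loop stays small. A standalone sketch of the same pattern, with simplified names and ASCII handling only (not the real matcher code):

package main

import "fmt"

// lowerASCII lowercases s into the caller's scratch buffer when it is large
// enough; a [N]byte array declared by the caller can then stay on the stack.
func lowerASCII(s string, scratch []byte) string {
	var buf []byte
	if cap(scratch) >= len(s) {
		buf = scratch[:len(s)]
	} else {
		buf = make([]byte, len(s)) // scratch too small: fall back to the heap
	}
	copy(buf, s)
	for i, c := range buf {
		if 'A' <= c && c <= 'Z' {
			buf[i] = c + 'a' - 'A'
		}
	}
	return string(buf)
}

func main() {
	var a [32]byte // stack-allocated scratch, as in the Matches fast path above
	fmt.Println(lowerASCII("POD-Name", a[:])) // pod-name
}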

View file

@ -333,7 +333,8 @@ func BenchmarkToNormalizedLower(b *testing.B) {
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
toNormalisedLower(inputs[n%len(inputs)])
var a [256]byte
toNormalisedLower(inputs[n%len(inputs)], a[:])
}
})
}
@ -1390,6 +1391,6 @@ func TestToNormalisedLower(t *testing.T) {
"ſſAſſa": "ssassa",
}
for input, expectedOutput := range testCases {
require.Equal(t, expectedOutput, toNormalisedLower(input))
require.Equal(t, expectedOutput, toNormalisedLower(input, nil))
}
}

View file

@ -16,6 +16,7 @@ package relabel
import (
"crypto/md5"
"encoding/binary"
"errors"
"fmt"
"strconv"
"strings"
@ -114,10 +115,10 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *Config) Validate() error {
if c.Action == "" {
return fmt.Errorf("relabel action cannot be empty")
return errors.New("relabel action cannot be empty")
}
if c.Modulus == 0 && c.Action == HashMod {
return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus")
return errors.New("relabel configuration for hashmod requires non-zero modulus")
}
if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" {
return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)

View file

@ -184,14 +184,14 @@ type RuleNode struct {
func (r *RuleNode) Validate() (nodes []WrappedError) {
if r.Record.Value != "" && r.Alert.Value != "" {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("only one of 'record' and 'alert' must be set"),
err: errors.New("only one of 'record' and 'alert' must be set"),
node: &r.Record,
nodeAlt: &r.Alert,
})
}
if r.Record.Value == "" && r.Alert.Value == "" {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("one of 'record' or 'alert' must be set"),
err: errors.New("one of 'record' or 'alert' must be set"),
node: &r.Record,
nodeAlt: &r.Alert,
})
@ -199,7 +199,7 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
if r.Expr.Value == "" {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("field 'expr' must be set in rule"),
err: errors.New("field 'expr' must be set in rule"),
node: &r.Expr,
})
} else if _, err := parser.ParseExpr(r.Expr.Value); err != nil {
@ -211,19 +211,19 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
if r.Record.Value != "" {
if len(r.Annotations) > 0 {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("invalid field 'annotations' in recording rule"),
err: errors.New("invalid field 'annotations' in recording rule"),
node: &r.Record,
})
}
if r.For != 0 {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("invalid field 'for' in recording rule"),
err: errors.New("invalid field 'for' in recording rule"),
node: &r.Record,
})
}
if r.KeepFiringFor != 0 {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("invalid field 'keep_firing_for' in recording rule"),
err: errors.New("invalid field 'keep_firing_for' in recording rule"),
node: &r.Record,
})
}

View file

@ -239,13 +239,13 @@ func testParse(t *testing.T, p Parser) (ret []parsedEntry) {
}
p.Metric(&got.lset)
for e := (exemplar.Exemplar{}); p.Exemplar(&e); {
got.es = append(got.es, e)
}
// The parser reuses the returned int pointer, so copy the value out immediately.
if ct := p.CreatedTimestamp(); ct != nil {
got.ct = int64p(*ct)
}
for e := (exemplar.Exemplar{}); p.Exemplar(&e); {
got.es = append(got.es, e)
}
case EntryType:
m, got.typ = p.Type()
got.m = string(m)
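
The reorder above exists because of the comment it carries: the parser can hand back a pointer into reused scratch space, so the test dereferences and copies the created timestamp before driving the parser any further. A self-contained illustration of the hazard; scratchParser and its method are hypothetical, not the real Parser interface:

package main

import "fmt"

// int64p copies v and returns a pointer to the copy, mirroring the test helper.
func int64p(v int64) *int64 { return &v }

// scratchParser reuses a single int64 for every CreatedTimestamp call, the
// kind of reuse the comment above warns about (a sketch, not the real API).
type scratchParser struct{ scratch int64 }

func (p *scratchParser) CreatedTimestamp(v int64) *int64 {
	p.scratch = v
	return &p.scratch
}

func main() {
	p := &scratchParser{}
	aliased := p.CreatedTimestamp(1)         // keeps pointing at scratch
	copied := int64p(*p.CreatedTimestamp(1)) // copies the value out
	p.CreatedTimestamp(2)                    // parser advances, scratch overwritten
	fmt.Println(*aliased, *copied)           // 2 1 - only the copy survives
}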

View file

@ -84,6 +84,7 @@ type NHCBParser struct {
fhNHCB *histogram.FloatHistogram
lsetNHCB labels.Labels
exemplars []exemplar.Exemplar
ctNHCB *int64
metricStringNHCB string
// Collates values from the classic histogram series to build
@ -92,13 +93,16 @@ type NHCBParser struct {
tempNHCB convertnhcb.TempHistogram
tempExemplars []exemplar.Exemplar
tempExemplarCount int
tempCT *int64
// Remembers the last base histogram metric name (assuming it's
// a classic histogram) so we can tell if the next float series
// is part of the same classic histogram.
lastHistogramName string
lastHistogramLabelsHash uint64
hBuffer []byte
lastHistogramName string
lastHistogramLabelsHash uint64
lastHistogramExponential bool
// Reused buffer for hashing labels.
hBuffer []byte
}
func NewNHCBParser(p Parser, st *labels.SymbolTable, keepClassicHistograms bool) Parser {
@ -159,6 +163,16 @@ func (p *NHCBParser) Exemplar(ex *exemplar.Exemplar) bool {
}
func (p *NHCBParser) CreatedTimestamp() *int64 {
switch p.state {
case stateStart:
if p.entry == EntrySeries || p.entry == EntryHistogram {
return p.parser.CreatedTimestamp()
}
case stateCollecting:
return p.tempCT
case stateEmitting:
return p.ctNHCB
}
return nil
}
@ -174,41 +188,50 @@ func (p *NHCBParser) Next() (Entry, error) {
}
return p.entry, p.err
}
et, err := p.parser.Next()
if err != nil {
if errors.Is(err, io.EOF) && p.processNHCB() {
p.entry = et
p.err = err
p.entry, p.err = p.parser.Next()
if p.err != nil {
if errors.Is(p.err, io.EOF) && p.processNHCB() {
return EntryHistogram, nil
}
return EntryInvalid, err
return EntryInvalid, p.err
}
switch et {
switch p.entry {
case EntrySeries:
p.bytes, p.ts, p.value = p.parser.Series()
p.metricString = p.parser.Metric(&p.lset)
// Check the label set to see if we can continue or need to emit the NHCB.
if p.compareLabels() && p.processNHCB() {
p.entry = et
return EntryHistogram, nil
var isNHCB bool
if p.compareLabels() {
// Labels differ. Check if we can emit the NHCB.
if p.processNHCB() {
return EntryHistogram, nil
}
isNHCB = p.handleClassicHistogramSeries(p.lset)
} else {
// Labels are the same. Check whether the previous series was an exponential histogram.
if p.lastHistogramExponential {
isNHCB = false
} else {
isNHCB = p.handleClassicHistogramSeries(p.lset)
}
}
isNHCB := p.handleClassicHistogramSeries(p.lset)
if isNHCB && !p.keepClassicHistograms {
// Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms.
return p.Next()
}
return et, err
return p.entry, p.err
case EntryHistogram:
p.bytes, p.ts, p.h, p.fh = p.parser.Histogram()
p.metricString = p.parser.Metric(&p.lset)
p.storeExponentialLabels()
case EntryType:
p.bName, p.typ = p.parser.Type()
}
if p.processNHCB() {
p.entry = et
return EntryHistogram, nil
}
return et, err
return p.entry, p.err
}
// Returns true if the labels have changed and we should emit the NHCB.
@ -230,9 +253,16 @@ func (p *NHCBParser) compareLabels() bool {
}
// Save the label set of the classic histogram without suffix and bucket `le` label.
func (p *NHCBParser) storeBaseLabels() {
func (p *NHCBParser) storeClassicLabels() {
p.lastHistogramName = convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName))
p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel)
p.lastHistogramExponential = false
}
func (p *NHCBParser) storeExponentialLabels() {
p.lastHistogramName = p.lset.Get(labels.MetricName)
p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer)
p.lastHistogramExponential = true
}
// handleClassicHistogramSeries collates the classic histogram series to be converted to NHCB
@ -253,18 +283,18 @@ func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool {
le, err := strconv.ParseFloat(lset.Get(labels.BucketLabel), 64)
if err == nil && !math.IsNaN(le) {
p.processClassicHistogramSeries(lset, "_bucket", func(hist *convertnhcb.TempHistogram) {
hist.BucketCounts[le] = p.value
_ = hist.SetBucketCount(le, p.value)
})
return true
}
case strings.HasSuffix(mName, "_count"):
p.processClassicHistogramSeries(lset, "_count", func(hist *convertnhcb.TempHistogram) {
hist.Count = p.value
_ = hist.SetCount(p.value)
})
return true
case strings.HasSuffix(mName, "_sum"):
p.processClassicHistogramSeries(lset, "_sum", func(hist *convertnhcb.TempHistogram) {
hist.Sum = p.value
_ = hist.SetSum(p.value)
})
return true
}
@ -273,10 +303,11 @@ func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool {
func (p *NHCBParser) processClassicHistogramSeries(lset labels.Labels, suffix string, updateHist func(*convertnhcb.TempHistogram)) {
if p.state != stateCollecting {
p.storeBaseLabels()
p.storeClassicLabels()
p.tempCT = p.parser.CreatedTimestamp()
p.state = stateCollecting
p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, suffix)
}
p.state = stateCollecting
p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, suffix)
p.storeExemplars()
updateHist(&p.tempNHCB)
}
@ -304,7 +335,6 @@ func (p *NHCBParser) nextExemplarPtr() *exemplar.Exemplar {
func (p *NHCBParser) swapExemplars() {
p.exemplars = p.tempExemplars[:p.tempExemplarCount]
p.tempExemplars = p.tempExemplars[:0]
p.tempExemplarCount = 0
}
// processNHCB converts the collated classic histogram series to NHCB and caches the info
@ -313,31 +343,32 @@ func (p *NHCBParser) processNHCB() bool {
if p.state != stateCollecting {
return false
}
ub := make([]float64, 0, len(p.tempNHCB.BucketCounts))
for b := range p.tempNHCB.BucketCounts {
ub = append(ub, b)
}
upperBounds, hBase := convertnhcb.ProcessUpperBoundsAndCreateBaseHistogram(ub, false)
fhBase := hBase.ToFloat(nil)
h, fh := convertnhcb.NewHistogram(p.tempNHCB, upperBounds, hBase, fhBase)
if h != nil {
if err := h.Validate(); err != nil {
return false
h, fh, err := p.tempNHCB.Convert()
if err == nil {
if h != nil {
if err := h.Validate(); err != nil {
return false
}
p.hNHCB = h
p.fhNHCB = nil
} else if fh != nil {
if err := fh.Validate(); err != nil {
return false
}
p.hNHCB = nil
p.fhNHCB = fh
}
p.hNHCB = h
p.fhNHCB = nil
} else if fh != nil {
if err := fh.Validate(); err != nil {
return false
}
p.hNHCB = nil
p.fhNHCB = fh
p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + strings.ReplaceAll(p.tempLsetNHCB.DropMetricName().String(), ", ", ",")
p.bytesNHCB = []byte(p.metricStringNHCB)
p.lsetNHCB = p.tempLsetNHCB
p.swapExemplars()
p.ctNHCB = p.tempCT
p.state = stateEmitting
} else {
p.state = stateStart
}
p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + strings.ReplaceAll(p.tempLsetNHCB.DropMetricName().String(), ", ", ",")
p.bytesNHCB = []byte(p.metricStringNHCB)
p.lsetNHCB = p.tempLsetNHCB
p.swapExemplars()
p.tempNHCB = convertnhcb.NewTempHistogram()
p.state = stateEmitting
return true
p.tempNHCB.Reset()
p.tempExemplarCount = 0
p.tempCT = nil
return err == nil
}
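
Taken together, Next, CreatedTimestamp and processNHCB above implement a three-state machine (stateStart, stateCollecting, stateEmitting): classic series sharing base labels are buffered, and the assembled NHCB, now carrying the created timestamp plumbed through tempCT and ctNHCB, is emitted once the labels change or the input ends. A schematic of that collect-then-flush shape, with invented names and none of the real Parser API:

package main

import "fmt"

type state int

const (
	stateStart state = iota
	stateCollecting
	stateEmitting // the sketch below collapses emitting into the flush return
)

// collector buffers samples that share a base name and flushes the batch when
// the name changes, mirroring the parser's collect-then-emit shape.
type collector struct {
	st    state
	base  string
	batch []float64
}

// observe returns the finished batch, if any, before buffering the new sample.
func (c *collector) observe(base string, v float64) (flushed []float64) {
	if c.st == stateCollecting && base != c.base {
		flushed = c.batch
		c.batch = nil
	}
	c.st = stateCollecting
	c.base = base
	c.batch = append(c.batch, v)
	return flushed
}

func main() {
	var c collector
	samples := []struct {
		base string
		v    float64
	}{{"http_req", 2}, {"http_req", 4}, {"rpc_req", 1}}
	for _, s := range samples {
		if out := c.observe(s.base, s.v); out != nil {
			fmt.Println("emit batch:", out) // emit batch: [2 4]
		}
	}
}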

View file

@ -16,6 +16,7 @@ package textparse
import (
"bytes"
"encoding/binary"
"strconv"
"testing"
"github.com/gogo/protobuf/proto"
@ -292,14 +293,14 @@ foobar{quantile="0.99"} 150.1`
lset: labels.FromStrings("__name__", "foo_total"),
t: int64p(1520879607789),
es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "counter-test"), Value: 5}},
// TODO(krajorama): ct: int64p(1520872607123),
ct: int64p(1520872607123),
}, {
m: `foo_total{a="b"}`,
v: 17.0,
lset: labels.FromStrings("__name__", "foo_total", "a", "b"),
t: int64p(1520879607789),
es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "counter-test"), Value: 5}},
// TODO(krajorama): ct: int64p(1520872607123),
ct: int64p(1520872607123),
}, {
m: "bar",
help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far",
@ -310,22 +311,22 @@ foobar{quantile="0.99"} 150.1`
m: "bar_count",
v: 17.0,
lset: labels.FromStrings("__name__", "bar_count"),
// TODO(krajorama): ct: int64p(1520872608124),
ct: int64p(1520872608124),
}, {
m: "bar_sum",
v: 324789.3,
lset: labels.FromStrings("__name__", "bar_sum"),
// TODO(krajorama): ct: int64p(1520872608124),
ct: int64p(1520872608124),
}, {
m: `bar{quantile="0.95"}`,
v: 123.7,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"),
// TODO(krajorama): ct: int64p(1520872608124),
ct: int64p(1520872608124),
}, {
m: `bar{quantile="0.99"}`,
v: 150.0,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"),
// TODO(krajorama): ct: int64p(1520872608124),
ct: int64p(1520872608124),
}, {
m: "baz",
help: "Histogram with the same objective as above's summary",
@ -343,7 +344,7 @@ foobar{quantile="0.99"} 150.1`
CustomValues: []float64{0.0}, // We do not store the +Inf boundary.
},
lset: labels.FromStrings("__name__", "baz"),
// TODO(krajorama): ct: int64p(1520872609125),
ct: int64p(1520872609125),
}, {
m: "fizz_created",
help: "Gauge which shouldn't be parsed as CT",
@ -371,7 +372,7 @@ foobar{quantile="0.99"} 150.1`
CustomValues: []float64{0.0}, // We do not store the +Inf boundary.
},
lset: labels.FromStrings("__name__", "something"),
// TODO(krajorama): ct: int64p(1520430001000),
ct: int64p(1520430001000),
}, {
m: `something{a="b"}`,
shs: &histogram.Histogram{
@ -383,7 +384,7 @@ foobar{quantile="0.99"} 150.1`
CustomValues: []float64{0.0}, // We do not store the +Inf boundary.
},
lset: labels.FromStrings("__name__", "something", "a", "b"),
// TODO(krajorama): ct: int64p(1520430002000),
ct: int64p(1520430002000),
}, {
m: "yum",
help: "Summary with _created between sum and quantiles",
@ -394,22 +395,22 @@ foobar{quantile="0.99"} 150.1`
m: `yum_count`,
v: 20,
lset: labels.FromStrings("__name__", "yum_count"),
// TODO(krajorama): ct: int64p(1520430003000),
ct: int64p(1520430003000),
}, {
m: `yum_sum`,
v: 324789.5,
lset: labels.FromStrings("__name__", "yum_sum"),
// TODO(krajorama): ct: int64p(1520430003000),
ct: int64p(1520430003000),
}, {
m: `yum{quantile="0.95"}`,
v: 123.7,
lset: labels.FromStrings("__name__", "yum", "quantile", "0.95"),
// TODO(krajorama): ct: int64p(1520430003000),
ct: int64p(1520430003000),
}, {
m: `yum{quantile="0.99"}`,
v: 150.0,
lset: labels.FromStrings("__name__", "yum", "quantile", "0.99"),
// TODO(krajorama): ct: int64p(1520430003000),
ct: int64p(1520430003000),
}, {
m: "foobar",
help: "Summary with _created as the first line",
@ -420,22 +421,22 @@ foobar{quantile="0.99"} 150.1`
m: `foobar_count`,
v: 21,
lset: labels.FromStrings("__name__", "foobar_count"),
// TODO(krajorama): ct: int64p(1520430004000),
ct: int64p(1520430004000),
}, {
m: `foobar_sum`,
v: 324789.6,
lset: labels.FromStrings("__name__", "foobar_sum"),
// TODO(krajorama): ct: int64p(1520430004000),
ct: int64p(1520430004000),
}, {
m: `foobar{quantile="0.95"}`,
v: 123.8,
lset: labels.FromStrings("__name__", "foobar", "quantile", "0.95"),
// TODO(krajorama): ct: int64p(1520430004000),
ct: int64p(1520430004000),
}, {
m: `foobar{quantile="0.99"}`,
v: 150.1,
lset: labels.FromStrings("__name__", "foobar", "quantile", "0.99"),
// TODO(krajorama): ct: int64p(1520430004000),
ct: int64p(1520430004000),
}, {
m: "metric",
help: "foo\x00bar",
@ -493,15 +494,14 @@ something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000
{Labels: labels.FromStrings("id", "something-test"), Value: 0.5},
{Labels: labels.FromStrings("id", "something-test"), Value: 8.0},
},
// TODO(krajorama): ct: int64p(1520430001000),
}, {
m: `something{a="b"}`,
shs: &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 9,
Sum: 42123.0,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}, {Offset: 1, Length: 1}},
PositiveBuckets: []int64{8, -7},
PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}},
PositiveBuckets: []int64{8, -8, 1},
CustomValues: []float64{0.0, 1.0}, // We do not store the +Inf boundary.
},
lset: labels.FromStrings("__name__", "something", "a", "b"),
@ -509,7 +509,6 @@ something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000
{Labels: labels.FromStrings("id", "something-test"), Value: 0.0, HasTs: true, Ts: 123321},
{Labels: labels.FromStrings("id", "something-test"), Value: 2e100, HasTs: true, Ts: 123000},
},
// TODO(krajorama): ct: int64p(1520430002000),
},
}
@ -520,107 +519,268 @@ something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000
requireEntries(t, exp, got)
}
// Verify that the NHCBParser does not parse the NHCB when the exponential is present.
func TestNHCBParserProtoBufParser_NoNHCBWhenExponential(t *testing.T) {
inputBuf := createTestProtoBufHistogram(t)
// Initialize the protobuf parser so that it returns classic histograms as
// well when there's both classic and exponential histograms.
p := NewProtobufParser(inputBuf.Bytes(), true, labels.NewSymbolTable())
// Verify the requirement tables from
// https://github.com/prometheus/prometheus/issues/13532 .
// "classic" means the option "always_scrape_classic_histograms".
// "nhcb" means the option "convert_classic_histograms_to_nhcb".
//
// Case 1. Only classic histogram is exposed.
//
// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |.
// | classic=false, nhcb=false | YES | NO | NO |.
// | classic=true, nhcb=false | YES | NO | NO |.
// | classic=false, nhcb=true | NO | NO | YES |.
// | classic=true, nhcb=true | YES | NO | YES |.
//
// Case 2. Both classic and exponential histograms are exposed.
//
// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |.
// | classic=false, nhcb=false | NO | YES | NO |.
// | classic=true, nhcb=false | YES | YES | NO |.
// | classic=false, nhcb=true | NO | YES | NO |.
// | classic=true, nhcb=true | YES | YES | NO |.
//
// Case 3. Only exponential histogram is exposed.
//
// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |.
// | classic=false, nhcb=false | NO | YES | NO |.
// | classic=true, nhcb=false | NO | YES | NO |.
// | classic=false, nhcb=true | NO | YES | NO |.
// | classic=true, nhcb=true | NO | YES | NO |.
func TestNHCBParser_NoNHCBWhenExponential(t *testing.T) {
type requirement struct {
expectClassic bool
expectExponential bool
expectNHCB bool
}
// Initialize the NHCBParser so that it returns classic histograms as well
// when there's both classic and exponential histograms.
p = NewNHCBParser(p, labels.NewSymbolTable(), true)
exp := []parsedEntry{
cases := []map[string]requirement{
// Case 1.
{
m: "test_histogram",
help: "Test histogram with classic and exponential buckets.",
"classic=false, nhcb=false": {expectClassic: true, expectExponential: false, expectNHCB: false},
"classic=true, nhcb=false": {expectClassic: true, expectExponential: false, expectNHCB: false},
"classic=false, nhcb=true": {expectClassic: false, expectExponential: false, expectNHCB: true},
"classic=true, nhcb=true": {expectClassic: true, expectExponential: false, expectNHCB: true},
},
// Case 2.
{
m: "test_histogram",
typ: model.MetricTypeHistogram,
"classic=false, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false},
"classic=true, nhcb=false": {expectClassic: true, expectExponential: true, expectNHCB: false},
"classic=false, nhcb=true": {expectClassic: false, expectExponential: true, expectNHCB: false},
"classic=true, nhcb=true": {expectClassic: true, expectExponential: true, expectNHCB: false},
},
// Case 3.
{
m: "test_histogram",
shs: &histogram.Histogram{
Schema: 3,
Count: 175,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
ZeroCount: 2,
PositiveSpans: []histogram.Span{{Offset: -161, Length: 1}, {Offset: 8, Length: 3}},
NegativeSpans: []histogram.Span{{Offset: -162, Length: 1}, {Offset: 23, Length: 4}},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings("__name__", "test_histogram"),
t: int64p(1234568),
},
{
m: "test_histogram_count",
v: 175,
lset: labels.FromStrings("__name__", "test_histogram_count"),
t: int64p(1234568),
},
{
m: "test_histogram_sum",
v: 0.0008280461746287094,
lset: labels.FromStrings("__name__", "test_histogram_sum"),
t: int64p(1234568),
},
{
m: "test_histogram_bucket\xffle\xff-0.0004899999999999998",
v: 2,
lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0004899999999999998"),
t: int64p(1234568),
},
{
m: "test_histogram_bucket\xffle\xff-0.0003899999999999998",
v: 4,
lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"),
t: int64p(1234568),
},
{
m: "test_histogram_bucket\xffle\xff-0.0002899999999999998",
v: 16,
lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"),
t: int64p(1234568),
},
{
m: "test_histogram_bucket\xffle\xff+Inf",
v: 175,
lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"),
t: int64p(1234568),
},
{
// TODO(krajorama): optimize: this should not be here. In case there's
// an exponential histogram we should not convert the classic histogram
// to NHCB. In the end TSDB will throw this away with
// storage.errDuplicateSampleForTimestamp error at Commit(), but it
// is better to avoid this conversion in the first place.
m: "test_histogram{}",
shs: &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 175,
Sum: 0.0008280461746287094,
PositiveSpans: []histogram.Span{{Length: 4}},
PositiveBuckets: []int64{2, 0, 10, 147},
CustomValues: []float64{-0.0004899999999999998, -0.0003899999999999998, -0.0002899999999999998},
},
lset: labels.FromStrings("__name__", "test_histogram"),
t: int64p(1234568),
"classic=false, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false},
"classic=true, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false},
"classic=false, nhcb=true": {expectClassic: false, expectExponential: true, expectNHCB: false},
"classic=true, nhcb=true": {expectClassic: false, expectExponential: true, expectNHCB: false},
},
}
got := testParse(t, p)
requireEntries(t, exp, got)
// parserFactory creates a Parser, given whether to keep classic histograms.
type parserFactory func(bool) Parser
type testCase struct {
name string
parser parserFactory
classic bool
nhcb bool
exp []parsedEntry
}
type parserOptions struct {
useUTF8sep bool
hasCreatedTimeStamp bool
}
// Defines the parser name, the Parser factory, the test cases
// supported by the parser, and the parser options.
parsers := []func() (string, parserFactory, []int, parserOptions){
func() (string, parserFactory, []int, parserOptions) {
factory := func(keepClassic bool) Parser {
inputBuf := createTestProtoBufHistogram(t)
return NewProtobufParser(inputBuf.Bytes(), keepClassic, labels.NewSymbolTable())
}
return "ProtoBuf", factory, []int{1, 2, 3}, parserOptions{useUTF8sep: true, hasCreatedTimeStamp: true}
},
func() (string, parserFactory, []int, parserOptions) {
factory := func(keepClassic bool) Parser {
input := createTestOpenMetricsHistogram()
return NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
}
return "OpenMetrics", factory, []int{1}, parserOptions{hasCreatedTimeStamp: true}
},
func() (string, parserFactory, []int, parserOptions) {
factory := func(keepClassic bool) Parser {
input := createTestPromHistogram()
return NewPromParser([]byte(input), labels.NewSymbolTable())
}
return "Prometheus", factory, []int{1}, parserOptions{}
},
}
testCases := []testCase{}
for _, parser := range parsers {
for _, classic := range []bool{false, true} {
for _, nhcb := range []bool{false, true} {
parserName, parser, supportedCases, options := parser()
requirementName := "classic=" + strconv.FormatBool(classic) + ", nhcb=" + strconv.FormatBool(nhcb)
tc := testCase{
name: "parser=" + parserName + ", " + requirementName,
parser: parser,
classic: classic,
nhcb: nhcb,
exp: []parsedEntry{},
}
for _, caseNumber := range supportedCases {
caseI := cases[caseNumber-1]
req, ok := caseI[requirementName]
require.True(t, ok, "Case %d does not have requirement %s", caseNumber, requirementName)
metric := "test_histogram" + strconv.Itoa(caseNumber)
tc.exp = append(tc.exp, parsedEntry{
m: metric,
help: "Test histogram " + strconv.Itoa(caseNumber),
})
tc.exp = append(tc.exp, parsedEntry{
m: metric,
typ: model.MetricTypeHistogram,
})
var ct *int64
if options.hasCreatedTimeStamp {
ct = int64p(1000)
}
var bucketForMetric func(string) string
if options.useUTF8sep {
bucketForMetric = func(s string) string {
return "_bucket\xffle\xff" + s
}
} else {
bucketForMetric = func(s string) string {
return "_bucket{le=\"" + s + "\"}"
}
}
if req.expectExponential {
// Always expect exponential histogram first.
exponentialSeries := []parsedEntry{
{
m: metric,
shs: &histogram.Histogram{
Schema: 3,
Count: 175,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
ZeroCount: 2,
PositiveSpans: []histogram.Span{{Offset: -161, Length: 1}, {Offset: 8, Length: 3}},
NegativeSpans: []histogram.Span{{Offset: -162, Length: 1}, {Offset: 23, Length: 4}},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings("__name__", metric),
t: int64p(1234568),
ct: ct,
},
}
tc.exp = append(tc.exp, exponentialSeries...)
}
if req.expectClassic {
// Always expect classic histogram series after exponential.
classicSeries := []parsedEntry{
{
m: metric + "_count",
v: 175,
lset: labels.FromStrings("__name__", metric+"_count"),
t: int64p(1234568),
ct: ct,
},
{
m: metric + "_sum",
v: 0.0008280461746287094,
lset: labels.FromStrings("__name__", metric+"_sum"),
t: int64p(1234568),
ct: ct,
},
{
m: metric + bucketForMetric("-0.0004899999999999998"),
v: 2,
lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0004899999999999998"),
t: int64p(1234568),
ct: ct,
},
{
m: metric + bucketForMetric("-0.0003899999999999998"),
v: 4,
lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0003899999999999998"),
t: int64p(1234568),
ct: ct,
},
{
m: metric + bucketForMetric("-0.0002899999999999998"),
v: 16,
lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0002899999999999998"),
t: int64p(1234568),
ct: ct,
},
{
m: metric + bucketForMetric("+Inf"),
v: 175,
lset: labels.FromStrings("__name__", metric+"_bucket", "le", "+Inf"),
t: int64p(1234568),
ct: ct,
},
}
tc.exp = append(tc.exp, classicSeries...)
}
if req.expectNHCB {
// Always expect NHCB series after classic.
nhcbSeries := []parsedEntry{
{
m: metric + "{}",
shs: &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 175,
Sum: 0.0008280461746287094,
PositiveSpans: []histogram.Span{{Length: 4}},
PositiveBuckets: []int64{2, 0, 10, 147},
CustomValues: []float64{-0.0004899999999999998, -0.0003899999999999998, -0.0002899999999999998},
},
lset: labels.FromStrings("__name__", metric),
t: int64p(1234568),
ct: ct,
},
}
tc.exp = append(tc.exp, nhcbSeries...)
}
}
testCases = append(testCases, tc)
}
}
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
p := tc.parser(tc.classic)
if tc.nhcb {
p = NewNHCBParser(p, labels.NewSymbolTable(), tc.classic)
}
got := testParse(t, p)
requireEntries(t, tc.exp, got)
})
}
}
func createTestProtoBufHistogram(t *testing.T) *bytes.Buffer {
testMetricFamilies := []string{`name: "test_histogram"
help: "Test histogram with classic and exponential buckets."
testMetricFamilies := []string{`name: "test_histogram1"
help: "Test histogram 1"
type: HISTOGRAM
metric: <
histogram: <
created_timestamp: <
seconds: 1
nanos: 1
>
sample_count: 175
sample_sum: 0.0008280461746287094
bucket: <
@ -635,6 +795,72 @@ metric: <
cumulative_count: 16
upper_bound: -0.0002899999999999998
>
>
timestamp_ms: 1234568
>`, `name: "test_histogram2"
help: "Test histogram 2"
type: HISTOGRAM
metric: <
histogram: <
created_timestamp: <
seconds: 1
nanos: 1
>
sample_count: 175
sample_sum: 0.0008280461746287094
bucket: <
cumulative_count: 2
upper_bound: -0.0004899999999999998
>
bucket: <
cumulative_count: 4
upper_bound: -0.0003899999999999998
>
bucket: <
cumulative_count: 16
upper_bound: -0.0002899999999999998
>
schema: 3
zero_threshold: 2.938735877055719e-39
zero_count: 2
negative_span: <
offset: -162
length: 1
>
negative_span: <
offset: 23
length: 4
>
negative_delta: 1
negative_delta: 3
negative_delta: -2
negative_delta: -1
negative_delta: 1
positive_span: <
offset: -161
length: 1
>
positive_span: <
offset: 8
length: 3
>
positive_delta: 1
positive_delta: 2
positive_delta: -1
positive_delta: -1
>
timestamp_ms: 1234568
>`, `name: "test_histogram3"
help: "Test histogram 3"
type: HISTOGRAM
metric: <
histogram: <
created_timestamp: <
seconds: 1
nanos: 1
>
sample_count: 175
sample_sum: 0.0008280461746287094
schema: 3
zero_threshold: 2.938735877055719e-39
zero_count: 2
@ -687,3 +913,72 @@ metric: <
return buf
}
func createTestOpenMetricsHistogram() string {
return `# HELP test_histogram1 Test histogram 1
# TYPE test_histogram1 histogram
test_histogram1_count 175 1234.568
test_histogram1_sum 0.0008280461746287094 1234.568
test_histogram1_bucket{le="-0.0004899999999999998"} 2 1234.568
test_histogram1_bucket{le="-0.0003899999999999998"} 4 1234.568
test_histogram1_bucket{le="-0.0002899999999999998"} 16 1234.568
test_histogram1_bucket{le="+Inf"} 175 1234.568
test_histogram1_created 1
# EOF`
}
func createTestPromHistogram() string {
return `# HELP test_histogram1 Test histogram 1
# TYPE test_histogram1 histogram
test_histogram1_count 175 1234568
test_histogram1_sum 0.0008280461746287094 1234568
test_histogram1_bucket{le="-0.0004899999999999998"} 2 1234568
test_histogram1_bucket{le="-0.0003899999999999998"} 4 1234568
test_histogram1_bucket{le="-0.0002899999999999998"} 16 1234568
test_histogram1_bucket{le="+Inf"} 175 1234568`
}
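Note the timestamp units in the two fixtures above: OpenMetrics timestamps are seconds (1234.568), while the Prometheus text format uses milliseconds (1234568), yet both inputs are expected to parse to the same int64 millisecond timestamp. A minimal sketch of the conversion (math.Round is used here for the sketch; the real parser's exact rounding is not shown in this diff):

package main

import (
	"fmt"
	"math"
)

func main() {
	omSeconds := 1234.568 // OpenMetrics timestamp, in seconds
	ms := int64(math.Round(omSeconds * 1000))
	fmt.Println(ms) // 1234568, same as the Prometheus text format input
}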
func TestNHCBParserErrorHandling(t *testing.T) {
input := `# HELP something Histogram with non cumulative buckets
# TYPE something histogram
something_count 18
something_sum 324789.4
something_created 1520430001
something_bucket{le="0.0"} 18
something_bucket{le="+Inf"} 1
something_count{a="b"} 9
something_sum{a="b"} 42123
something_created{a="b"} 1520430002
something_bucket{a="b",le="0.0"} 1
something_bucket{a="b",le="+Inf"} 9
# EOF`
exp := []parsedEntry{
{
m: "something",
help: "Histogram with non cumulative buckets",
},
{
m: "something",
typ: model.MetricTypeHistogram,
},
// The parser should skip the series with non-cumulative buckets.
{
m: `something{a="b"}`,
shs: &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 9,
Sum: 42123.0,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
PositiveBuckets: []int64{1, 7},
CustomValues: []float64{0.0}, // We do not store the +Inf boundary.
},
lset: labels.FromStrings("__name__", "something", "a", "b"),
ct: int64p(1520430002000),
},
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
p = NewNHCBParser(p, labels.NewSymbolTable(), false)
got := testParse(t, p)
requireEntries(t, exp, got)
}
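To see where the expected PositiveBuckets []int64{1, 7} above come from, here is a minimal sketch (not the parser's actual code path) of the classic-to-NHCB bucket conversion: cumulative bucket counts become absolute per-bucket counts, which are then delta-encoded, and the +Inf boundary is dropped from CustomValues.

package main

import "fmt"

// toNHCBBuckets delta-encodes cumulative classic bucket counts the way
// integer native histograms store them: the first value is the absolute
// count of the first bucket, and every following value is the difference
// to the previous bucket's absolute count.
func toNHCBBuckets(cumulative []int64) []int64 {
	var deltas []int64
	prevCumulative, prevCount := int64(0), int64(0)
	for _, c := range cumulative {
		count := c - prevCumulative // cumulative -> absolute
		deltas = append(deltas, count-prevCount)
		prevCount = count
		prevCumulative = c
	}
	return deltas
}

func main() {
	// le="0.0" and le="+Inf" with cumulative counts 1 and 9, as in the
	// something{a="b"} series above.
	fmt.Println(toNHCBBuckets([]int64{1, 9})) // [1 7]
}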


@ -102,6 +102,8 @@ type OpenMetricsParser struct {
// Created timestamp parsing state.
ct int64
ctHashSet uint64
// ignoreExemplar instructs the parser to not overwrite exemplars (to keep them while peeking ahead).
ignoreExemplar bool
// visitedMFName is the metric family name of the last visited metric when peeking ahead
// for _created series during the execution of the CreatedTimestamp method.
visitedMFName []byte
@ -296,6 +298,14 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
p.skipCTSeries = false
p.ignoreExemplar = true
savedStart := p.start
defer func() {
p.ignoreExemplar = false
p.start = savedStart
p.l = resetLexer
}()
for {
eType, err := p.Next()
if err != nil {
@ -303,12 +313,12 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
// This might result in a partial scrape with a wrong or missing CT, but
// only a spec improvement would help.
// TODO: Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
p.resetCTParseValues(resetLexer)
p.resetCTParseValues()
return nil
}
if eType != EntrySeries {
// Assume we hit different family, no CT line found.
p.resetCTParseValues(resetLexer)
p.resetCTParseValues()
return nil
}
@ -322,14 +332,14 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
peekedHash := p.seriesHash(&buf, peekedName[:len(peekedName)-8])
if peekedHash != currHash {
// Found CT line for a different series, for our series no CT.
p.resetCTParseValues(resetLexer)
p.resetCTParseValues()
return nil
}
// All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds.
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps
ct := int64(p.val * 1000.0)
p.setCTParseValues(ct, currHash, currName, true, resetLexer)
p.setCTParseValues(ct, currHash, currName, true)
return &ct
}
}
@ -371,17 +381,15 @@ func (p *OpenMetricsParser) seriesHash(offsetsArr *[]byte, metricFamilyName []by
// setCTParseValues sets the parser to the state after CreatedTimestamp method was called and CT was found.
// This is useful to prevent re-parsing the same series again and to return the CT value early.
func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, mfName []byte, skipCTSeries bool, resetLexer *openMetricsLexer) {
func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, mfName []byte, skipCTSeries bool) {
p.ct = ct
p.l = resetLexer
p.ctHashSet = ctHashSet
p.visitedMFName = mfName
p.skipCTSeries = skipCTSeries // Do we need to set it?
}
// resetCtParseValues resets the parser to the state before CreatedTimestamp method was called.
func (p *OpenMetricsParser) resetCTParseValues(resetLexer *openMetricsLexer) {
p.l = resetLexer
// resetCTParseValues resets the parser to the state before CreatedTimestamp method was called.
func (p *OpenMetricsParser) resetCTParseValues() {
p.ctHashSet = 0
p.skipCTSeries = true
}
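The refactor above drops the resetLexer parameter from setCTParseValues and resetCTParseValues: instead of threading the saved lexer through every helper, CreatedTimestamp now snapshots state once and restores it in a deferred function. A self-contained sketch of that pattern (miniParser is a hypothetical stand-in; the real parser snapshots p.l and p.start the same way):

package main

import "fmt"

type miniParser struct {
	pos   int
	input []byte
}

// peek looks ahead without permanently consuming input: state is
// snapshotted up front and restored via defer on every return path,
// so no helper has to carry the saved state around.
func (p *miniParser) peek() byte {
	saved := p.pos
	defer func() { p.pos = saved }()
	p.pos++ // simulate consuming a token while looking ahead
	return p.input[p.pos-1]
}

func main() {
	p := &miniParser{input: []byte("abc")}
	fmt.Println(string(p.peek()), p.pos) // prints "a 0": position was restored
}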
@ -417,10 +425,12 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
p.start = p.l.i
p.offsets = p.offsets[:0]
p.eOffsets = p.eOffsets[:0]
p.exemplar = p.exemplar[:0]
p.exemplarVal = 0
p.hasExemplarTs = false
if !p.ignoreExemplar {
p.eOffsets = p.eOffsets[:0]
p.exemplar = p.exemplar[:0]
p.exemplarVal = 0
p.hasExemplarTs = false
}
switch t := p.nextToken(); t {
case tEOFWord:
@ -545,6 +555,16 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
func (p *OpenMetricsParser) parseComment() error {
var err error
if p.ignoreExemplar {
for t := p.nextToken(); t != tLinebreak; t = p.nextToken() {
if t == tEOF {
return errors.New("data does not end with # EOF")
}
}
return nil
}
// Parse the labels.
p.eOffsets, err = p.parseLVals(p.eOffsets, true)
if err != nil {


@ -509,7 +509,7 @@ func yoloString(b []byte) string {
func parseFloat(s string) (float64, error) {
// Keep to pre-Go 1.13 float formats.
if strings.ContainsAny(s, "pP_") {
return 0, fmt.Errorf("unsupported character in float")
return 0, errors.New("unsupported character in float")
}
return strconv.ParseFloat(s, 64)
}
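For context on the guard above (assumed standard-library behavior since Go 1.13): strconv.ParseFloat accepts hexadecimal floats and underscore digit separators, both of which are invalid in the exposition formats, hence the explicit rejection of 'p', 'P' and '_'.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	f, err := strconv.ParseFloat("0x1p-2", 64)
	fmt.Println(f, err) // 0.25 <nil>: legal for Go, illegal in the text format
	f, err = strconv.ParseFloat("1_000", 64)
	fmt.Println(f, err) // 1000 <nil>: underscores are likewise rejected by the guard
}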


@ -34,8 +34,8 @@ import (
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/common/sigv4"
"github.com/prometheus/common/version"
"github.com/prometheus/sigv4"
"go.uber.org/atomic"
"gopkg.in/yaml.v2"


@ -21,6 +21,8 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
@ -380,6 +382,126 @@ func BenchmarkNativeHistograms(b *testing.B) {
}
}
func BenchmarkInfoFunction(b *testing.B) {
// Initialize test storage and generate test series data.
testStorage := teststorage.New(b)
defer testStorage.Close()
start := time.Unix(0, 0)
end := start.Add(2 * time.Hour)
step := 30 * time.Second
// Generate time series data for the benchmark.
generateInfoFunctionTestSeries(b, testStorage, 100, 2000, 3600)
// Define test cases with queries to benchmark.
cases := []struct {
name string
query string
}{
{
name: "Joining info metrics with other metrics with group_left example 1",
query: "rate(http_server_request_duration_seconds_count[2m]) * on (job, instance) group_left (k8s_cluster_name) target_info{k8s_cluster_name=\"us-east\"}",
},
{
name: "Joining info metrics with other metrics with info() example 1",
query: `info(rate(http_server_request_duration_seconds_count[2m]), {k8s_cluster_name="us-east"})`,
},
{
name: "Joining info metrics with other metrics with group_left example 2",
query: "sum by (k8s_cluster_name, http_status_code) (rate(http_server_request_duration_seconds_count[2m]) * on (job, instance) group_left (k8s_cluster_name) target_info)",
},
{
name: "Joining info metrics with other metrics with info() example 2",
query: `sum by (k8s_cluster_name, http_status_code) (info(rate(http_server_request_duration_seconds_count[2m]), {k8s_cluster_name=~".+"}))`,
},
}
// Benchmark each query type.
for _, tc := range cases {
// Initialize the PromQL engine once for all benchmarks.
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 50000000,
Timeout: 100 * time.Second,
EnableAtModifier: true,
EnableNegativeOffset: true,
}
engine := promql.NewEngine(opts)
b.Run(tc.name, func(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer() // Stop the timer to exclude setup time.
qry, err := engine.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step)
require.NoError(b, err)
b.StartTimer()
result := qry.Exec(context.Background())
require.NoError(b, result.Err)
}
})
}
// Report allocations.
b.ReportAllocs()
}
// Helper function to generate target_info and http_server_request_duration_seconds_count series for info function benchmarking.
func generateInfoFunctionTestSeries(tb testing.TB, stor *teststorage.TestStorage, infoSeriesNum, interval, numIntervals int) {
tb.Helper()
ctx := context.Background()
statusCodes := []string{"200", "400", "500"}
// Generate target_info metrics with instance and job labels, and k8s_cluster_name label.
// Generate http_server_request_duration_seconds_count metrics with instance and job labels, and http_status_code label.
// The classic target_info metric is of gauge type.
metrics := make([]labels.Labels, 0, infoSeriesNum+len(statusCodes))
for i := 0; i < infoSeriesNum; i++ {
clusterName := "us-east"
if i >= infoSeriesNum/2 {
clusterName = "eu-south"
}
metrics = append(metrics, labels.FromStrings(
"__name__", "target_info",
"instance", "instance"+strconv.Itoa(i),
"job", "job"+strconv.Itoa(i),
"k8s_cluster_name", clusterName,
))
}
for _, statusCode := range statusCodes {
metrics = append(metrics, labels.FromStrings(
"__name__", "http_server_request_duration_seconds_count",
"instance", "instance0",
"job", "job0",
"http_status_code", statusCode,
))
}
// Append the generated metrics and samples to the storage.
refs := make([]storage.SeriesRef, len(metrics))
for i := 0; i < numIntervals; i++ {
a := stor.Appender(context.Background())
ts := int64(i * interval)
for j, metric := range metrics[:infoSeriesNum] {
ref, _ := a.Append(refs[j], metric, ts, 1)
refs[j] = ref
}
for j, metric := range metrics[infoSeriesNum:] {
ref, _ := a.Append(refs[j+infoSeriesNum], metric, ts, float64(i))
refs[j+infoSeriesNum] = ref
}
require.NoError(tb, a.Commit())
}
stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
stor.DB.Compact(ctx)
}
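A quick sanity check connecting the generator parameters to the benchmark's query range (plain arithmetic, shown for clarity): 3600 intervals of 2000ms each span exactly the two hours queried above.

package main

import (
	"fmt"
	"time"
)

func main() {
	const intervalMS, numIntervals = 2000, 3600
	span := time.Duration(intervalMS*numIntervals) * time.Millisecond
	fmt.Println(span) // 2h0m0s, matching end = start.Add(2 * time.Hour)
}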
func generateNativeHistogramSeries(app storage.Appender, numSeries int) error {
commonLabels := []string{labels.MetricName, "native_histogram_series", "foo", "bar"}
series := make([][]*histogram.Histogram, numSeries)


@ -739,6 +739,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
samplesStats: query.sampleStats,
noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
enableDelayedNameRemoval: ng.enableDelayedNameRemoval,
querier: querier,
}
query.sampleStats.InitStepTracking(start, start, 1)
@ -797,6 +798,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
samplesStats: query.sampleStats,
noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
enableDelayedNameRemoval: ng.enableDelayedNameRemoval,
querier: querier,
}
query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval)
val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr)
@ -1069,6 +1071,7 @@ type evaluator struct {
samplesStats *stats.QuerySamples
noStepSubqueryIntervalFn func(rangeMillis int64) int64
enableDelayedNameRemoval bool
querier storage.Querier
}
// errorf causes a panic with the input formatted into an error.
@ -1441,19 +1444,18 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
return result, warnings
}
// evalVectorSelector generates a Matrix between ev.startTimestamp and ev.endTimestamp (inclusive), each point spaced ev.interval apart, from vs.
// vs.Series has to be expanded before calling this method.
// For every series iterator in vs.Series, the method iterates in ev.interval sized steps from ev.startTimestamp until and including ev.endTimestamp,
// evalSeries generates a Matrix between ev.startTimestamp and ev.endTimestamp (inclusive), each point spaced ev.interval apart, from series given offset.
// For every storage.Series iterator in series, the method iterates in ev.interval sized steps from ev.startTimestamp until and including ev.endTimestamp,
// collecting every corresponding sample (obtained via ev.vectorSelectorSingle) into a Series.
// All of the generated Series are collected into a Matrix, that gets returned.
func (ev *evaluator) evalVectorSelector(ctx context.Context, vs *parser.VectorSelector) Matrix {
func (ev *evaluator) evalSeries(ctx context.Context, series []storage.Series, offset time.Duration, recordOrigT bool) Matrix {
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
mat := make(Matrix, 0, len(vs.Series))
mat := make(Matrix, 0, len(series))
var prevSS *Series
it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta))
var chkIter chunkenc.Iterator
for _, s := range vs.Series {
for _, s := range series {
if err := contextDone(ctx, "expression evaluation"); err != nil {
ev.error(err)
}
@ -1466,7 +1468,7 @@ func (ev *evaluator) evalVectorSelector(ctx context.Context, vs *parser.VectorSe
for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval {
step++
_, f, h, ok := ev.vectorSelectorSingle(it, vs, ts)
origT, f, h, ok := ev.vectorSelectorSingle(it, offset, ts)
if !ok {
continue
}
@ -1480,8 +1482,18 @@ func (ev *evaluator) evalVectorSelector(ctx context.Context, vs *parser.VectorSe
if ss.Floats == nil {
ss.Floats = reuseOrGetFPointSlices(prevSS, numSteps)
}
if recordOrigT {
// This is an info metric, where we want to track the original sample timestamp.
// Info metric values should be 1 by convention, therefore we can re-use this
// space in the sample.
f = float64(origT)
}
ss.Floats = append(ss.Floats, FPoint{F: f, T: ts})
} else {
if recordOrigT {
ev.error(fmt.Errorf("this should be an info metric, with float samples: %s", ss.Metric))
}
point := HPoint{H: h, T: ts}
histSize := point.size()
ev.currentSamples += histSize
@ -1651,6 +1663,8 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
return ev.evalLabelReplace(ctx, e.Args)
case "label_join":
return ev.evalLabelJoin(ctx, e.Args)
case "info":
return ev.evalInfo(ctx, e.Args)
}
if !matrixArg {
@ -1942,7 +1956,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
if err != nil {
ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
}
mat := ev.evalVectorSelector(ctx, e)
mat := ev.evalSeries(ctx, e.Series, e.Offset, false)
return mat, ws
case *parser.MatrixSelector:
@ -1963,6 +1977,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
samplesStats: ev.samplesStats.NewChild(),
noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn,
enableDelayedNameRemoval: ev.enableDelayedNameRemoval,
querier: ev.querier,
}
if e.Step != 0 {
@ -2007,6 +2022,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
samplesStats: ev.samplesStats.NewChild(),
noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn,
enableDelayedNameRemoval: ev.enableDelayedNameRemoval,
querier: ev.querier,
}
res, ws := newEv.eval(ctx, e.Expr)
ev.currentSamples = newEv.currentSamples
@ -2031,7 +2047,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
}
for i := range mat {
if len(mat[i].Floats)+len(mat[i].Histograms) != 1 {
panic(fmt.Errorf("unexpected number of samples"))
panic(errors.New("unexpected number of samples"))
}
for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts += ev.interval {
if len(mat[i].Floats) > 0 {
@ -2107,7 +2123,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co
vec := make(Vector, 0, len(vs.Series))
for i, s := range vs.Series {
it := seriesIterators[i]
t, _, _, ok := ev.vectorSelectorSingle(it, vs, enh.Ts)
t, _, _, ok := ev.vectorSelectorSingle(it, vs.Offset, enh.Ts)
if !ok {
continue
}
@ -2131,10 +2147,10 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co
}
// vectorSelectorSingle evaluates an instant vector for the iterator of one time series.
func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, node *parser.VectorSelector, ts int64) (
func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, offset time.Duration, ts int64) (
int64, float64, *histogram.FloatHistogram, bool,
) {
refTime := ts - durationMilliseconds(node.Offset)
refTime := ts - durationMilliseconds(offset)
var t int64
var v float64
var h *histogram.FloatHistogram
@ -2591,9 +2607,11 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr, pos)
if err != nil {
lastErr = err
continue
}
switch {
case returnBool:
histogramValue = nil
if keep {
floatValue = 1.0
} else {
@ -2712,6 +2730,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh, pos)
if err != nil {
lastErr = err
continue
}
// Catch cases where the scalar is the LHS in a scalar-vector comparison operation.
// We want to always keep the vector element value as the output value, even if it's on the RHS.
@ -2777,77 +2796,84 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 {
// vectorElemBinop evaluates a binary operation between two Vector elements.
func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram, pos posrange.PositionRange) (float64, *histogram.FloatHistogram, bool, error) {
switch op {
case parser.ADD:
if hlhs != nil && hrhs != nil {
res, err := hlhs.Copy().Add(hrhs)
if err != nil {
return 0, nil, false, err
opName := parser.ItemTypeStr[op]
switch {
case hlhs == nil && hrhs == nil:
{
switch op {
case parser.ADD:
return lhs + rhs, nil, true, nil
case parser.SUB:
return lhs - rhs, nil, true, nil
case parser.MUL:
return lhs * rhs, nil, true, nil
case parser.DIV:
return lhs / rhs, nil, true, nil
case parser.POW:
return math.Pow(lhs, rhs), nil, true, nil
case parser.MOD:
return math.Mod(lhs, rhs), nil, true, nil
case parser.EQLC:
return lhs, nil, lhs == rhs, nil
case parser.NEQ:
return lhs, nil, lhs != rhs, nil
case parser.GTR:
return lhs, nil, lhs > rhs, nil
case parser.LSS:
return lhs, nil, lhs < rhs, nil
case parser.GTE:
return lhs, nil, lhs >= rhs, nil
case parser.LTE:
return lhs, nil, lhs <= rhs, nil
case parser.ATAN2:
return math.Atan2(lhs, rhs), nil, true, nil
}
return 0, res.Compact(0), true, nil
}
if hlhs == nil && hrhs == nil {
return lhs + rhs, nil, true, nil
}
if hlhs != nil {
return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "+", "float", pos)
}
return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", "+", "histogram", pos)
case parser.SUB:
if hlhs != nil && hrhs != nil {
res, err := hlhs.Copy().Sub(hrhs)
if err != nil {
return 0, nil, false, err
case hlhs == nil && hrhs != nil:
{
switch op {
case parser.MUL:
return 0, hrhs.Copy().Mul(lhs).Compact(0), true, nil
case parser.ADD, parser.SUB, parser.DIV, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2:
return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", opName, "histogram", pos)
}
return 0, res.Compact(0), true, nil
}
if hlhs == nil && hrhs == nil {
return lhs - rhs, nil, true, nil
}
if hlhs != nil {
return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "-", "float", pos)
}
return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", "-", "histogram", pos)
case parser.MUL:
if hlhs != nil && hrhs == nil {
return 0, hlhs.Copy().Mul(rhs), true, nil
}
if hlhs == nil && hrhs != nil {
return 0, hrhs.Copy().Mul(lhs), true, nil
}
if hlhs != nil && hrhs != nil {
return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "*", "histogram", pos)
}
return lhs * rhs, nil, true, nil
case parser.DIV:
if hlhs != nil && hrhs == nil {
return 0, hlhs.Copy().Div(rhs), true, nil
}
if hrhs != nil {
if hlhs != nil {
return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "/", "histogram", pos)
case hlhs != nil && hrhs == nil:
{
switch op {
case parser.MUL:
return 0, hlhs.Copy().Mul(rhs).Compact(0), true, nil
case parser.DIV:
return 0, hlhs.Copy().Div(rhs).Compact(0), true, nil
case parser.ADD, parser.SUB, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2:
return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", opName, "float", pos)
}
}
case hlhs != nil && hrhs != nil:
{
switch op {
case parser.ADD:
res, err := hlhs.Copy().Add(hrhs)
if err != nil {
return 0, nil, false, err
}
return 0, res.Compact(0), true, nil
case parser.SUB:
res, err := hlhs.Copy().Sub(hrhs)
if err != nil {
return 0, nil, false, err
}
return 0, res.Compact(0), true, nil
case parser.EQLC:
// This operation expects that both histograms are compacted.
return 0, hlhs, hlhs.Equals(hrhs), nil
case parser.NEQ:
// This operation expects that both histograms are compacted.
return 0, hlhs, !hlhs.Equals(hrhs), nil
case parser.MUL, parser.DIV, parser.POW, parser.MOD, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2:
return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", opName, "histogram", pos)
}
return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", "/", "histogram", pos)
}
return lhs / rhs, nil, true, nil
case parser.POW:
return math.Pow(lhs, rhs), nil, true, nil
case parser.MOD:
return math.Mod(lhs, rhs), nil, true, nil
case parser.EQLC:
return lhs, nil, lhs == rhs, nil
case parser.NEQ:
return lhs, nil, lhs != rhs, nil
case parser.GTR:
return lhs, nil, lhs > rhs, nil
case parser.LSS:
return lhs, nil, lhs < rhs, nil
case parser.GTE:
return lhs, nil, lhs >= rhs, nil
case parser.LTE:
return lhs, nil, lhs <= rhs, nil
case parser.ATAN2:
return math.Atan2(lhs, rhs), nil, true, nil
}
panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op))
}
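The rewrite above groups vectorElemBinop by operand shape (float/float, float/histogram, histogram/float, histogram/histogram) rather than by operator, so every invalid combination now falls through to an IncompatibleTypesInBinOp annotation instead of being handled ad hoc per operator. A minimal illustration of the one commutative case, scalar scaling, using the real model/histogram package (histogram fields simplified):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{Count: 10, Sum: 100}
	// MUL is valid with the histogram on either side; Copy avoids
	// mutating the operand, mirroring hlhs.Copy().Mul(rhs) above.
	scaled := h.Copy().Mul(2)
	fmt.Println(scaled.Count, scaled.Sum) // 20 200
}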
@ -2909,16 +2935,34 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
case h != nil:
// Ignore histograms for STDVAR and STDDEV.
group.seen = false
if op == parser.STDVAR {
annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stdvar", e.Expr.PositionRange()))
} else {
annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stddev", e.Expr.PositionRange()))
}
case math.IsNaN(f), math.IsInf(f, 0):
group.floatValue = math.NaN()
default:
group.floatValue = 0
}
case parser.QUANTILE:
if h != nil {
group.seen = false
annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("quantile", e.Expr.PositionRange()))
}
group.heap = make(vectorByValueHeap, 1)
group.heap[0] = Sample{F: f}
case parser.GROUP:
group.floatValue = 1
case parser.MIN, parser.MAX:
if h != nil {
group.seen = false
if op == parser.MIN {
annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("min", e.Expr.PositionRange()))
} else {
annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("max", e.Expr.PositionRange()))
}
}
}
continue
}
@ -3017,11 +3061,19 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// Do nothing. Required to avoid the panic in `default:` below.
case parser.MAX:
if h != nil {
annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("max", e.Expr.PositionRange()))
continue
}
if group.floatValue < f || math.IsNaN(group.floatValue) {
group.floatValue = f
}
case parser.MIN:
if h != nil {
annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("min", e.Expr.PositionRange()))
continue
}
if group.floatValue > f || math.IsNaN(group.floatValue) {
group.floatValue = f
}
@ -3035,9 +3087,19 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
delta := f - group.floatMean
group.floatMean += delta / group.groupCount
group.floatValue += delta * (f - group.floatMean)
} else {
if op == parser.STDVAR {
annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stdvar", e.Expr.PositionRange()))
} else {
annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stddev", e.Expr.PositionRange()))
}
}
case parser.QUANTILE:
if h != nil {
annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("quantile", e.Expr.PositionRange()))
continue
}
group.heap = append(group.heap, Sample{F: f})
default:
@ -3273,7 +3335,11 @@ func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []
var buf []byte
for _, s := range vec {
enh.resetBuilder(s.Metric)
enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64))
if s.H == nil {
enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64))
} else {
enh.lb.Set(valueLabel, s.H.String())
}
metric := enh.lb.Labels()
// Considering the count_values()
@ -3385,7 +3451,7 @@ func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotat
return nil
}
// groupingKey builds and returns the grouping key for the given metric and
// generateGroupingKey builds and returns the grouping key for the given metric and
// grouping labels.
func generateGroupingKey(metric labels.Labels, grouping []string, without bool, buf []byte) (uint64, []byte) {
if without {
@ -3610,7 +3676,7 @@ func detectHistogramStatsDecoding(expr parser.Expr) {
if n, ok := node.(*parser.BinaryExpr); ok {
detectHistogramStatsDecoding(n.LHS)
detectHistogramStatsDecoding(n.RHS)
return fmt.Errorf("stop")
return errors.New("stop")
}
n, ok := (node).(*parser.VectorSelector)
@ -3632,7 +3698,7 @@ func detectHistogramStatsDecoding(expr parser.Expr) {
break
}
}
return fmt.Errorf("stop")
return errors.New("stop")
})
}
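The errors.New("stop") above is a sentinel: parser.Inspect stops descending into a subtree as soon as the visitor returns a non-nil error, and the error value itself is never inspected. A minimal sketch of the idiom (hypothetical query string):

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	expr, err := parser.ParseExpr(`rate(http_requests_total[5m])`)
	if err != nil {
		panic(err)
	}
	// The visitor runs for the root only; returning a non-nil error
	// prevents Inspect from descending into the node's children.
	parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
		fmt.Printf("%T\n", node)
		return errors.New("stop")
	})
}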


@ -19,7 +19,6 @@ import (
"fmt"
"math"
"sort"
"strconv"
"strings"
"sync"
"testing"
@ -39,7 +38,6 @@ import (
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/stats"
"github.com/prometheus/prometheus/util/teststorage"
"github.com/prometheus/prometheus/util/testutil"
)
@ -3150,452 +3148,6 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
}
}
func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
// TODO(codesome): Integrate histograms into the PromQL testing framework
// and write more tests there.
cases := []struct {
histograms []histogram.Histogram
expected histogram.FloatHistogram
expectedAvg histogram.FloatHistogram
}{
{
histograms: []histogram.Histogram{
{
CounterResetHint: histogram.GaugeType,
Schema: 0,
Count: 25,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 4,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 2},
},
NegativeBuckets: []int64{2, 2, -3, 8},
},
{
CounterResetHint: histogram.GaugeType,
Schema: 0,
Count: 41,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
},
{
CounterResetHint: histogram.GaugeType,
Schema: 0,
Count: 41,
Sum: 1111.1,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
},
{
CounterResetHint: histogram.GaugeType,
Schema: 1, // Everything is 0 just to make the count 4 so avg has nicer numbers.
},
},
expected: histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Schema: 0,
ZeroThreshold: 0.001,
ZeroCount: 14,
Count: 107,
Sum: 4691.2,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 7},
},
PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 6},
{Offset: 3, Length: 3},
},
NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4},
},
expectedAvg: histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Schema: 0,
ZeroThreshold: 0.001,
ZeroCount: 3.5,
Count: 26.75,
Sum: 1172.8,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 7},
},
PositiveBuckets: []float64{0.75, 2, 0.5, 1.25, 0.75, 0.5, 0.5},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 6},
{Offset: 3, Length: 3},
},
NegativeBuckets: []float64{0.5, 1.5, 2, 1, 3.75, 2.25, 2.5, 2.5, 1},
},
},
}
idx0 := int64(0)
for _, c := range cases {
for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
storage := teststorage.New(t)
t.Cleanup(func() { storage.Close() })
seriesName := "sparse_histogram_series"
seriesNameOverTime := "sparse_histogram_series_over_time"
engine := newTestEngine(t)
ts := idx0 * int64(10*time.Minute/time.Millisecond)
app := storage.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("__name__", "float_series", "idx", "0"), ts, 42)
require.NoError(t, err)
for idx1, h := range c.histograms {
lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1))
// Since we mutate h later, we need to create a copy here.
var err error
if floatHisto {
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil))
} else {
_, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
}
require.NoError(t, err)
lbls = labels.FromStrings("__name__", seriesNameOverTime)
newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond)
// Since we mutate h later, we need to create a copy here.
if floatHisto {
_, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat(nil))
} else {
_, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil)
}
require.NoError(t, err)
}
require.NoError(t, app.Commit())
queryAndCheck := func(queryString string, ts int64, exp promql.Vector) {
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(context.Background())
require.NoError(t, res.Err)
require.Empty(t, res.Warnings)
vector, err := res.Vector()
require.NoError(t, err)
testutil.RequireEqual(t, exp, vector)
}
queryAndCheckAnnotations := func(queryString string, ts int64, expWarnings annotations.Annotations) {
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(context.Background())
require.NoError(t, res.Err)
require.Equal(t, expWarnings, res.Warnings)
}
// sum().
queryString := fmt.Sprintf("sum(%s)", seriesName)
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
queryString = `sum({idx="0"})`
var annos annotations.Annotations
annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(posrange.PositionRange{Start: 4, End: 13}))
queryAndCheckAnnotations(queryString, ts, annos)
// + operator.
queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName)
for idx := 1; idx < len(c.histograms); idx++ {
queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx)
}
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
// count().
queryString = fmt.Sprintf("count(%s)", seriesName)
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}})
// avg().
queryString = fmt.Sprintf("avg(%s)", seriesName)
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})
offset := int64(len(c.histograms) - 1)
newTs := ts + offset*int64(time.Minute/time.Millisecond)
// sum_over_time().
queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1)
queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels(), DropName: true}})
// avg_over_time().
queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1)
queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels(), DropName: true}})
})
idx0++
}
}
}
func TestNativeHistogram_SubOperator(t *testing.T) {
// TODO(codesome): Integrate histograms into the PromQL testing framework
// and write more tests there.
cases := []struct {
histograms []histogram.Histogram
expected histogram.FloatHistogram
}{
{
histograms: []histogram.Histogram{
{
Schema: 0,
Count: 41,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
},
{
Schema: 0,
Count: 11,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 3,
PositiveSpans: []histogram.Span{
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{2, -1},
NegativeSpans: []histogram.Span{
{Offset: 2, Length: 2},
},
NegativeBuckets: []int64{3, -1},
},
},
expected: histogram.FloatHistogram{
Schema: 0,
Count: 30,
Sum: 1111.1,
ZeroThreshold: 0.001,
ZeroCount: 2,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 4},
},
PositiveBuckets: []float64{1, 1, 2, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 2},
{Offset: 1, Length: 1},
{Offset: 4, Length: 3},
},
NegativeBuckets: []float64{1, 1, 7, 5, 5, 2},
},
},
{
histograms: []histogram.Histogram{
{
Schema: 0,
Count: 41,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
},
{
Schema: 1,
Count: 11,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 3,
PositiveSpans: []histogram.Span{
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{2, -1},
NegativeSpans: []histogram.Span{
{Offset: 2, Length: 2},
},
NegativeBuckets: []int64{3, -1},
},
},
expected: histogram.FloatHistogram{
Schema: 0,
Count: 30,
Sum: 1111.1,
ZeroThreshold: 0.001,
ZeroCount: 2,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 1},
{Offset: 1, Length: 5},
},
PositiveBuckets: []float64{1, 1, 2, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 4, Length: 3},
},
NegativeBuckets: []float64{-2, 2, 2, 7, 5, 5, 2},
},
},
{
histograms: []histogram.Histogram{
{
Schema: 1,
Count: 11,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 3,
PositiveSpans: []histogram.Span{
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{2, -1},
NegativeSpans: []histogram.Span{
{Offset: 2, Length: 2},
},
NegativeBuckets: []int64{3, -1},
},
{
Schema: 0,
Count: 41,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
},
},
expected: histogram.FloatHistogram{
Schema: 0,
Count: -30,
Sum: -1111.1,
ZeroThreshold: 0.001,
ZeroCount: -2,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 1},
{Offset: 1, Length: 5},
},
PositiveBuckets: []float64{-1, -1, -2, -1, -1, -1},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 4, Length: 3},
},
NegativeBuckets: []float64{2, -2, -2, -7, -5, -5, -2},
},
},
}
idx0 := int64(0)
for _, c := range cases {
for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
engine := newTestEngine(t)
storage := teststorage.New(t)
t.Cleanup(func() { storage.Close() })
seriesName := "sparse_histogram_series"
ts := idx0 * int64(10*time.Minute/time.Millisecond)
app := storage.Appender(context.Background())
for idx1, h := range c.histograms {
lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1))
// Since we mutate h later, we need to create a copy here.
var err error
if floatHisto {
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil))
} else {
_, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
}
require.NoError(t, err)
}
require.NoError(t, app.Commit())
queryAndCheck := func(queryString string, exp promql.Vector) {
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(context.Background())
require.NoError(t, res.Err)
vector, err := res.Vector()
require.NoError(t, err)
if len(vector) == len(exp) {
for i, e := range exp {
got := vector[i].H
if got != e.H {
// Error messages are better if we compare structs, not pointers.
require.Equal(t, *e.H, *got)
}
}
}
testutil.RequireEqual(t, exp, vector)
}
// - operator.
queryString := fmt.Sprintf(`%s{idx="0"}`, seriesName)
for idx := 1; idx < len(c.histograms); idx++ {
queryString += fmt.Sprintf(` - ignoring(idx) %s{idx="%d"}`, seriesName, idx)
}
queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
})
}
idx0++
}
}
func TestQueryLookbackDelta(t *testing.T) {
var (
load = `load 5m
@ -3928,3 +3480,65 @@ func (s mockSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
}
return storage.ChainSampleIteratorFromIterators(it, iterables)
}
func TestEvaluationWithDelayedNameRemovalDisabled(t *testing.T) {
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
EnableAtModifier: true,
MaxSamples: 10000,
Timeout: 10 * time.Second,
EnableDelayedNameRemoval: false,
}
engine := promqltest.NewTestEngineWithOpts(t, opts)
promqltest.RunTest(t, `
load 5m
metric{env="1"} 0 60 120
another_metric{env="1"} 60 120 180
# Does not drop __name__ for vector selector
eval instant at 10m metric{env="1"}
metric{env="1"} 120
# Drops __name__ for unary operators
eval instant at 10m -metric
{env="1"} -120
# Drops __name__ for binary operators
eval instant at 10m metric + another_metric
{env="1"} 300
# Does not drop __name__ for binary comparison operators
eval instant at 10m metric <= another_metric
metric{env="1"} 120
# Drops __name__ for binary comparison operators with "bool" modifier
eval instant at 10m metric <= bool another_metric
{env="1"} 1
# Drops __name__ for vector-scalar operations
eval instant at 10m metric * 2
{env="1"} 240
# Drops __name__ for instant-vector functions
eval instant at 10m clamp(metric, 0, 100)
{env="1"} 100
# Drops __name__ for round function
eval instant at 10m round(metric)
{env="1"} 120
# Drops __name__ for range-vector functions
eval instant at 10m rate(metric{env="1"}[10m])
{env="1"} 0.2
# Does not drop __name__ for last_over_time function
eval instant at 10m last_over_time(metric{env="1"}[10m])
metric{env="1"} 120
# Drops __name__ for other _over_time functions
eval instant at 10m max_over_time(metric{env="1"}[10m])
{env="1"} 120
`, engine)
}


@ -474,6 +474,10 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
return enh.Out, nil
}
for _, el := range vec {
if el.H != nil {
// Process only float samples.
continue
}
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
}
@ -491,6 +495,10 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
vec := vals[0].(Vector)
maxVal := vals[1].(Vector)[0].F
for _, el := range vec {
if el.H != nil {
// Process only float samples.
continue
}
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
}
@ -508,6 +516,10 @@ func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
vec := vals[0].(Vector)
minVal := vals[1].(Vector)[0].F
for _, el := range vec {
if el.H != nil {
// Process only float samples.
continue
}
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
}
@ -538,6 +550,9 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
continue
}
f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{
Metric: el.Metric,
F: f,
@ -1671,6 +1686,7 @@ var FunctionCalls = map[string]FunctionCall{
"hour": funcHour,
"idelta": funcIdelta,
"increase": funcIncrease,
"info": nil,
"irate": funcIrate,
"label_replace": nil, // evalLabelReplace not called via this map.
"label_join": nil, // evalLabelJoin not called via this map.

454
promql/info.go Normal file

@ -0,0 +1,454 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promql
import (
"context"
"errors"
"fmt"
"slices"
"strings"
"github.com/grafana/regexp"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/annotations"
)
const targetInfo = "target_info"
// identifyingLabels are the labels we consider as identifying for info metrics.
// Currently hard coded, so we don't need knowledge of individual info metrics.
var identifyingLabels = []string{"instance", "job"}
// evalInfo implements the info PromQL function.
func (ev *evaluator) evalInfo(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) {
val, annots := ev.eval(ctx, args[0])
mat := val.(Matrix)
// Map from data label name to matchers.
dataLabelMatchers := map[string][]*labels.Matcher{}
var infoNameMatchers []*labels.Matcher
if len(args) > 1 {
// TODO: Introduce a dedicated LabelSelector type.
labelSelector := args[1].(*parser.VectorSelector)
for _, m := range labelSelector.LabelMatchers {
dataLabelMatchers[m.Name] = append(dataLabelMatchers[m.Name], m)
if m.Name == labels.MetricName {
infoNameMatchers = append(infoNameMatchers, m)
}
}
} else {
infoNameMatchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, targetInfo)}
}
// Don't try to enrich info series.
ignoreSeries := map[int]struct{}{}
loop:
for i, s := range mat {
name := s.Metric.Get(labels.MetricName)
for _, m := range infoNameMatchers {
if m.Matches(name) {
ignoreSeries[i] = struct{}{}
continue loop
}
}
}
selectHints := ev.infoSelectHints(args[0])
infoSeries, ws, err := ev.fetchInfoSeries(ctx, mat, ignoreSeries, dataLabelMatchers, selectHints)
if err != nil {
ev.error(err)
}
annots.Merge(ws)
res, ws := ev.combineWithInfoSeries(ctx, mat, infoSeries, ignoreSeries, dataLabelMatchers)
annots.Merge(ws)
return res, annots
}
// infoSelectHints calculates the storage.SelectHints for selecting info series, given expr (first argument to info call).
func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints {
var nodeTimestamp *int64
var offset int64
parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
switch n := node.(type) {
case *parser.VectorSelector:
if n.Timestamp != nil {
nodeTimestamp = n.Timestamp
}
offset = durationMilliseconds(n.OriginalOffset)
return errors.New("end traversal")
default:
return nil
}
})
start := ev.startTimestamp
end := ev.endTimestamp
if nodeTimestamp != nil {
// The timestamp on the selector overrides everything.
start = *nodeTimestamp
end = *nodeTimestamp
}
// Reduce the start by one ms less than the lookback delta,
// because we want to exclude samples that are precisely the
// lookback delta before the eval time.
start -= durationMilliseconds(ev.lookbackDelta) - 1
start -= offset
end -= offset
return storage.SelectHints{
Start: start,
End: end,
Step: ev.interval,
Func: "info",
}
}
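A worked example of the off-by-one above (timestamps assumed): with an eval timestamp of 600000ms and the default 5m lookback delta, the hint start becomes 300001, so a sample sitting at exactly 300000, precisely the lookback delta before eval time, falls outside the selected window.

package main

import (
	"fmt"
	"time"
)

func main() {
	evalTS := int64(600_000)                     // eval time in ms (assumed)
	lookback := (5 * time.Minute).Milliseconds() // default lookback delta
	start := evalTS - (lookback - 1)             // one ms less than the delta
	fmt.Println(start)                           // 300001
}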
// fetchInfoSeries fetches info series given matching identifying labels in mat.
// Series in ignoreSeries are not fetched.
// dataLabelMatchers may be mutated.
func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeries map[int]struct{}, dataLabelMatchers map[string][]*labels.Matcher, selectHints storage.SelectHints) (Matrix, annotations.Annotations, error) {
// A map of values for all identifying labels we are interested in.
idLblValues := map[string]map[string]struct{}{}
for i, s := range mat {
if _, exists := ignoreSeries[i]; exists {
continue
}
// Register relevant values per identifying label for this series.
for _, l := range identifyingLabels {
val := s.Metric.Get(l)
if val == "" {
continue
}
if idLblValues[l] == nil {
idLblValues[l] = map[string]struct{}{}
}
idLblValues[l][val] = struct{}{}
}
}
if len(idLblValues) == 0 {
return nil, nil, nil
}
// Generate regexps for every interesting value per identifying label.
var sb strings.Builder
idLblRegexps := make(map[string]string, len(idLblValues))
for name, vals := range idLblValues {
sb.Reset()
i := 0
for v := range vals {
if i > 0 {
sb.WriteRune('|')
}
sb.WriteString(regexp.QuoteMeta(v))
i++
}
idLblRegexps[name] = sb.String()
}
var infoLabelMatchers []*labels.Matcher
for name, re := range idLblRegexps {
infoLabelMatchers = append(infoLabelMatchers, labels.MustNewMatcher(labels.MatchRegexp, name, re))
}
var nameMatcher *labels.Matcher
for name, ms := range dataLabelMatchers {
for i, m := range ms {
if m.Name == labels.MetricName {
nameMatcher = m
ms = slices.Delete(ms, i, i+1)
}
infoLabelMatchers = append(infoLabelMatchers, m)
}
if len(ms) > 0 {
dataLabelMatchers[name] = ms
} else {
delete(dataLabelMatchers, name)
}
}
if nameMatcher == nil {
// Default to using the target_info metric.
infoLabelMatchers = append([]*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, targetInfo)}, infoLabelMatchers...)
}
infoIt := ev.querier.Select(ctx, false, &selectHints, infoLabelMatchers...)
infoSeries, ws, err := expandSeriesSet(ctx, infoIt)
if err != nil {
return nil, ws, err
}
infoMat := ev.evalSeries(ctx, infoSeries, 0, true)
return infoMat, ws, nil
}
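A small sketch of the matcher construction above (hypothetical label values; the real code uses github.com/grafana/regexp, the stdlib regexp is shown here): every observed value of an identifying label is quoted and OR-ed into a single regexp matcher.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	vals := []string{"10.0.0.1:9090", "10.0.0.2:9090"} // seen "instance" values
	var sb strings.Builder
	for i, v := range vals {
		if i > 0 {
			sb.WriteRune('|')
		}
		sb.WriteString(regexp.QuoteMeta(v))
	}
	// Becomes the matcher instance=~"10\.0\.0\.1:9090|10\.0\.0\.2:9090".
	fmt.Println(sb.String())
}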
// combineWithInfoSeries combines mat with select data labels from infoMat.
func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Matrix, ignoreSeries map[int]struct{}, dataLabelMatchers map[string][]*labels.Matcher) (Matrix, annotations.Annotations) {
buf := make([]byte, 0, 1024)
lb := labels.NewScratchBuilder(0)
sigFunction := func(name string) func(labels.Labels) string {
return func(lset labels.Labels) string {
lb.Reset()
lb.Add(labels.MetricName, name)
lset.MatchLabels(true, identifyingLabels...).Range(func(l labels.Label) {
lb.Add(l.Name, l.Value)
})
lb.Sort()
return string(lb.Labels().Bytes(buf))
}
}
infoMetrics := map[string]struct{}{}
for _, is := range infoMat {
lblMap := is.Metric.Map()
infoMetrics[lblMap[labels.MetricName]] = struct{}{}
}
sigfs := make(map[string]func(labels.Labels) string, len(infoMetrics))
for name := range infoMetrics {
sigfs[name] = sigFunction(name)
}
// Keep a copy of the original point slices so they can be returned to the pool.
origMatrices := []Matrix{
make(Matrix, len(mat)),
make(Matrix, len(infoMat)),
}
copy(origMatrices[0], mat)
copy(origMatrices[1], infoMat)
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
originalNumSamples := ev.currentSamples
// Create an output vector that is as big as the input matrix with
// the most time series.
biggestLen := max(len(mat), len(infoMat))
baseVector := make(Vector, 0, len(mat))
infoVector := make(Vector, 0, len(infoMat))
enh := &EvalNodeHelper{
Out: make(Vector, 0, biggestLen),
}
type seriesAndTimestamp struct {
Series
ts int64
}
seriess := make(map[uint64]seriesAndTimestamp, biggestLen) // Output series by series hash.
tempNumSamples := ev.currentSamples
// For every base series, compute signature per info metric.
baseSigs := make([]map[string]string, 0, len(mat))
for _, s := range mat {
sigs := make(map[string]string, len(infoMetrics))
for infoName := range infoMetrics {
sigs[infoName] = sigfs[infoName](s.Metric)
}
baseSigs = append(baseSigs, sigs)
}
infoSigs := make([]string, 0, len(infoMat))
for _, s := range infoMat {
name := s.Metric.Map()[labels.MetricName]
infoSigs = append(infoSigs, sigfs[name](s.Metric))
}
var warnings annotations.Annotations
for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval {
if err := contextDone(ctx, "expression evaluation"); err != nil {
ev.error(err)
}
// Reset number of samples in memory after each timestamp.
ev.currentSamples = tempNumSamples
// Gather input vectors for this timestamp.
baseVector, _ = ev.gatherVector(ts, mat, baseVector, nil, nil)
infoVector, _ = ev.gatherVector(ts, infoMat, infoVector, nil, nil)
enh.Ts = ts
result, err := ev.combineWithInfoVector(baseVector, infoVector, ignoreSeries, baseSigs, infoSigs, enh, dataLabelMatchers)
if err != nil {
ev.error(err)
}
enh.Out = result[:0] // Reuse result vector.
vecNumSamples := result.TotalSamples()
ev.currentSamples += vecNumSamples
// When we reset currentSamples to tempNumSamples during the next iteration of the loop it also
// needs to include the samples from the result here, as they're still in memory.
tempNumSamples += vecNumSamples
ev.samplesStats.UpdatePeak(ev.currentSamples)
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env))
}
// Add samples in result vector to output series.
for _, sample := range result {
h := sample.Metric.Hash()
ss, exists := seriess[h]
if exists {
if ss.ts == ts { // If we've seen this output series before at this timestamp, it's a duplicate.
ev.errorf("vector cannot contain metrics with the same labelset")
}
ss.ts = ts
} else {
ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts}
}
addToSeries(&ss.Series, enh.Ts, sample.F, sample.H, numSteps)
seriess[h] = ss
}
}
// Reuse the original point slices.
for _, m := range origMatrices {
for _, s := range m {
putFPointSlice(s.Floats)
putHPointSlice(s.Histograms)
}
}
// Assemble the output matrix. By the time we get here we know we don't have too many samples.
numSamples := 0
output := make(Matrix, 0, len(seriess))
for _, ss := range seriess {
numSamples += len(ss.Floats) + totalHPointSize(ss.Histograms)
output = append(output, ss.Series)
}
ev.currentSamples = originalNumSamples + numSamples
ev.samplesStats.UpdatePeak(ev.currentSamples)
return output, warnings
}
// combineWithInfoVector combines base and info Vectors.
// Base series in ignoreSeries are not combined.
func (ev *evaluator) combineWithInfoVector(base, info Vector, ignoreSeries map[int]struct{}, baseSigs []map[string]string, infoSigs []string, enh *EvalNodeHelper, dataLabelMatchers map[string][]*labels.Matcher) (Vector, error) {
if len(base) == 0 {
return nil, nil // Short-circuit: nothing is going to match.
}
// All samples from the info Vector hashed by the matching label/values.
if enh.rightSigs == nil {
enh.rightSigs = make(map[string]Sample, len(enh.Out))
} else {
clear(enh.rightSigs)
}
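// Deduplicate info series per signature, keeping the sample with the newest original timestamp.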
for i, s := range info {
if s.H != nil {
ev.error(errors.New("info sample should be float"))
}
// We encode original info sample timestamps via the float value.
origT := int64(s.F)
sig := infoSigs[i]
if existing, exists := enh.rightSigs[sig]; exists {
// We encode original info sample timestamps via the float value.
existingOrigT := int64(existing.F)
switch {
case existingOrigT > origT:
// Keep the other info sample, since it's newer.
case existingOrigT < origT:
// Keep this info sample, since it's newer.
enh.rightSigs[sig] = s
default:
// The two info samples have the same timestamp - conflict.
name := s.Metric.Map()[labels.MetricName]
ev.errorf("found duplicate series for info metric %s", name)
}
} else {
enh.rightSigs[sig] = s
}
}
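// Enrich each base sample with data labels from matching info series.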
for i, bs := range base {
if _, exists := ignoreSeries[i]; exists {
// This series should not be enriched with info metric data labels.
enh.Out = append(enh.Out, Sample{
Metric: bs.Metric,
F: bs.F,
H: bs.H,
})
continue
}
baseLabels := bs.Metric.Map()
enh.resetBuilder(labels.Labels{})
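// The scratch builder starts out empty and accumulates only the info data labels to add.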
// For every info metric name, try to find an info series with the same signature.
seenInfoMetrics := map[string]struct{}{}
for infoName, sig := range baseSigs[i] {
is, exists := enh.rightSigs[sig]
if !exists {
continue
}
if _, exists := seenInfoMetrics[infoName]; exists {
continue
}
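// Copy eligible data labels from the info series, failing on conflicting values for the same label name.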
err := is.Metric.Validate(func(l labels.Label) error {
if l.Name == labels.MetricName {
return nil
}
if _, exists := dataLabelMatchers[l.Name]; len(dataLabelMatchers) > 0 && !exists {
// Not among the specified data label matchers.
return nil
}
if v := enh.lb.Get(l.Name); v != "" && v != l.Value {
return fmt.Errorf("conflicting label: %s", l.Name)
}
if _, exists := baseLabels[l.Name]; exists {
// Skip labels already on the base metric.
return nil
}
enh.lb.Set(l.Name, l.Value)
return nil
})
if err != nil {
return nil, err
}
seenInfoMetrics[infoName] = struct{}{}
}
infoLbls := enh.lb.Labels()
if infoLbls.Len() == 0 {
// If there's at least one data label matcher not matching the empty string,
// we have to ignore this series as there are no matching info series.
allMatchersMatchEmpty := true
for _, ms := range dataLabelMatchers {
for _, m := range ms {
if !m.Matches("") {
allMatchersMatchEmpty = false
break
}
}
}
if !allMatchersMatchEmpty {
continue
}
}
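// Rebuild the output series labels: base labels first, then the gathered info data labels.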
enh.resetBuilder(bs.Metric)
infoLbls.Range(func(l labels.Label) {
enh.lb.Set(l.Name, l.Value)
})
enh.Out = append(enh.Out, Sample{
Metric: enh.lb.Labels(),
F: bs.F,
H: bs.H,
})
}
return enh.Out, nil
}

promql/info_test.go (new file, +140 lines)

@ -0,0 +1,140 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promql_test
import (
"testing"
"github.com/prometheus/prometheus/promql/promqltest"
)
// The "info" function is experimental. This is why we write those tests here for now instead of promqltest/testdata/info.test.
func TestInfo(t *testing.T) {
engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
promqltest.RunTest(t, `
load 5m
metric{instance="a", job="1", label="value"} 0 1 2
metric_not_matching_target_info{instance="a", job="2", label="value"} 0 1 2
metric_with_overlapping_label{instance="a", job="1", label="value", data="base"} 0 1 2
target_info{instance="a", job="1", data="info", another_data="another info"} 1 1 1
build_info{instance="a", job="1", build_data="build"} 1 1 1
# Include one info metric data label.
eval range from 0m to 10m step 5m info(metric, {data=~".+"})
metric{data="info", instance="a", job="1", label="value"} 0 1 2
# Include all info metric data labels.
eval range from 0m to 10m step 5m info(metric)
metric{data="info", instance="a", job="1", label="value", another_data="another info"} 0 1 2
# Try including all info metric data labels, but with non-matching identifying labels.
eval range from 0m to 10m step 5m info(metric_not_matching_target_info)
metric_not_matching_target_info{instance="a", job="2", label="value"} 0 1 2
# Try including a certain info metric data label, with a non-matching matcher that doesn't accept empty labels.
# The metric is ignored, since there's a data label matcher not matching empty labels
# and no matching info series.
eval range from 0m to 10m step 5m info(metric, {non_existent=~".+"})
# Include a certain info metric data label together with a non-matching matcher accepting empty labels.
# Since the non_existent matcher matches empty labels, it's simply ignored when there's no match.
# XXX: This case has to include a matcher not matching empty labels, due to the PromQL limitation
# that vector selectors have to contain at least one matcher not accepting empty labels.
# We might need a construct other than a vector selector to get around this limitation.
eval range from 0m to 10m step 5m info(metric, {data=~".+", non_existent=~".*"})
metric{data="info", instance="a", job="1", label="value"} 0 1 2
# Info series data labels overlapping with those of base series are ignored.
eval range from 0m to 10m step 5m info(metric_with_overlapping_label)
metric_with_overlapping_label{data="base", instance="a", job="1", label="value", another_data="another info"} 0 1 2
# Include data labels from target_info specifically.
eval range from 0m to 10m step 5m info(metric, {__name__="target_info"})
metric{data="info", instance="a", job="1", label="value", another_data="another info"} 0 1 2
# Try to include all data labels from a non-existent info metric.
eval range from 0m to 10m step 5m info(metric, {__name__="non_existent"})
metric{instance="a", job="1", label="value"} 0 1 2
# Try to include a certain data label from a non-existent info metric.
eval range from 0m to 10m step 5m info(metric, {__name__="non_existent", data=~".+"})
# Include data labels from build_info.
eval range from 0m to 10m step 5m info(metric, {__name__="build_info"})
metric{instance="a", job="1", label="value", build_data="build"} 0 1 2
# Include data labels from build_info and target_info.
eval range from 0m to 10m step 5m info(metric, {__name__=~".+_info"})
metric{instance="a", job="1", label="value", build_data="build", data="info", another_data="another info"} 0 1 2
# Info metrics themselves are ignored when it comes to enriching with info metric data labels.
eval range from 0m to 10m step 5m info(build_info, {__name__=~".+_info", build_data=~".+"})
build_info{instance="a", job="1", build_data="build"} 1 1 1
clear
# Overlapping target_info series.
load 5m
metric{instance="a", job="1", label="value"} 0 1 2
target_info{instance="a", job="1", data="info", another_data="another info"} 1 1 _
target_info{instance="a", job="1", data="updated info", another_data="another info"} _ _ 1
# Conflicting info series are resolved by picking the latest sample.
eval range from 0m to 10m step 5m info(metric)
metric{data="info", instance="a", job="1", label="value", another_data="another info"} 0 1 _
metric{data="updated info", instance="a", job="1", label="value", another_data="another info"} _ _ 2
clear
# Non-overlapping target_info series.
load 5m
metric{instance="a", job="1", label="value"} 0 1 2
target_info{instance="a", job="1", data="info"} 1 1 stale
target_info{instance="a", job="1", data="updated info"} _ _ 1
# Include info metric data labels from a metric whose data labels change over time.
eval range from 0m to 10m step 5m info(metric)
metric{data="info", instance="a", job="1", label="value"} 0 1 _
metric{data="updated info", instance="a", job="1", label="value"} _ _ 2
clear
# The info series selector matches a histogram series, but info metrics should be of float type.
load 5m
metric{instance="a", job="1", label="value"} 0 1 2
histogram{instance="a", job="1"} {{schema:1 sum:3 count:22 buckets:[5 10 7]}}
eval_fail range from 0m to 10m step 5m info(metric, {__name__="histogram"})
clear
# Series with skipped scrape.
load 1m
metric{instance="a", job="1", label="value"} 0 _ 2 3 4
target_info{instance="a", job="1", data="info"} 1 _ 1 1 1
# Lookback also works for the info series.
eval range from 1m to 4m step 1m info(metric)
metric{data="info", instance="a", job="1", label="value"} 0 2 3 4
# The @ operator also works with info.
# Note that we pick a timestamp that's missing a sample; lookback should pick the previous sample.
eval range from 1m to 4m step 1m info(metric @ 60)
metric{data="info", instance="a", job="1", label="value"} 0 0 0 0
# The offset operator also works with info.
eval range from 1m to 4m step 1m info(metric offset 1m)
metric{data="info", instance="a", job="1", label="value"} 0 0 2 3
`, engine)
}


@ -208,6 +208,10 @@ type VectorSelector struct {
UnexpandedSeriesSet storage.SeriesSet
Series []storage.Series
// BypassEmptyMatcherCheck is true when the VectorSelector isn't required to have at least one matcher not matching the empty string.
// This is the case when VectorSelector is used to represent the info function's second argument.
BypassEmptyMatcherCheck bool
PosRange posrange.PositionRange
}


@ -224,6 +224,13 @@ var Functions = map[string]*Function{
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
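// "info" takes an optional second argument (Variadic: 1): a selector over info series data labels.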
"info": {
Name: "info",
ArgTypes: []ValueType{ValueTypeVector, ValueTypeVector},
ReturnType: ValueTypeVector,
Experimental: true,
Variadic: 1,
},
"irate": {
Name: "irate",
ArgTypes: []ValueType{ValueTypeMatrix},


@ -68,6 +68,12 @@ func (i ItemType) IsAggregatorWithParam() bool {
return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE || i == LIMITK || i == LIMIT_RATIO
}
// IsExperimentalAggregator returns true if the Item is an experimental aggregation function
// gated behind EnableExperimentalFunctions.
func (i ItemType) IsExperimentalAggregator() bool {
return i == LIMITK || i == LIMIT_RATIO
}
// IsKeyword returns true if the Item corresponds to a keyword.
// Returns false otherwise.
func (i ItemType) IsKeyword() bool { return i > keywordsStart && i < keywordsEnd }


@ -447,8 +447,8 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateE
desiredArgs := 1
if ret.Op.IsAggregatorWithParam() {
if !EnableExperimentalFunctions && ret.Op.IsExperimentalAggregator() {
p.addParseErrf(ret.PositionRange(), "%s() is experimental and must be enabled with --enable-feature=promql-experimental-functions", ret.Op)
return
}
desiredArgs = 2
@ -784,6 +784,19 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
}
}
if n.Func.Name == "info" && len(n.Args) > 1 {
// Check that the argument type is correct first
if n.Args[1].Type() != ValueTypeVector {
p.addParseErrf(node.PositionRange(), "expected type %s in %s, got %s", DocumentedType(ValueTypeVector), fmt.Sprintf("call to function %q", n.Func.Name), DocumentedType(n.Args[1].Type()))
}
// Check that the vector selector in the argument doesn't contain a metric name
if n.Args[1].(*VectorSelector).Name != "" {
p.addParseErrf(n.Args[1].PositionRange(), "expected label selectors only, got vector selector instead")
}
// Set the VectorSelector flag to bypass the empty matcher check
n.Args[1].(*VectorSelector).BypassEmptyMatcherCheck = true
}
for i, arg := range n.Args {
if i >= len(n.Func.ArgTypes) {
if n.Func.Variadic == 0 {
@ -830,17 +843,19 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
// metric name is a non-empty matcher.
break
}
if !n.BypassEmptyMatcherCheck {
// A Vector selector must contain at least one non-empty matcher to prevent
// implicit selection of all metrics (e.g. by a typo).
notEmpty := false
for _, lm := range n.LabelMatchers {
if lm != nil && !lm.Matches("") {
notEmpty = true
break
}
}
if !notEmpty {
p.addParseErrf(n.PositionRange(), "vector selector must contain at least one non-empty matcher")
}
}
case *NumberLiteral, *StringLiteral:

Some files were not shown because too many files have changed in this diff.