Merge branch 'main' into jhesketh/clamp

Signed-off-by: Joshua Hesketh <josh@nitrotech.org>

Commit ed2668bbda, authored by Joshua Hesketh on 2024-11-12 10:20:58 +11:00, committed by GitHub.
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)

205 changed files with 8509 additions and 2089 deletions

View file

@@ -12,7 +12,7 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}

View file

@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'prometheus'
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}

View file

@@ -13,12 +13,12 @@ jobs:
       # should also be updated.
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/setup_environment
         with:
           enable_npm: true
-      - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 test-flags=""
+      - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
       - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
       - run: make -C documentation/examples/remote_storage
       - run: make -C documentation/examples
@@ -29,8 +29,8 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/setup_environment
       - run: go test --tags=dedupelabels ./...
       - run: GOARCH=386 go test ./cmd/prometheus
@@ -48,7 +48,7 @@ jobs:
       # The go version in this image should be N-1 wrt test_go.
       image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - run: make build
       # Don't run NPM build; don't run race-detector.
       - run: make test GO_ONLY=1 test-flags=""
@@ -62,8 +62,8 @@ jobs:
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/setup_environment
         with:
           enable_go: false
@@ -79,7 +79,7 @@ jobs:
     name: Go tests on Windows
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:
           go-version: 1.23.x
@@ -96,7 +96,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - run: go install ./cmd/promtool/.
       - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
       - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
@@ -121,8 +121,8 @@ jobs:
       matrix:
         thread: [ 0, 1, 2 ]
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/build
         with:
           promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
@@ -146,8 +146,8 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/build
         with:
           parallelism: 12
@@ -169,7 +169,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Go
         uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:
@@ -182,7 +182,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Go
         uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:
@@ -191,11 +191,11 @@ jobs:
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
       - name: Lint
-        uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
+        uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
         with:
           args: --verbose
           # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v1.60.2
+          version: v1.61.0
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'
@@ -208,8 +208,8 @@ jobs:
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/publish_main
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -225,8 +225,8 @@ jobs:
       ||
       (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/publish_release
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -240,10 +240,10 @@ jobs:
     needs: [test_ui, codeql]
     steps:
       - name: Checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - name: Install nodejs
-        uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4
+        uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0
         with:
           node-version-file: "web/ui/.nvmrc"
           registry-url: "https://registry.npmjs.org"

View file

@@ -24,7 +24,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Initialize CodeQL
         uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10

View file

@@ -18,7 +18,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Set docker hub repo name
         run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
       - name: Push README to Dockerhub
@@ -40,7 +40,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Set quay.io org name
         run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
       - name: Set quay.io repo name

View file

@@ -21,7 +21,7 @@ jobs:
           fuzz-seconds: 600
           dry-run: false
       - name: Upload Crash
-        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
+        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
         if: failure() && steps.build.outcome == 'success'
         with:
           name: artifacts

View file

@@ -13,7 +13,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - run: ./scripts/sync_repo_files.sh
         env:
           GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}

View file

@@ -21,7 +21,7 @@ jobs:
     steps:
       - name: "Checkout code"
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # tag=v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2
         with:
           persist-credentials: false
@@ -37,7 +37,7 @@ jobs:
       # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
       # format to the repository Actions tab.
       - name: "Upload artifact"
-        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # tag=v4.4.0
+        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # tag=v4.4.3
         with:
           name: SARIF file
           path: results.sarif

View file

@@ -109,7 +109,7 @@ linters-settings:
     extra-rules: true
   perfsprint:
     # Optimizes `fmt.Errorf`.
-    errorf: false
+    errorf: true
   revive:
     # By default, revive will enable only the linting rules that are named in the configuration file.
     # So, it's needed to explicitly enable all required rules here.
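
Note on the `errorf: true` flip above: with this setting, perfsprint also flags `fmt.Errorf` calls that contain no formatting verbs and suggests the allocation-free `errors.New` instead — exactly the mechanical rewrite applied across the promtool files later in this commit. A minimal before/after sketch in Go:

    package main

    import (
        "errors"
        "fmt"
    )

    // Flagged by perfsprint once errorf is enabled: there are no
    // formatting verbs, so the fmt machinery is pure overhead.
    var errBefore = fmt.Errorf("not a native histogram")

    // The replacement perfsprint suggests.
    var errAfter = errors.New("not a native histogram")

    func main() { fmt.Println(errBefore, errAfter) }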

View file

@@ -2,6 +2,18 @@
 ## unreleased

+* [CHANGE] Scraping: Remove implicit fallback to the Prometheus text format in case of invalid/missing Content-Type and fail the scrape instead. Add ability to specify a `fallback_scrape_protocol` in the scrape config. #15136
+* [CHANGE] Remote-write: default enable_http2 to false. #15219
+* [CHANGE] Scraping: normalize "le" and "quantile" label values upon ingestion. #15164
+* [CHANGE] Scraping: config `scrape_classic_histograms` was renamed to `always_scrape_classic_histograms`. #15178
+* [CHANGE] Config: remove expand-external-labels flag, expand external labels env vars by default. #14657
+* [CHANGE] Disallow configuring AM with the v1 api. #13883
+* [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710
+* [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing #15196
+* [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694
+* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941
+* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941
+* [BUGFIX] OTLP receiver: Preserve colons when generating metric names in suffix adding mode (this mode is always enabled, unless one uses Prometheus as a library). #15251
 * [BUGFIX] Clamp functions: Ignore any points with native histograms. #15169

 ## 3.0.0-beta.1 / 2024-10-09
@@ -18,7 +30,6 @@
 * [ENHANCEMENT] PromQL: Introduce exponential interpolation for native histograms. #14677
 * [ENHANCEMENT] TSDB: Add support for ingestion of out-of-order native histogram samples. #14850, #14546
 * [ENHANCEMENT] Alerts: remove metrics for removed Alertmanagers. #13909
-* [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694
 * [ENHANCEMENT] Kubernetes SD: Support sidecar containers in endpoint discovery. #14929
 * [ENHANCEMENT] Consul SD: Support catalog filters. #11224
 * [PERF] TSDB: Parallelize deletion of postings after head compaction. #14975
@@ -50,12 +61,17 @@ As is traditional with a beta release, we do **not** recommend users install 3.0
 * [CHANGE] Remove deprecated `remote-write-receiver`,`promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526
 * [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643
 * [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875
+* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769
 * [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029

-## 2.55.0-rc.0 / 2024-09-20
+## 2.55.1 / 2024-01-04
+
+* [BUGFIX] `round()` function did not remove `__name__` label. #15250
+
+## 2.55.0 / 2024-10-22

+* [FEATURE] PromQL: Add experimental `info` function. #14495
 * [FEATURE] Support UTF-8 characters in label names - feature flag `utf8-names`. #14482, #14880, #14736, #14727
-* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769
 * [FEATURE] Scraping: Add the ability to set custom `http_headers` in config. #14817
 * [FEATURE] Scraping: Support feature flag `created-timestamp-zero-ingestion` in OpenMetrics. #14356, #14815
 * [FEATURE] Scraping: `scrape_failure_log_file` option to log failures to a file. #14734
@@ -63,19 +79,21 @@ As is traditional with a beta release, we do **not** recommend users install 3.0
 * [FEATURE] Remote-Write: Support Google Cloud Monitoring authorization. #14346
 * [FEATURE] Promtool: `tsdb create-blocks` new option to add labels. #14403
 * [FEATURE] Promtool: `promtool test` adds `--junit` flag to format results. #14506
+* [FEATURE] TSDB: Add `delayed-compaction` feature flag, for people running many Prometheus to randomize timing. #12532
 * [ENHANCEMENT] OTLP receiver: Warn on exponential histograms with zero count and non-zero sum. #14706
 * [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612
 * [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379
 * [ENHANCEMENT] Remote-Write: Don't reshard if we haven't successfully sent a sample since last update. #14450
 * [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477
-* [ENHANCEMENT] PromQL: Experimental `sort_by_label` and `sort_by_label_desc` sort by all labels when label is equal. #14655
+* [ENHANCEMENT] PromQL: Experimental `sort_by_label` and `sort_by_label_desc` sort by all labels when label is equal. #14655, #14985
 * [ENHANCEMENT] PromQL: Clarify error message logged when Go runtime panic occurs during query evaluation. #14621
 * [ENHANCEMENT] PromQL: Use Kahan summation for better accuracy in `avg` and `avg_over_time`. #14413
 * [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816
 * [ENHANCEMENT] API: Support multiple listening addresses. #14665
 * [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934
-* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948
+* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948, #15120
 * [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729
+* [BUGFIX] PromQL: make sort_by_label stable. #14985
 * [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147
 * [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622
 * [BUGFIX] Scraping: exemplars could be dropped in protobuf scraping. #14810

View file

@@ -2,6 +2,7 @@ ARG ARCH="amd64"
 ARG OS="linux"
 FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest
 LABEL maintainer="The Prometheus Authors <prometheus-developers@googlegroups.com>"
+LABEL org.opencontainers.image.source="https://github.com/prometheus/prometheus"

 ARG ARCH="amd64"
 ARG OS="linux"

View file

@@ -190,21 +190,20 @@ type flagConfig struct {
     queryConcurrency    int
     queryMaxSamples     int
     RemoteFlushDeadline model.Duration
-    nameEscapingScheme  string

     maxNotificationsSubscribers int

     enableAutoReload   bool
     autoReloadInterval model.Duration

-    featureList []string
+    memlimitEnable bool
     memlimitRatio  float64
+
+    featureList []string
     // These options are extracted from featureList
     // for ease of use.
-    enableExpandExternalLabels bool
     enablePerStepStats       bool
     enableAutoGOMAXPROCS     bool
-    enableAutoGOMEMLIMIT     bool
     enableConcurrentRuleEval bool

     prometheusURL   string
     corsRegexString string
@@ -220,9 +219,6 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
     opts := strings.Split(f, ",")
     for _, o := range opts {
         switch o {
-        case "expand-external-labels":
-            c.enableExpandExternalLabels = true
-            logger.Info("Experimental expand-external-labels enabled")
         case "exemplar-storage":
             c.tsdb.EnableExemplarStorage = true
             logger.Info("Experimental in-memory exemplar storage enabled")
@@ -247,9 +243,6 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
                 c.autoReloadInterval, _ = model.ParseDuration("1s")
             }
             logger.Info("Enabled automatic configuration file reloading. Checking for configuration changes every", "interval", c.autoReloadInterval)
-        case "auto-gomemlimit":
-            c.enableAutoGOMEMLIMIT = true
-            logger.Info("Automatically set GOMEMLIMIT to match Linux container or system memory limit")
         case "concurrent-rule-eval":
             c.enableConcurrentRuleEval = true
             logger.Info("Experimental concurrent rule evaluation enabled.")
@@ -336,6 +329,8 @@ func main() {
     a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry. Can be repeated.").
         Default("0.0.0.0:9090").StringsVar(&cfg.web.ListenAddresses)

+    a.Flag("auto-gomemlimit", "Automatically set GOMEMLIMIT to match Linux container or system memory limit").
+        Default("true").BoolVar(&cfg.memlimitEnable)
     a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory").
         Default("0.9").FloatVar(&cfg.memlimitRatio)
@@ -437,6 +432,9 @@ func main() {
     serverOnlyFlag(a, "storage.tsdb.samples-per-chunk", "Target number of samples per chunk.").
         Default("120").Hidden().IntVar(&cfg.tsdb.SamplesPerChunk)

+    serverOnlyFlag(a, "storage.tsdb.delayed-compaction.max-percent", "Sets the upper limit for the random compaction delay, specified as a percentage of the head chunk range. 100 means the compaction can be delayed by up to the entire head chunk range. Only effective when the delayed-compaction feature flag is enabled.").
+        Default("10").Hidden().IntVar(&cfg.tsdb.CompactionDelayMaxPercent)
+
     agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage.").
         Default("data-agent/").StringVar(&cfg.agentStoragePath)
@@ -516,7 +514,7 @@ func main() {
     a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
         Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)

-    a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+    a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
         Default("").StringsVar(&cfg.featureList)

     a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)
@@ -552,15 +550,6 @@ func main() {
         os.Exit(1)
     }

-    if cfg.nameEscapingScheme != "" {
-        scheme, err := model.ToEscapingScheme(cfg.nameEscapingScheme)
-        if err != nil {
-            fmt.Fprintf(os.Stderr, `Invalid name escaping scheme: %q; Needs to be one of "values", "underscores", or "dots"`, cfg.nameEscapingScheme)
-            os.Exit(1)
-        }
-        model.NameEscapingScheme = scheme
-    }
-
     if agentMode && len(serverOnlyFlags) > 0 {
         fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags)
         os.Exit(3)
@@ -595,7 +584,7 @@ func main() {
     // Throw error for invalid config before starting other components.
     var cfgFile *config.Config
-    if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, promslog.NewNopLogger()); err != nil {
+    if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, promslog.NewNopLogger()); err != nil {
         absPath, pathErr := filepath.Abs(cfg.configFile)
         if pathErr != nil {
             absPath = cfg.configFile
@@ -667,6 +656,12 @@ func main() {
             cfg.tsdb.MaxBlockDuration = maxBlockDuration
         }

+        // Delayed compaction checks
+        if cfg.tsdb.EnableDelayedCompaction && (cfg.tsdb.CompactionDelayMaxPercent > 100 || cfg.tsdb.CompactionDelayMaxPercent <= 0) {
+            logger.Warn("The --storage.tsdb.delayed-compaction.max-percent should have a value between 1 and 100. Using default", "default", tsdb.DefaultCompactionDelayMaxPercent)
+            cfg.tsdb.CompactionDelayMaxPercent = tsdb.DefaultCompactionDelayMaxPercent
+        }
     }

     noStepSubqueryInterval := &safePromQLNoStepSubqueryInterval{}
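
For context on what the clamped value controls: per the new flag help above, the random compaction delay is capped at CompactionDelayMaxPercent of the head chunk range, which is why values outside (0, 100] are reset to the default. A sketch of that relationship, with assumed semantics rather than the tsdb package's actual code:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // randomCompactionDelay is illustrative only: it draws a delay of at
    // most maxPercent% of the head chunk range, mirroring the meaning of
    // --storage.tsdb.delayed-compaction.max-percent. maxPercent is assumed
    // to be in (0, 100], as enforced by the clamp above.
    func randomCompactionDelay(chunkRange time.Duration, maxPercent int) time.Duration {
        upper := chunkRange * time.Duration(maxPercent) / 100
        return time.Duration(rand.Int63n(int64(upper)))
    }

    func main() {
        // A 2h head chunk range with the default 10% cap yields a delay in [0, 12m).
        fmt.Println(randomCompactionDelay(2*time.Hour, 10))
    }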
@@ -770,7 +765,7 @@ func main() {
         }
     }

-    if cfg.enableAutoGOMEMLIMIT {
+    if cfg.memlimitEnable {
         if _, err := memlimit.SetGoMemLimitWithOpts(
             memlimit.WithRatio(cfg.memlimitRatio),
             memlimit.WithProvider(
@@ -1145,7 +1140,7 @@ func main() {
             for {
                 select {
                 case <-hup:
-                    if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
+                    if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
                         logger.Error("Error reloading config", "err", err)
                     } else if cfg.enableAutoReload {
                         if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil {
@@ -1155,7 +1150,7 @@ func main() {
                         }
                     }
                 case rc := <-webHandler.Reload():
-                    if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
+                    if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
                         logger.Error("Error reloading config", "err", err)
                         rc <- err
                     } else {
@@ -1180,7 +1175,7 @@ func main() {
                     }
                     logger.Info("Configuration file change detected, reloading the configuration.")

-                    if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
+                    if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
                         logger.Error("Error reloading config", "err", err)
                     } else {
                         checksum = currentChecksum
@@ -1210,7 +1205,7 @@ func main() {
                 return nil
             }

-            if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, func(bool) {}, reloaders...); err != nil {
+            if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, func(bool) {}, reloaders...); err != nil {
                 return fmt.Errorf("error loading config from %q: %w", cfg.configFile, err)
             }
@@ -1437,7 +1432,7 @@ type reloader struct {
     reloader func(*config.Config) error
 }

-func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger *slog.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) {
+func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) {
     start := time.Now()
     timingsLogger := logger
     logger.Info("Loading configuration file", "filename", filename)
@@ -1453,7 +1448,7 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
         }
     }()

-    conf, err := config.LoadFile(filename, agentMode, expandExternalLabels, logger)
+    conf, err := config.LoadFile(filename, agentMode, logger)
     if err != nil {
         return fmt.Errorf("couldn't load configuration (--config.file=%q): %w", filename, err)
     }
@@ -1643,6 +1638,9 @@ func (s *readyStorage) Appender(ctx context.Context) storage.Appender {
 type notReadyAppender struct{}

+// SetOptions does nothing in this appender implementation.
+func (n notReadyAppender) SetOptions(opts *storage.AppendOptions) {}
+
 func (n notReadyAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
     return 0, tsdb.ErrNotReady
 }
@@ -1797,6 +1795,7 @@ type tsdbOptions struct {
     EnableMemorySnapshotOnShutdown bool
     EnableNativeHistograms         bool
     EnableDelayedCompaction        bool
+    CompactionDelayMaxPercent      int
     EnableOverlappingCompaction    bool
     EnableOOONativeHistograms      bool
 }
@@ -1821,6 +1820,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
     EnableOOONativeHistograms:      opts.EnableOOONativeHistograms,
     OutOfOrderTimeWindow:           opts.OutOfOrderTimeWindow,
     EnableDelayedCompaction:        opts.EnableDelayedCompaction,
+    CompactionDelayMaxPercent:      opts.CompactionDelayMaxPercent,
     EnableOverlappingCompaction:    opts.EnableOverlappingCompaction,
 }
 }

View file

@@ -125,6 +125,7 @@ func TestFailedStartupExitCode(t *testing.T) {
     if testing.Short() {
         t.Skip("skipping test in short mode.")
     }
+    t.Parallel()

     fakeInputFile := "fake-input-file"
     expectedExitStatus := 2
@@ -211,83 +212,125 @@ func TestWALSegmentSizeBounds(t *testing.T) {
     if testing.Short() {
         t.Skip("skipping test in short mode.")
     }
+    t.Parallel()

-    for size, expectedExitStatus := range map[string]int{"9MB": 1, "257MB": 1, "10": 2, "1GB": 1, "12MB": 0} {
-        prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))
+    for _, tc := range []struct {
+        size     string
+        exitCode int
+    }{
+        {
+            size:     "9MB",
+            exitCode: 1,
+        },
+        {
+            size:     "257MB",
+            exitCode: 1,
+        },
+        {
+            size:     "10",
+            exitCode: 2,
+        },
+        {
+            size:     "1GB",
+            exitCode: 1,
+        },
+        {
+            size:     "12MB",
+            exitCode: 0,
+        },
+    } {
+        t.Run(tc.size, func(t *testing.T) {
+            t.Parallel()
+            prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+tc.size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))

             // Log stderr in case of failure.
             stderr, err := prom.StderrPipe()
             require.NoError(t, err)
             go func() {
                 slurp, _ := io.ReadAll(stderr)
                 t.Log(string(slurp))
             }()

             err = prom.Start()
             require.NoError(t, err)

-        if expectedExitStatus == 0 {
+            if tc.exitCode == 0 {
                 done := make(chan error, 1)
                 go func() { done <- prom.Wait() }()
                 select {
                 case err := <-done:
                     require.Fail(t, "prometheus should be still running: %v", err)
                 case <-time.After(startupTime):
                     prom.Process.Kill()
                     <-done
                 }
-            continue
+                return
             }

             err = prom.Wait()
             require.Error(t, err)
             var exitError *exec.ExitError
             require.ErrorAs(t, err, &exitError)
             status := exitError.Sys().(syscall.WaitStatus)
-        require.Equal(t, expectedExitStatus, status.ExitStatus())
+            require.Equal(t, tc.exitCode, status.ExitStatus())
+        })
     }
 }

 func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
+    t.Parallel()
     if testing.Short() {
         t.Skip("skipping test in short mode.")
     }
+    t.Parallel()

-    for size, expectedExitStatus := range map[string]int{"512KB": 1, "1MB": 0} {
-        prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))
+    for _, tc := range []struct {
+        size     string
+        exitCode int
+    }{
+        {
+            size:     "512KB",
+            exitCode: 1,
+        },
+        {
+            size:     "1MB",
+            exitCode: 0,
+        },
+    } {
+        t.Run(tc.size, func(t *testing.T) {
+            t.Parallel()
+            prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+tc.size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))

             // Log stderr in case of failure.
             stderr, err := prom.StderrPipe()
             require.NoError(t, err)
             go func() {
                 slurp, _ := io.ReadAll(stderr)
                 t.Log(string(slurp))
             }()

             err = prom.Start()
             require.NoError(t, err)

-        if expectedExitStatus == 0 {
+            if tc.exitCode == 0 {
                 done := make(chan error, 1)
                 go func() { done <- prom.Wait() }()
                 select {
                 case err := <-done:
                     require.Fail(t, "prometheus should be still running: %v", err)
                 case <-time.After(startupTime):
                     prom.Process.Kill()
                     <-done
                 }
-            continue
+                return
             }

             err = prom.Wait()
             require.Error(t, err)
             var exitError *exec.ExitError
             require.ErrorAs(t, err, &exitError)
             status := exitError.Sys().(syscall.WaitStatus)
-        require.Equal(t, expectedExitStatus, status.ExitStatus())
+            require.Equal(t, tc.exitCode, status.ExitStatus())
+        })
     }
 }
@@ -353,6 +396,8 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
 }

 func TestAgentSuccessfulStartup(t *testing.T) {
+    t.Parallel()
+
     prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig)
     require.NoError(t, prom.Start())
@@ -371,6 +416,8 @@ func TestAgentSuccessfulStartup(t *testing.T) {
 }

 func TestAgentFailedStartupWithServerFlag(t *testing.T) {
+    t.Parallel()
+
     prom := exec.Command(promPath, "-test.main", "--agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)

     output := bytes.Buffer{}
@@ -398,6 +445,8 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) {
 }

 func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
+    t.Parallel()
+
     prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
     require.NoError(t, prom.Start())
@@ -419,6 +468,7 @@ func TestModeSpecificFlags(t *testing.T) {
     if testing.Short() {
         t.Skip("skipping test in short mode.")
     }
+    t.Parallel()

     testcases := []struct {
         mode string
@@ -433,6 +483,7 @@
     for _, tc := range testcases {
         t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) {
+            t.Parallel()
             args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"}

             if tc.mode == "agent" {
@@ -484,6 +535,8 @@ func TestDocumentation(t *testing.T) {
     if runtime.GOOS == "windows" {
         t.SkipNow()
     }
+    t.Parallel()
+
     ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
     defer cancel()
@@ -508,6 +561,8 @@ func TestDocumentation(t *testing.T) {
 }

 func TestRwProtoMsgFlagParser(t *testing.T) {
+    t.Parallel()
+
     defaultOpts := config.RemoteWriteProtoMsgs{
         config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2,
     }

View file

@@ -34,6 +34,7 @@ func TestStartupInterrupt(t *testing.T) {
     if testing.Short() {
         t.Skip("skipping test in short mode.")
     }
+    t.Parallel()

     port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t))

View file

@@ -456,6 +456,7 @@ func TestQueryLog(t *testing.T) {
     if testing.Short() {
         t.Skip("skipping test in short mode.")
     }
+    t.Parallel()

     cwd, err := os.Getwd()
     require.NoError(t, err)
@@ -474,6 +475,7 @@ func TestQueryLog(t *testing.T) {
         }

         t.Run(p.String(), func(t *testing.T) {
+            t.Parallel()
             p.run(t)
         })
     }

View file

@@ -34,8 +34,8 @@ import (
 )

 var (
-    errNotNativeHistogram = fmt.Errorf("not a native histogram")
-    errNotEnoughData      = fmt.Errorf("not enough data")
+    errNotNativeHistogram = errors.New("not a native histogram")
+    errNotEnoughData      = errors.New("not enough data")

     outputHeader = `Bucket stats for each histogram series over time
 ------------------------------------------------
@@ -169,7 +169,7 @@ func querySamples(ctx context.Context, api v1.API, query string, end time.Time)
     matrix, ok := values.(model.Matrix)
     if !ok {
-        return nil, fmt.Errorf("query of buckets resulted in non-Matrix")
+        return nil, errors.New("query of buckets resulted in non-Matrix")
     }

     return matrix, nil
@@ -259,7 +259,7 @@ func getBucketCountsAtTime(matrix model.Matrix, numBuckets, timeIdx int) ([]int,
         prev := matrix[i].Values[timeIdx]
         // Assume the results are nicely aligned.
         if curr.Timestamp != prev.Timestamp {
-            return counts, fmt.Errorf("matrix result is not time aligned")
+            return counts, errors.New("matrix result is not time aligned")
         }
         counts[i+1] = int(curr.Value - prev.Value)
     }

View file

@@ -49,7 +49,7 @@ func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) {
         _, ts, _ := p.Series()
         if ts == nil {
-            return 0, 0, fmt.Errorf("expected timestamp for series got none")
+            return 0, 0, errors.New("expected timestamp for series got none")
         }

         if *ts > maxt {

View file

@@ -58,6 +58,7 @@ import (
     _ "github.com/prometheus/prometheus/plugins" // Register plugins.
     "github.com/prometheus/prometheus/promql/parser"
     "github.com/prometheus/prometheus/promql/promqltest"
+    "github.com/prometheus/prometheus/rules"
     "github.com/prometheus/prometheus/scrape"
     "github.com/prometheus/prometheus/util/documentcli"
 )
@@ -216,6 +217,7 @@ func main() {
         "test-rule-file",
         "The unit test file.",
     ).Required().ExistingFiles()
+    testRulesDebug := testRulesCmd.Flag("debug", "Enable unit test debugging.").Default("false").Bool()
     testRulesDiff := testRulesCmd.Flag("diff", "[Experimental] Print colored differential output between expected & received output.").Default("false").Bool()

     defaultDBPath := "data/"
@@ -391,6 +393,7 @@ func main() {
         },
         *testRulesRun,
         *testRulesDiff,
+        *testRulesDebug,
         *testRulesFiles...),
     )
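
With the wiring above, the new flag can be exercised directly from the command line, e.g. `promtool test rules --debug my_tests.yml` (file name illustrative). The DEBUG printouts it enables are implemented in cmd/promtool/unittest.go further down: per-test timing, plus a dump of all input and rule-generated series at the end of the test window.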
@@ -441,7 +444,7 @@ func checkExperimental(f bool) {
     }
 }

-var errLint = fmt.Errorf("lint error")
+var errLint = errors.New("lint error")

 type lintConfig struct {
     all bool
@@ -575,7 +578,7 @@ func checkFileExists(fn string) error {
 func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, error) {
     fmt.Println("Checking", filename)

-    cfg, err := config.LoadFile(filename, agentMode, false, promslog.NewNopLogger())
+    cfg, err := config.LoadFile(filename, agentMode, promslog.NewNopLogger())
     if err != nil {
         return nil, err
     }
@@ -895,30 +898,30 @@ func compare(a, b compareRuleType) int {
 func checkDuplicates(groups []rulefmt.RuleGroup) []compareRuleType {
     var duplicates []compareRuleType
-    var rules compareRuleTypes
+    var cRules compareRuleTypes

     for _, group := range groups {
         for _, rule := range group.Rules {
-            rules = append(rules, compareRuleType{
+            cRules = append(cRules, compareRuleType{
                 metric: ruleMetric(rule),
-                label:  labels.FromMap(rule.Labels),
+                label:  rules.FromMaps(group.Labels, rule.Labels),
             })
         }
     }

-    if len(rules) < 2 {
+    if len(cRules) < 2 {
         return duplicates
     }
-    sort.Sort(rules)
+    sort.Sort(cRules)

-    last := rules[0]
-    for i := 1; i < len(rules); i++ {
-        if compare(last, rules[i]) == 0 {
+    last := cRules[0]
+    for i := 1; i < len(cRules); i++ {
+        if compare(last, cRules[i]) == 0 {
             // Don't add a duplicated rule multiple times.
             if len(duplicates) == 0 || compare(last, duplicates[len(duplicates)-1]) != 0 {
-                duplicates = append(duplicates, rules[i])
+                duplicates = append(duplicates, cRules[i])
             }
         }
-        last = rules[i]
+        last = cRules[i]
     }

     return duplicates

View file

@@ -41,7 +41,7 @@ type sdCheckResult struct {
 func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int {
     logger := promslog.New(&promslog.Config{})

-    cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
+    cfg, err := config.LoadFile(sdConfigFiles, false, logger)
     if err != nil {
         fmt.Fprintln(os.Stderr, "Cannot load config", err)
         return failureExitCode

View file

@@ -6,7 +6,7 @@ scrape_configs:
 alerting:
   alertmanagers:
     - scheme: http
-      api_version: v1
+      api_version: v2
       file_sd_configs:
         - files:
             - nonexistent_file.yml

View file

@@ -405,7 +405,7 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
         }
     }

-    b, err := db.Block(blockID)
+    b, err := db.Block(blockID, tsdb.DefaultPostingsDecoderFactory)
     if err != nil {
         return nil, nil, err
     }
@@ -662,7 +662,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
             histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
             fhchk, ok := chk.(*chunkenc.FloatHistogramChunk)
             if !ok {
-                return fmt.Errorf("chunk is not FloatHistogramChunk")
+                return errors.New("chunk is not FloatHistogramChunk")
             }
             it := fhchk.Iterator(nil)
             bucketCount := 0
@@ -677,7 +677,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
             histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
             hchk, ok := chk.(*chunkenc.HistogramChunk)
             if !ok {
-                return fmt.Errorf("chunk is not HistogramChunk")
+                return errors.New("chunk is not HistogramChunk")
             }
             it := hchk.Iterator(nil)
             bucketCount := 0

View file

@ -46,11 +46,11 @@ import (
// RulesUnitTest does unit testing of rules based on the unit testing files provided. // RulesUnitTest does unit testing of rules based on the unit testing files provided.
// More info about the file format can be found in the docs. // More info about the file format can be found in the docs.
func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int { func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug bool, files ...string) int {
return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, files...) return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, debug, files...)
} }
func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int { func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug bool, files ...string) int {
failed := false failed := false
junit := &junitxml.JUnitXML{} junit := &junitxml.JUnitXML{}
@ -60,7 +60,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts,
} }
for _, f := range files { for _, f := range files {
if errs := ruleUnitTest(f, queryOpts, run, diffFlag, junit.Suite(f)); errs != nil { if errs := ruleUnitTest(f, queryOpts, run, diffFlag, debug, junit.Suite(f)); errs != nil {
fmt.Fprintln(os.Stderr, " FAILED:") fmt.Fprintln(os.Stderr, " FAILED:")
for _, e := range errs { for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error()) fmt.Fprintln(os.Stderr, e.Error())
@ -82,7 +82,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts,
return successExitCode return successExitCode
} }
func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool, ts *junitxml.TestSuite) []error { func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag, debug bool, ts *junitxml.TestSuite) []error {
b, err := os.ReadFile(filename) b, err := os.ReadFile(filename)
if err != nil { if err != nil {
ts.Abort(err) ts.Abort(err)
@ -131,7 +131,7 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg
if t.Interval == 0 { if t.Interval == 0 {
t.Interval = unitTestInp.EvaluationInterval t.Interval = unitTestInp.EvaluationInterval
} }
ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...) ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, unitTestInp.RuleFiles...)
if ers != nil { if ers != nil {
for _, e := range ers { for _, e := range ers {
tc.Fail(e.Error()) tc.Fail(e.Error())
@ -198,7 +198,14 @@ type testGroup struct {
} }
// test performs the unit tests. // test performs the unit tests.
func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) { func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug bool, ruleFiles ...string) (outErr []error) {
if debug {
testStart := time.Now()
fmt.Printf("DEBUG: Starting test %s\n", testname)
defer func() {
fmt.Printf("DEBUG: Test %s finished, took %v\n", testname, time.Since(testStart))
}()
}
// Setup testing suite. // Setup testing suite.
suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts) suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
if err != nil { if err != nil {
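
The timing output added at the top of test uses the defer-with-closure idiom: the closure captures the start time, so the elapsed duration is printed on every return path. In isolation, as a runnable sketch (names are illustrative):

    package main

    import (
        "fmt"
        "time"
    )

    // timedSection returns a func suitable for defer; the closure captures
    // start, so the elapsed time is reported however the caller returns.
    func timedSection(name string) func() {
        start := time.Now()
        fmt.Printf("DEBUG: Starting %s\n", name)
        return func() { fmt.Printf("DEBUG: %s finished, took %v\n", name, time.Since(start)) }
    }

    func main() {
        defer timedSection("example")() // trailing (): the outer call runs now, the inner one at return
        time.Sleep(50 * time.Millisecond)
    }
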
@ -482,6 +489,32 @@ Outer:
} }
} }
if debug {
ts := tg.maxEvalTime()
// A test may be specified at a time with fractional seconds,
// which PromQL cannot represent, so round up to the next whole second.
ts = (ts + time.Second).Truncate(time.Second)
expr := fmt.Sprintf(`{__name__=~".+"}[%v]`, ts)
q, err := suite.QueryEngine().NewInstantQuery(context.Background(), suite.Queryable(), nil, expr, mint.Add(ts))
if err != nil {
fmt.Printf("DEBUG: Failed querying, expr: %q, err: %v\n", expr, err)
return errs
}
res := q.Exec(suite.Context())
if res.Err != nil {
fmt.Printf("DEBUG: Failed query exec, expr: %q, err: %v\n", expr, res.Err)
return errs
}
switch v := res.Value.(type) {
case promql.Matrix:
fmt.Printf("DEBUG: Dump of all data (input_series and rules) at %v:\n", ts)
fmt.Println(v.String())
default:
fmt.Printf("DEBUG: Got unexpected type %T\n", v)
return errs
}
}
if len(errs) > 0 { if len(errs) > 0 {
return errs return errs
} }
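
The dump above is a single instant query whose range selector spans the whole test: the regex matcher {__name__=~".+"} selects every series, so both input_series and rule results land in the returned matrix. The rounding exists because %v must render as a valid PromQL duration. For example, assuming a test whose last evaluation falls at 90.5s:

    ts := 90500 * time.Millisecond
    ts = (ts + time.Second).Truncate(time.Second)   // 1m31s: whole seconds only
    expr := fmt.Sprintf(`{__name__=~".+"}[%v]`, ts) // {__name__=~".+"}[1m31s]
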

View file

@ -141,14 +141,14 @@ func TestRulesUnitTest(t *testing.T) {
reuseCount[tt.want] += len(tt.args.files) reuseCount[tt.want] += len(tt.args.files)
} }
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
if got := RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want { if got := RulesUnitTest(tt.queryOpts, nil, false, false, tt.args.files...); got != tt.want {
t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want) t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
} }
}) })
} }
t.Run("Junit xml output ", func(t *testing.T) { t.Run("Junit xml output ", func(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, reuseFiles...); got != 1 { if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, false, reuseFiles...); got != 1 {
t.Errorf("RulesUnitTestResults() = %v, want 1", got) t.Errorf("RulesUnitTestResults() = %v, want 1", got)
} }
var test junitxml.JUnitXML var test junitxml.JUnitXML
@ -230,7 +230,7 @@ func TestRulesUnitTestRun(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
got := RulesUnitTest(tt.queryOpts, tt.args.run, false, tt.args.files...) got := RulesUnitTest(tt.queryOpts, tt.args.run, false, false, tt.args.files...)
require.Equal(t, tt.want, got) require.Equal(t, tt.want, got)
}) })
} }

View file

@ -17,6 +17,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"log/slog" "log/slog"
"mime"
"net/url" "net/url"
"os" "os"
"path/filepath" "path/filepath"
@ -72,7 +73,7 @@ const (
) )
// Load parses the YAML input s into a Config. // Load parses the YAML input s into a Config.
func Load(s string, expandExternalLabels bool, logger *slog.Logger) (*Config, error) { func Load(s string, logger *slog.Logger) (*Config, error) {
cfg := &Config{} cfg := &Config{}
// If the entire config body is empty the UnmarshalYAML method is // If the entire config body is empty the UnmarshalYAML method is
// never called. We thus have to set the DefaultConfig at the entry // never called. We thus have to set the DefaultConfig at the entry
@ -84,10 +85,6 @@ func Load(s string, expandExternalLabels bool, logger *slog.Logger) (*Config, er
return nil, err return nil, err
} }
if !expandExternalLabels {
return cfg, nil
}
b := labels.NewScratchBuilder(0) b := labels.NewScratchBuilder(0)
cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) { cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) {
newV := os.Expand(v.Value, func(s string) string { newV := os.Expand(v.Value, func(s string) string {
@ -106,17 +103,31 @@ func Load(s string, expandExternalLabels bool, logger *slog.Logger) (*Config, er
// Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024 // Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024
b.Add(v.Name, newV) b.Add(v.Name, newV)
}) })
cfg.GlobalConfig.ExternalLabels = b.Labels() if !b.Labels().IsEmpty() {
cfg.GlobalConfig.ExternalLabels = b.Labels()
}
switch cfg.OTLPConfig.TranslationStrategy {
case UnderscoreEscapingWithSuffixes:
case "":
case NoUTF8EscapingWithSuffixes:
if cfg.GlobalConfig.MetricNameValidationScheme == LegacyValidationConfig {
return nil, errors.New("OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled")
}
default:
return nil, fmt.Errorf("unsupported OTLP translation strategy %q", cfg.OTLPConfig.TranslationStrategy)
}
return cfg, nil return cfg, nil
} }
// LoadFile parses the given YAML file into a Config. // LoadFile parses the given YAML file into a Config.
func LoadFile(filename string, agentMode, expandExternalLabels bool, logger *slog.Logger) (*Config, error) { func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, error) {
content, err := os.ReadFile(filename) content, err := os.ReadFile(filename)
if err != nil { if err != nil {
return nil, err return nil, err
} }
cfg, err := Load(string(content), expandExternalLabels, logger) cfg, err := Load(string(content), logger)
if err != nil { if err != nil {
return nil, fmt.Errorf("parsing YAML file %s: %w", filename, err) return nil, fmt.Errorf("parsing YAML file %s: %w", filename, err)
} }
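
Both loader entry points lose the expandExternalLabels boolean: expansion now always runs inside Load, and an all-empty result no longer clobbers the parsed labels. A sketch of the new call sites, assuming the config and prometheus/common/promslog imports:

    logger := promslog.NewNopLogger()
    cfg, err := config.Load(yamlBody, logger)                                      // yamlBody: raw config text
    cfg2, err2 := config.LoadFile("prometheus.yml", false /* agentMode */, logger) // hypothetical path
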
@ -165,13 +176,13 @@ var (
// DefaultScrapeConfig is the default scrape configuration. // DefaultScrapeConfig is the default scrape configuration.
DefaultScrapeConfig = ScrapeConfig{ DefaultScrapeConfig = ScrapeConfig{
// ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals. // ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals.
ScrapeClassicHistograms: false, AlwaysScrapeClassicHistograms: false,
MetricsPath: "/metrics", MetricsPath: "/metrics",
Scheme: "http", Scheme: "http",
HonorLabels: false, HonorLabels: false,
HonorTimestamps: true, HonorTimestamps: true,
HTTPClientConfig: config.DefaultHTTPClientConfig, HTTPClientConfig: config.DefaultHTTPClientConfig,
EnableCompression: true, EnableCompression: true,
} }
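
The default block tracks the field rename; in YAML the new key (defined further down in ScrapeConfig) reads, as a hedged sketch:

    scrape_configs:
      - job_name: example                       # hypothetical job
        always_scrape_classic_histograms: true  # was: scrape_classic_histograms
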
// DefaultAlertmanagerConfig is the default alertmanager configuration. // DefaultAlertmanagerConfig is the default alertmanager configuration.
@ -182,13 +193,18 @@ var (
HTTPClientConfig: config.DefaultHTTPClientConfig, HTTPClientConfig: config.DefaultHTTPClientConfig,
} }
DefaultRemoteWriteHTTPClientConfig = config.HTTPClientConfig{
FollowRedirects: true,
EnableHTTP2: false,
}
// DefaultRemoteWriteConfig is the default remote write configuration. // DefaultRemoteWriteConfig is the default remote write configuration.
DefaultRemoteWriteConfig = RemoteWriteConfig{ DefaultRemoteWriteConfig = RemoteWriteConfig{
RemoteTimeout: model.Duration(30 * time.Second), RemoteTimeout: model.Duration(30 * time.Second),
ProtobufMessage: RemoteWriteProtoMsgV1, ProtobufMessage: RemoteWriteProtoMsgV1,
QueueConfig: DefaultQueueConfig, QueueConfig: DefaultQueueConfig,
MetadataConfig: DefaultMetadataConfig, MetadataConfig: DefaultMetadataConfig,
HTTPClientConfig: config.DefaultHTTPClientConfig, HTTPClientConfig: DefaultRemoteWriteHTTPClientConfig,
} }
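
Remote write now defaults to HTTP/1.1 via its own client config rather than the shared default. Opting back in is a one-key override, since enable_http2 is a standard prometheus/common HTTP client option:

    remote_write:
      - url: https://example.com/api/v1/write   # hypothetical endpoint
        enable_http2: true                      # restore the old default
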
// DefaultQueueConfig is the default remote queue configuration. // DefaultQueueConfig is the default remote queue configuration.
@ -235,7 +251,9 @@ var (
} }
// DefaultOTLPConfig is the default OTLP configuration. // DefaultOTLPConfig is the default OTLP configuration.
DefaultOTLPConfig = OTLPConfig{} DefaultOTLPConfig = OTLPConfig{
TranslationStrategy: UnderscoreEscapingWithSuffixes,
}
) )
// Config is the top-level configuration for Prometheus's config files. // Config is the top-level configuration for Prometheus's config files.
@ -475,9 +493,22 @@ func (s ScrapeProtocol) Validate() error {
return nil return nil
} }
// HeaderMediaType returns the MIME mediaType for a particular ScrapeProtocol.
func (s ScrapeProtocol) HeaderMediaType() string {
if _, ok := ScrapeProtocolsHeaders[s]; !ok {
return ""
}
mediaType, _, err := mime.ParseMediaType(ScrapeProtocolsHeaders[s])
if err != nil {
return ""
}
return mediaType
}
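
HeaderMediaType parses the registered Content-Type header and returns only the type/subtype, dropping parameters; unknown protocols map to the empty string. Usage, mirroring the new unit test at the end of this file:

    mt := config.PrometheusProto.HeaderMediaType()
    // mt == "application/vnd.google.protobuf"; the proto= and encoding=
    // parameters are stripped by mime.ParseMediaType.
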
var ( var (
PrometheusProto ScrapeProtocol = "PrometheusProto" PrometheusProto ScrapeProtocol = "PrometheusProto"
PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4" PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4"
PrometheusText1_0_0 ScrapeProtocol = "PrometheusText1.0.0"
OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1" OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1"
OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0" OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0"
UTF8NamesHeader string = model.EscapingKey + "=" + model.AllowUTF8 UTF8NamesHeader string = model.EscapingKey + "=" + model.AllowUTF8
@ -485,6 +516,7 @@ var (
ScrapeProtocolsHeaders = map[ScrapeProtocol]string{ ScrapeProtocolsHeaders = map[ScrapeProtocol]string{
PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
PrometheusText0_0_4: "text/plain;version=0.0.4", PrometheusText0_0_4: "text/plain;version=0.0.4",
PrometheusText1_0_0: "text/plain;version=1.0.0;escaping=allow-utf-8",
OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1", OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1",
OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0", OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0",
} }
@ -494,6 +526,7 @@ var (
DefaultScrapeProtocols = []ScrapeProtocol{ DefaultScrapeProtocols = []ScrapeProtocol{
OpenMetricsText1_0_0, OpenMetricsText1_0_0,
OpenMetricsText0_0_1, OpenMetricsText0_0_1,
PrometheusText1_0_0,
PrometheusText0_0_4, PrometheusText0_0_4,
} }
@ -505,6 +538,7 @@ var (
PrometheusProto, PrometheusProto,
OpenMetricsText1_0_0, OpenMetricsText1_0_0,
OpenMetricsText0_0_1, OpenMetricsText0_0_1,
PrometheusText1_0_0,
PrometheusText0_0_4, PrometheusText0_0_4,
} }
) )
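
PrometheusText1.0.0 (the UTF-8-capable text format) now slots into both default preference lists ahead of 0.0.4. An explicit per-job preference list would look like:

    scrape_configs:
      - job_name: example   # hypothetical job
        scrape_protocols: [OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText1.0.0, PrometheusText0.0.4]
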
@ -631,10 +665,17 @@ type ScrapeConfig struct {
// The protocols to negotiate during a scrape. It tells clients what // The protocols to negotiate during a scrape. It tells clients what
// protocol are accepted by Prometheus and with what preference (most wanted is first). // protocol are accepted by Prometheus and with what preference (most wanted is first).
// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
// OpenMetricsText1.0.0, PrometheusText0.0.4. // OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"` ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
// Whether to scrape a classic histogram that is also exposed as a native histogram. // The fallback protocol to use if the target's Content-Type
ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"` // is missing, blank, or not one of the expected values.
// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
// OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
ScrapeFallbackProtocol ScrapeProtocol `yaml:"fallback_scrape_protocol,omitempty"`
// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"`
// Whether to convert all scraped classic histograms into a native histogram with custom buckets.
ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
// File to which scrape failures are logged. // File to which scrape failures are logged.
ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"` ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"`
// The HTTP resource path on which to fetch metrics from targets. // The HTTP resource path on which to fetch metrics from targets.
@ -782,6 +823,12 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName) return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName)
} }
if c.ScrapeFallbackProtocol != "" {
if err := c.ScrapeFallbackProtocol.Validate(); err != nil {
return fmt.Errorf("invalid fallback_scrape_protocol for scrape config with job name %q: %w", c.JobName, err)
}
}
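
The fallback protocol is a single name, not a list, and it passes through the same Validate as scrape_protocols; the testdata added below exercises both failure modes. The happy path is:

    scrape_configs:
      - job_name: example                             # hypothetical job
        fallback_scrape_protocol: PrometheusText0.0.4 # used when the Content-Type is missing or unrecognised
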
switch globalConfig.MetricNameValidationScheme { switch globalConfig.MetricNameValidationScheme {
case LegacyValidationConfig: case LegacyValidationConfig:
case "", UTF8ValidationConfig: case "", UTF8ValidationConfig:
@ -957,6 +1004,7 @@ func (a AlertmanagerConfigs) ToMap() map[string]*AlertmanagerConfig {
// AlertmanagerAPIVersion represents a version of the // AlertmanagerAPIVersion represents a version of the
// github.com/prometheus/alertmanager/api, e.g. 'v1' or 'v2'. // github.com/prometheus/alertmanager/api, e.g. 'v1' or 'v2'.
// 'v1' is no longer supported.
type AlertmanagerAPIVersion string type AlertmanagerAPIVersion string
// UnmarshalYAML implements the yaml.Unmarshaler interface. // UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -986,7 +1034,7 @@ const (
) )
var SupportedAlertmanagerAPIVersions = []AlertmanagerAPIVersion{ var SupportedAlertmanagerAPIVersions = []AlertmanagerAPIVersion{
AlertmanagerAPIVersionV1, AlertmanagerAPIVersionV2, AlertmanagerAPIVersionV2,
} }
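
With v1 gone from the supported list, configurations pinning api_version: v1 now fail at load time (see the new testdata file and TestYAMLNotLongerSupportedAMApi below). A valid block:

    alerting:
      alertmanagers:
        - api_version: v2
          static_configs:
            - targets: ['alertmanager:9093']   # hypothetical address
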
// AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with. // AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with.
@ -1038,7 +1086,7 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
if httpClientConfigAuthEnabled && c.SigV4Config != nil { if httpClientConfigAuthEnabled && c.SigV4Config != nil {
return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured") return errors.New("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
} }
// Check for users putting URLs in target groups. // Check for users putting URLs in target groups.
@ -1368,9 +1416,20 @@ func getGoGCEnv() int {
return DefaultRuntimeConfig.GoGC return DefaultRuntimeConfig.GoGC
} }
type translationStrategyOption string
var (
// NoUTF8EscapingWithSuffixes will keep UTF-8 characters as they are; units and type suffixes will still be added.
NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes"
// UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus.
// This option will translate all UTF-8 characters to underscores, while adding units and type suffixes.
UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes"
)
// OTLPConfig is the configuration for writing to the OTLP endpoint. // OTLPConfig is the configuration for writing to the OTLP endpoint.
type OTLPConfig struct { type OTLPConfig struct {
PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"`
TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"`
} }
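
The two accepted strategies are the constants declared above; anything else is rejected, and NoUTF8EscapingWithSuffixes additionally requires the UTF-8 name validation scheme (the check added to Load earlier in this file). In YAML:

    otlp:
      translation_strategy: NoUTF8EscapingWithSuffixes
    # Invalid in combination with: global.metric_name_validation_scheme: legacy
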
// UnmarshalYAML implements the yaml.Unmarshaler interface. // UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -1386,7 +1445,7 @@ func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
for i, attr := range c.PromoteResourceAttributes { for i, attr := range c.PromoteResourceAttributes {
attr = strings.TrimSpace(attr) attr = strings.TrimSpace(attr)
if attr == "" { if attr == "" {
err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute")) err = errors.Join(err, errors.New("empty promoted OTel resource attribute"))
continue continue
} }
if _, exists := seen[attr]; exists { if _, exists := seen[attr]; exists {

View file

@ -142,7 +142,7 @@ var expectedConf = &Config{
}, },
}, },
FollowRedirects: true, FollowRedirects: true,
EnableHTTP2: true, EnableHTTP2: false,
}, },
}, },
{ {
@ -158,7 +158,7 @@ var expectedConf = &Config{
KeyFile: filepath.FromSlash("testdata/valid_key_file"), KeyFile: filepath.FromSlash("testdata/valid_key_file"),
}, },
FollowRedirects: true, FollowRedirects: true,
EnableHTTP2: true, EnableHTTP2: false,
}, },
Headers: map[string]string{"name": "value"}, Headers: map[string]string{"name": "value"},
}, },
@ -168,6 +168,7 @@ var expectedConf = &Config{
PromoteResourceAttributes: []string{ PromoteResourceAttributes: []string{
"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name", "k8s.cluster.name", "k8s.job.name", "k8s.namespace.name",
}, },
TranslationStrategy: UnderscoreEscapingWithSuffixes,
}, },
RemoteReadConfigs: []*RemoteReadConfig{ RemoteReadConfigs: []*RemoteReadConfig{
@ -206,19 +207,20 @@ var expectedConf = &Config{
{ {
JobName: "prometheus", JobName: "prometheus",
HonorLabels: true, HonorLabels: true,
HonorTimestamps: true, HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second), ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
EnableCompression: true, EnableCompression: true,
BodySizeLimit: globBodySizeLimit, BodySizeLimit: globBodySizeLimit,
SampleLimit: globSampleLimit, SampleLimit: globSampleLimit,
TargetLimit: globTargetLimit, TargetLimit: globTargetLimit,
LabelLimit: globLabelLimit, LabelLimit: globLabelLimit,
LabelNameLengthLimit: globLabelNameLengthLimit, LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeFailureLogFile: "testdata/fail_prom.log", ScrapeFallbackProtocol: PrometheusText0_0_4,
ScrapeFailureLogFile: "testdata/fail_prom.log",
MetricsPath: DefaultScrapeConfig.MetricsPath, MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme, Scheme: DefaultScrapeConfig.Scheme,
@ -1500,8 +1502,13 @@ var expectedConf = &Config{
}, },
} }
func TestYAMLNotLongerSupportedAMApi(t *testing.T) {
_, err := LoadFile("testdata/config_with_no_longer_supported_am_api_config.yml", false, promslog.NewNopLogger())
require.Error(t, err)
}
func TestYAMLRoundtrip(t *testing.T) { func TestYAMLRoundtrip(t *testing.T) {
want, err := LoadFile("testdata/roundtrip.good.yml", false, false, promslog.NewNopLogger()) want, err := LoadFile("testdata/roundtrip.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
out, err := yaml.Marshal(want) out, err := yaml.Marshal(want)
@ -1514,7 +1521,7 @@ func TestYAMLRoundtrip(t *testing.T) {
} }
func TestRemoteWriteRetryOnRateLimit(t *testing.T) { func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, promslog.NewNopLogger()) want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
out, err := yaml.Marshal(want) out, err := yaml.Marshal(want)
@ -1529,7 +1536,7 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
func TestOTLPSanitizeResourceAttributes(t *testing.T) { func TestOTLPSanitizeResourceAttributes(t *testing.T) {
t.Run("good config", func(t *testing.T) { t.Run("good config", func(t *testing.T) {
want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, promslog.NewNopLogger()) want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
out, err := yaml.Marshal(want) out, err := yaml.Marshal(want)
@ -1541,25 +1548,87 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) {
}) })
t.Run("bad config", func(t *testing.T) { t.Run("bad config", func(t *testing.T) {
_, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, promslog.NewNopLogger()) _, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, promslog.NewNopLogger())
require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`) require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`)
require.ErrorContains(t, err, `empty promoted OTel resource attribute`) require.ErrorContains(t, err, `empty promoted OTel resource attribute`)
}) })
} }
func TestOTLPAllowUTF8(t *testing.T) {
t.Run("good config", func(t *testing.T) {
fpath := filepath.Join("testdata", "otlp_allow_utf8.good.yml")
verify := func(t *testing.T, conf *Config, err error) {
t.Helper()
require.NoError(t, err)
require.Equal(t, NoUTF8EscapingWithSuffixes, conf.OTLPConfig.TranslationStrategy)
}
t.Run("LoadFile", func(t *testing.T) {
conf, err := LoadFile(fpath, false, promslog.NewNopLogger())
verify(t, conf, err)
})
t.Run("Load", func(t *testing.T) {
content, err := os.ReadFile(fpath)
require.NoError(t, err)
conf, err := Load(string(content), promslog.NewNopLogger())
verify(t, conf, err)
})
})
t.Run("incompatible config", func(t *testing.T) {
fpath := filepath.Join("testdata", "otlp_allow_utf8.incompatible.yml")
verify := func(t *testing.T, err error) {
t.Helper()
require.ErrorContains(t, err, `OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled`)
}
t.Run("LoadFile", func(t *testing.T) {
_, err := LoadFile(fpath, false, promslog.NewNopLogger())
verify(t, err)
})
t.Run("Load", func(t *testing.T) {
content, err := os.ReadFile(fpath)
require.NoError(t, err)
_, err = Load(string(content), promslog.NewNopLogger())
t.Log("err", err)
verify(t, err)
})
})
t.Run("bad config", func(t *testing.T) {
fpath := filepath.Join("testdata", "otlp_allow_utf8.bad.yml")
verify := func(t *testing.T, err error) {
t.Helper()
require.ErrorContains(t, err, `unsupported OTLP translation strategy "Invalid"`)
}
t.Run("LoadFile", func(t *testing.T) {
_, err := LoadFile(fpath, false, promslog.NewNopLogger())
verify(t, err)
})
t.Run("Load", func(t *testing.T) {
content, err := os.ReadFile(fpath)
require.NoError(t, err)
_, err = Load(string(content), promslog.NewNopLogger())
verify(t, err)
})
})
}
func TestLoadConfig(t *testing.T) { func TestLoadConfig(t *testing.T) {
// Parse a valid file that sets a global scrape timeout. This tests whether parsing // Parse a valid file that sets a global scrape timeout. This tests whether parsing
// an overwritten default field in the global config permanently changes the default. // an overwritten default field in the global config permanently changes the default.
_, err := LoadFile("testdata/global_timeout.good.yml", false, false, promslog.NewNopLogger()) _, err := LoadFile("testdata/global_timeout.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
c, err := LoadFile("testdata/conf.good.yml", false, false, promslog.NewNopLogger()) c, err := LoadFile("testdata/conf.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, expectedConf, c) require.Equal(t, expectedConf, c)
} }
func TestScrapeIntervalLarger(t *testing.T) { func TestScrapeIntervalLarger(t *testing.T) {
c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, promslog.NewNopLogger()) c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
require.Len(t, c.ScrapeConfigs, 1) require.Len(t, c.ScrapeConfigs, 1)
for _, sc := range c.ScrapeConfigs { for _, sc := range c.ScrapeConfigs {
@ -1569,7 +1638,7 @@ func TestScrapeIntervalLarger(t *testing.T) {
// YAML marshaling must not reveal authentication credentials. // YAML marshaling must not reveal authentication credentials.
func TestElideSecrets(t *testing.T) { func TestElideSecrets(t *testing.T) {
c, err := LoadFile("testdata/conf.good.yml", false, false, promslog.NewNopLogger()) c, err := LoadFile("testdata/conf.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`) secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`)
@ -1586,31 +1655,31 @@ func TestElideSecrets(t *testing.T) {
func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) { func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) {
// Parse a valid file that sets a rule files with an absolute path // Parse a valid file that sets a rule files with an absolute path
c, err := LoadFile(ruleFilesConfigFile, false, false, promslog.NewNopLogger()) c, err := LoadFile(ruleFilesConfigFile, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, ruleFilesExpectedConf, c) require.Equal(t, ruleFilesExpectedConf, c)
} }
func TestKubernetesEmptyAPIServer(t *testing.T) { func TestKubernetesEmptyAPIServer(t *testing.T) {
_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, promslog.NewNopLogger()) _, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
} }
func TestKubernetesWithKubeConfig(t *testing.T) { func TestKubernetesWithKubeConfig(t *testing.T) {
_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, promslog.NewNopLogger()) _, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
} }
func TestKubernetesSelectors(t *testing.T) { func TestKubernetesSelectors(t *testing.T) {
_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, promslog.NewNopLogger()) _, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, promslog.NewNopLogger()) _, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, promslog.NewNopLogger()) _, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, promslog.NewNopLogger()) _, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, promslog.NewNopLogger()) _, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
} }
@ -2080,12 +2149,20 @@ var expectedErrors = []struct {
}, },
{ {
filename: "scrape_config_files_scrape_protocols.bad.yml", filename: "scrape_config_files_scrape_protocols.bad.yml",
errMsg: `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4] for scrape config with job name "node"`, errMsg: `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4 PrometheusText1.0.0] for scrape config with job name "node"`,
}, },
{ {
filename: "scrape_config_files_scrape_protocols2.bad.yml", filename: "scrape_config_files_scrape_protocols2.bad.yml",
errMsg: `parsing YAML file testdata/scrape_config_files_scrape_protocols2.bad.yml: duplicated protocol in scrape_protocols, got [OpenMetricsText1.0.0 PrometheusProto OpenMetricsText1.0.0] for scrape config with job name "node"`, errMsg: `parsing YAML file testdata/scrape_config_files_scrape_protocols2.bad.yml: duplicated protocol in scrape_protocols, got [OpenMetricsText1.0.0 PrometheusProto OpenMetricsText1.0.0] for scrape config with job name "node"`,
}, },
{
filename: "scrape_config_files_fallback_scrape_protocol1.bad.yml",
errMsg: `parsing YAML file testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml: invalid fallback_scrape_protocol for scrape config with job name "node": unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4 PrometheusText1.0.0]`,
},
{
filename: "scrape_config_files_fallback_scrape_protocol2.bad.yml",
errMsg: `unmarshal errors`,
},
} }
func TestBadConfigs(t *testing.T) { func TestBadConfigs(t *testing.T) {
@ -2094,7 +2171,7 @@ func TestBadConfigs(t *testing.T) {
model.NameValidationScheme = model.UTF8Validation model.NameValidationScheme = model.UTF8Validation
}() }()
for _, ee := range expectedErrors { for _, ee := range expectedErrors {
_, err := LoadFile("testdata/"+ee.filename, false, false, promslog.NewNopLogger()) _, err := LoadFile("testdata/"+ee.filename, false, promslog.NewNopLogger())
require.ErrorContains(t, err, ee.errMsg, require.ErrorContains(t, err, ee.errMsg,
"Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err) "Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
} }
@ -2125,7 +2202,7 @@ func TestBadStaticConfigsYML(t *testing.T) {
} }
func TestEmptyConfig(t *testing.T) { func TestEmptyConfig(t *testing.T) {
c, err := Load("", false, promslog.NewNopLogger()) c, err := Load("", promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
exp := DefaultConfig exp := DefaultConfig
require.Equal(t, exp, *c) require.Equal(t, exp, *c)
@ -2135,38 +2212,34 @@ func TestExpandExternalLabels(t *testing.T) {
// Clean up any TEST env variable that could exist on the system. // Clean up any TEST env variable that could exist on the system.
os.Setenv("TEST", "") os.Setenv("TEST", "")
c, err := LoadFile("testdata/external_labels.good.yml", false, false, promslog.NewNopLogger()) c, err := LoadFile("testdata/external_labels.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err)
testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels)
c, err = LoadFile("testdata/external_labels.good.yml", false, true, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels) testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
os.Setenv("TEST", "TestValue") os.Setenv("TEST", "TestValue")
c, err = LoadFile("testdata/external_labels.good.yml", false, true, promslog.NewNopLogger()) c, err = LoadFile("testdata/external_labels.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels) testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
} }
func TestAgentMode(t *testing.T) { func TestAgentMode(t *testing.T) {
_, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, promslog.NewNopLogger()) _, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, promslog.NewNopLogger())
require.ErrorContains(t, err, "field alerting is not allowed in agent mode") require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
_, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, promslog.NewNopLogger()) _, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, promslog.NewNopLogger())
require.ErrorContains(t, err, "field alerting is not allowed in agent mode") require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
_, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, promslog.NewNopLogger()) _, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, promslog.NewNopLogger())
require.ErrorContains(t, err, "field rule_files is not allowed in agent mode") require.ErrorContains(t, err, "field rule_files is not allowed in agent mode")
_, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, promslog.NewNopLogger()) _, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, promslog.NewNopLogger())
require.ErrorContains(t, err, "field remote_read is not allowed in agent mode") require.ErrorContains(t, err, "field remote_read is not allowed in agent mode")
c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, promslog.NewNopLogger()) c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, c.RemoteWriteConfigs) require.Empty(t, c.RemoteWriteConfigs)
c, err = LoadFile("testdata/agent_mode.good.yml", true, false, promslog.NewNopLogger()) c, err = LoadFile("testdata/agent_mode.good.yml", true, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
require.Len(t, c.RemoteWriteConfigs, 1) require.Len(t, c.RemoteWriteConfigs, 1)
require.Equal( require.Equal(
@ -2177,7 +2250,7 @@ func TestAgentMode(t *testing.T) {
} }
func TestEmptyGlobalBlock(t *testing.T) { func TestEmptyGlobalBlock(t *testing.T) {
c, err := Load("global:\n", false, promslog.NewNopLogger()) c, err := Load("global:\n", promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
exp := DefaultConfig exp := DefaultConfig
exp.Runtime = DefaultRuntimeConfig exp.Runtime = DefaultRuntimeConfig
@ -2332,7 +2405,7 @@ func TestGetScrapeConfigs(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
c, err := LoadFile(tc.configFile, false, false, promslog.NewNopLogger()) c, err := LoadFile(tc.configFile, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
scfgs, err := c.GetScrapeConfigs() scfgs, err := c.GetScrapeConfigs()
@ -2350,7 +2423,7 @@ func kubernetesSDHostURL() config.URL {
} }
func TestScrapeConfigDisableCompression(t *testing.T) { func TestScrapeConfigDisableCompression(t *testing.T) {
want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, promslog.NewNopLogger()) want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
out, err := yaml.Marshal(want) out, err := yaml.Marshal(want)
@ -2397,7 +2470,7 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, promslog.NewNopLogger()) want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
out, err := yaml.Marshal(want) out, err := yaml.Marshal(want)
@ -2410,3 +2483,54 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
}) })
} }
} }
func TestScrapeProtocolHeader(t *testing.T) {
tests := []struct {
name string
proto ScrapeProtocol
expectedValue string
}{
{
name: "blank",
proto: ScrapeProtocol(""),
expectedValue: "",
},
{
name: "invalid",
proto: ScrapeProtocol("invalid"),
expectedValue: "",
},
{
name: "prometheus protobuf",
proto: PrometheusProto,
expectedValue: "application/vnd.google.protobuf",
},
{
name: "prometheus text 0.0.4",
proto: PrometheusText0_0_4,
expectedValue: "text/plain",
},
{
name: "prometheus text 1.0.0",
proto: PrometheusText1_0_0,
expectedValue: "text/plain",
},
{
name: "openmetrics 0.0.1",
proto: OpenMetricsText0_0_1,
expectedValue: "application/openmetrics-text",
},
{
name: "openmetrics 1.0.0",
proto: OpenMetricsText1_0_0,
expectedValue: "application/openmetrics-text",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
mediaType := tc.proto.HeaderMediaType()
require.Equal(t, tc.expectedValue, mediaType)
})
}
}

View file

@ -74,6 +74,8 @@ scrape_configs:
# metrics_path defaults to '/metrics' # metrics_path defaults to '/metrics'
# scheme defaults to 'http'. # scheme defaults to 'http'.
fallback_scrape_protocol: PrometheusText0.0.4
scrape_failure_log_file: fail_prom.log scrape_failure_log_file: fail_prom.log
file_sd_configs: file_sd_configs:
- files: - files:

View file

@ -0,0 +1,7 @@
alerting:
alertmanagers:
- scheme: http
api_version: v1
file_sd_configs:
- files:
- nonexistent_file.yml

View file

@ -0,0 +1,4 @@
global:
metric_name_validation_scheme: legacy
otlp:
translation_strategy: Invalid

View file

@ -0,0 +1,2 @@
otlp:
translation_strategy: NoUTF8EscapingWithSuffixes

View file

@ -0,0 +1,4 @@
global:
metric_name_validation_scheme: legacy
otlp:
translation_strategy: NoUTF8EscapingWithSuffixes

View file

@ -0,0 +1,5 @@
scrape_configs:
- job_name: node
fallback_scrape_protocol: "prometheusproto"
static_configs:
- targets: ['localhost:8080']

View file

@ -0,0 +1,5 @@
scrape_configs:
- job_name: node
fallback_scrape_protocol: ["OpenMetricsText1.0.0", "PrometheusText0.0.4"]
static_configs:
- targets: ['localhost:8080']

View file

@ -30,6 +30,7 @@ import (
"github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -148,7 +149,7 @@ type EC2Discovery struct {
*refresh.Discovery *refresh.Discovery
logger *slog.Logger logger *slog.Logger
cfg *EC2SDConfig cfg *EC2SDConfig
ec2 *ec2.EC2 ec2 ec2iface.EC2API
// azToAZID maps this account's availability zones to their underlying AZ // azToAZID maps this account's availability zones to their underlying AZ
// ID, e.g. eu-west-2a -> euw2-az2. Refreshes are performed sequentially, so // ID, e.g. eu-west-2a -> euw2-az2. Refreshes are performed sequentially, so
@ -160,7 +161,7 @@ type EC2Discovery struct {
func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) { func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
m, ok := metrics.(*ec2Metrics) m, ok := metrics.(*ec2Metrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {
@ -182,7 +183,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.D
return d, nil return d, nil
} }
func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) { func (d *EC2Discovery) ec2Client(context.Context) (ec2iface.EC2API, error) {
if d.ec2 != nil { if d.ec2 != nil {
return d.ec2, nil return d.ec2, nil
} }
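
Holding the ec2iface.EC2API interface instead of the concrete *ec2.EC2 client is what enables the new test file below: a stub that embeds the interface satisfies the full API surface and overrides only the calls the discovery code actually issues. In outline:

    // Sketch of the pattern used by mockEC2Client below: the embedded
    // interface supplies every method; overrides carry the canned data.
    type stubEC2 struct {
        ec2iface.EC2API
    }
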

discovery/aws/ec2_test.go (new file, 434 lines)
View file

@ -0,0 +1,434 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aws
import (
"context"
"errors"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
// Helper functions to get pointers to literals.
// NOTE: this is common between a few tests. In the future it might be worth moving this out into a separate package.
func strptr(str string) *string {
return &str
}
func boolptr(b bool) *bool {
return &b
}
func int64ptr(i int64) *int64 {
return &i
}
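
For reference, aws-sdk-go ships equivalent pointer helpers; the local copies just keep the test self-contained:

    s := aws.String("value") // *string
    b := aws.Bool(true)      // *bool
    n := aws.Int64(42)       // *int64
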
// Struct for test data.
type ec2DataStore struct {
region string
azToAZID map[string]string
ownerID string
instances []*ec2.Instance
}
// The tests themselves.
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestEC2DiscoveryRefreshAZIDs(t *testing.T) {
ctx := context.Background()
// iterate through the test cases
for _, tt := range []struct {
name string
shouldFail bool
ec2Data *ec2DataStore
}{
{
name: "Normal",
shouldFail: false,
ec2Data: &ec2DataStore{
azToAZID: map[string]string{
"azname-a": "azid-1",
"azname-b": "azid-2",
"azname-c": "azid-3",
},
},
},
{
name: "HandleError",
shouldFail: true,
ec2Data: &ec2DataStore{},
},
} {
t.Run(tt.name, func(t *testing.T) {
client := newMockEC2Client(tt.ec2Data)
d := &EC2Discovery{
ec2: client,
}
err := d.refreshAZIDs(ctx)
if tt.shouldFail {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Equal(t, client.ec2Data.azToAZID, d.azToAZID)
}
})
}
}
func TestEC2DiscoveryRefresh(t *testing.T) {
ctx := context.Background()
// iterate through the test cases
for _, tt := range []struct {
name string
ec2Data *ec2DataStore
expected []*targetgroup.Group
}{
{
name: "NoPrivateIp",
ec2Data: &ec2DataStore{
region: "region-noprivateip",
azToAZID: map[string]string{
"azname-a": "azid-1",
"azname-b": "azid-2",
"azname-c": "azid-3",
},
instances: []*ec2.Instance{
{
InstanceId: strptr("instance-id-noprivateip"),
},
},
},
expected: []*targetgroup.Group{
{
Source: "region-noprivateip",
},
},
},
{
name: "NoVpc",
ec2Data: &ec2DataStore{
region: "region-novpc",
azToAZID: map[string]string{
"azname-a": "azid-1",
"azname-b": "azid-2",
"azname-c": "azid-3",
},
ownerID: "owner-id-novpc",
instances: []*ec2.Instance{
{
// set every possible options and test them here
Architecture: strptr("architecture-novpc"),
ImageId: strptr("ami-novpc"),
InstanceId: strptr("instance-id-novpc"),
InstanceLifecycle: strptr("instance-lifecycle-novpc"),
InstanceType: strptr("instance-type-novpc"),
Placement: &ec2.Placement{AvailabilityZone: strptr("azname-b")},
Platform: strptr("platform-novpc"),
PrivateDnsName: strptr("private-dns-novpc"),
PrivateIpAddress: strptr("1.2.3.4"),
PublicDnsName: strptr("public-dns-novpc"),
PublicIpAddress: strptr("42.42.42.2"),
State: &ec2.InstanceState{Name: strptr("running")},
// test tags once and for all
Tags: []*ec2.Tag{
{Key: strptr("tag-1-key"), Value: strptr("tag-1-value")},
{Key: strptr("tag-2-key"), Value: strptr("tag-2-value")},
nil,
{Value: strptr("tag-4-value")},
{Key: strptr("tag-5-key")},
},
},
},
},
expected: []*targetgroup.Group{
{
Source: "region-novpc",
Targets: []model.LabelSet{
{
"__address__": model.LabelValue("1.2.3.4:4242"),
"__meta_ec2_ami": model.LabelValue("ami-novpc"),
"__meta_ec2_architecture": model.LabelValue("architecture-novpc"),
"__meta_ec2_availability_zone": model.LabelValue("azname-b"),
"__meta_ec2_availability_zone_id": model.LabelValue("azid-2"),
"__meta_ec2_instance_id": model.LabelValue("instance-id-novpc"),
"__meta_ec2_instance_lifecycle": model.LabelValue("instance-lifecycle-novpc"),
"__meta_ec2_instance_type": model.LabelValue("instance-type-novpc"),
"__meta_ec2_instance_state": model.LabelValue("running"),
"__meta_ec2_owner_id": model.LabelValue("owner-id-novpc"),
"__meta_ec2_platform": model.LabelValue("platform-novpc"),
"__meta_ec2_private_dns_name": model.LabelValue("private-dns-novpc"),
"__meta_ec2_private_ip": model.LabelValue("1.2.3.4"),
"__meta_ec2_public_dns_name": model.LabelValue("public-dns-novpc"),
"__meta_ec2_public_ip": model.LabelValue("42.42.42.2"),
"__meta_ec2_region": model.LabelValue("region-novpc"),
"__meta_ec2_tag_tag_1_key": model.LabelValue("tag-1-value"),
"__meta_ec2_tag_tag_2_key": model.LabelValue("tag-2-value"),
},
},
},
},
},
{
name: "Ipv4",
ec2Data: &ec2DataStore{
region: "region-ipv4",
azToAZID: map[string]string{
"azname-a": "azid-1",
"azname-b": "azid-2",
"azname-c": "azid-3",
},
instances: []*ec2.Instance{
{
// just the minimum needed for the refresh to work
ImageId: strptr("ami-ipv4"),
InstanceId: strptr("instance-id-ipv4"),
InstanceType: strptr("instance-type-ipv4"),
Placement: &ec2.Placement{AvailabilityZone: strptr("azname-c")},
PrivateIpAddress: strptr("5.6.7.8"),
State: &ec2.InstanceState{Name: strptr("running")},
SubnetId: strptr("azid-3"),
VpcId: strptr("vpc-ipv4"),
// network interfaces
NetworkInterfaces: []*ec2.InstanceNetworkInterface{
// interface without subnet -> should be ignored
{
Ipv6Addresses: []*ec2.InstanceIpv6Address{
{
Ipv6Address: strptr("2001:db8:1::1"),
IsPrimaryIpv6: boolptr(true),
},
},
},
// interface with subnet, no IPv6
{
Ipv6Addresses: []*ec2.InstanceIpv6Address{},
SubnetId: strptr("azid-3"),
},
// interface with another subnet, no IPv6
{
Ipv6Addresses: []*ec2.InstanceIpv6Address{},
SubnetId: strptr("azid-1"),
},
},
},
},
},
expected: []*targetgroup.Group{
{
Source: "region-ipv4",
Targets: []model.LabelSet{
{
"__address__": model.LabelValue("5.6.7.8:4242"),
"__meta_ec2_ami": model.LabelValue("ami-ipv4"),
"__meta_ec2_availability_zone": model.LabelValue("azname-c"),
"__meta_ec2_availability_zone_id": model.LabelValue("azid-3"),
"__meta_ec2_instance_id": model.LabelValue("instance-id-ipv4"),
"__meta_ec2_instance_state": model.LabelValue("running"),
"__meta_ec2_instance_type": model.LabelValue("instance-type-ipv4"),
"__meta_ec2_owner_id": model.LabelValue(""),
"__meta_ec2_primary_subnet_id": model.LabelValue("azid-3"),
"__meta_ec2_private_ip": model.LabelValue("5.6.7.8"),
"__meta_ec2_region": model.LabelValue("region-ipv4"),
"__meta_ec2_subnet_id": model.LabelValue(",azid-3,azid-1,"),
"__meta_ec2_vpc_id": model.LabelValue("vpc-ipv4"),
},
},
},
},
},
{
name: "Ipv6",
ec2Data: &ec2DataStore{
region: "region-ipv6",
azToAZID: map[string]string{
"azname-a": "azid-1",
"azname-b": "azid-2",
"azname-c": "azid-3",
},
instances: []*ec2.Instance{
{
// just the minimum needed for the refresh to work
ImageId: strptr("ami-ipv6"),
InstanceId: strptr("instance-id-ipv6"),
InstanceType: strptr("instance-type-ipv6"),
Placement: &ec2.Placement{AvailabilityZone: strptr("azname-b")},
PrivateIpAddress: strptr("9.10.11.12"),
State: &ec2.InstanceState{Name: strptr("running")},
SubnetId: strptr("azid-2"),
VpcId: strptr("vpc-ipv6"),
// network interfaces
NetworkInterfaces: []*ec2.InstanceNetworkInterface{
// interface without primary IPv6, index 2
{
Attachment: &ec2.InstanceNetworkInterfaceAttachment{
DeviceIndex: int64ptr(3),
},
Ipv6Addresses: []*ec2.InstanceIpv6Address{
{
Ipv6Address: strptr("2001:db8:2::1:1"),
IsPrimaryIpv6: boolptr(false),
},
},
SubnetId: strptr("azid-2"),
},
// interface with primary IPv6, index 1
{
Attachment: &ec2.InstanceNetworkInterfaceAttachment{
DeviceIndex: int64ptr(1),
},
Ipv6Addresses: []*ec2.InstanceIpv6Address{
{
Ipv6Address: strptr("2001:db8:2::2:1"),
IsPrimaryIpv6: boolptr(false),
},
{
Ipv6Address: strptr("2001:db8:2::2:2"),
IsPrimaryIpv6: boolptr(true),
},
},
SubnetId: strptr("azid-2"),
},
// interface with primary IPv6, index 3
{
Attachment: &ec2.InstanceNetworkInterfaceAttachment{
DeviceIndex: int64ptr(3),
},
Ipv6Addresses: []*ec2.InstanceIpv6Address{
{
Ipv6Address: strptr("2001:db8:2::3:1"),
IsPrimaryIpv6: boolptr(true),
},
},
SubnetId: strptr("azid-1"),
},
// interface without primary IPv6, index 0
{
Attachment: &ec2.InstanceNetworkInterfaceAttachment{
DeviceIndex: int64ptr(0),
},
Ipv6Addresses: []*ec2.InstanceIpv6Address{},
SubnetId: strptr("azid-3"),
},
},
},
},
},
expected: []*targetgroup.Group{
{
Source: "region-ipv6",
Targets: []model.LabelSet{
{
"__address__": model.LabelValue("9.10.11.12:4242"),
"__meta_ec2_ami": model.LabelValue("ami-ipv6"),
"__meta_ec2_availability_zone": model.LabelValue("azname-b"),
"__meta_ec2_availability_zone_id": model.LabelValue("azid-2"),
"__meta_ec2_instance_id": model.LabelValue("instance-id-ipv6"),
"__meta_ec2_instance_state": model.LabelValue("running"),
"__meta_ec2_instance_type": model.LabelValue("instance-type-ipv6"),
"__meta_ec2_ipv6_addresses": model.LabelValue(",2001:db8:2::1:1,2001:db8:2::2:1,2001:db8:2::2:2,2001:db8:2::3:1,"),
"__meta_ec2_owner_id": model.LabelValue(""),
"__meta_ec2_primary_ipv6_addresses": model.LabelValue(",,2001:db8:2::2:2,,2001:db8:2::3:1,"),
"__meta_ec2_primary_subnet_id": model.LabelValue("azid-2"),
"__meta_ec2_private_ip": model.LabelValue("9.10.11.12"),
"__meta_ec2_region": model.LabelValue("region-ipv6"),
"__meta_ec2_subnet_id": model.LabelValue(",azid-2,azid-1,azid-3,"),
"__meta_ec2_vpc_id": model.LabelValue("vpc-ipv6"),
},
},
},
},
},
} {
t.Run(tt.name, func(t *testing.T) {
client := newMockEC2Client(tt.ec2Data)
d := &EC2Discovery{
ec2: client,
cfg: &EC2SDConfig{
Port: 4242,
Region: client.ec2Data.region,
},
}
g, err := d.refresh(ctx)
require.NoError(t, err)
require.Equal(t, tt.expected, g)
})
}
}
// EC2 client mock.
type mockEC2Client struct {
ec2iface.EC2API
ec2Data ec2DataStore
}
func newMockEC2Client(ec2Data *ec2DataStore) *mockEC2Client {
client := mockEC2Client{
ec2Data: *ec2Data,
}
return &client
}
func (m *mockEC2Client) DescribeAvailabilityZonesWithContext(ctx aws.Context, input *ec2.DescribeAvailabilityZonesInput, opts ...request.Option) (*ec2.DescribeAvailabilityZonesOutput, error) {
if len(m.ec2Data.azToAZID) == 0 {
return nil, errors.New("No AZs found")
}
azs := make([]*ec2.AvailabilityZone, len(m.ec2Data.azToAZID))
i := 0
for k, v := range m.ec2Data.azToAZID {
azs[i] = &ec2.AvailabilityZone{
ZoneName: strptr(k),
ZoneId: strptr(v),
}
i++
}
return &ec2.DescribeAvailabilityZonesOutput{
AvailabilityZones: azs,
}, nil
}
func (m *mockEC2Client) DescribeInstancesPagesWithContext(ctx aws.Context, input *ec2.DescribeInstancesInput, fn func(*ec2.DescribeInstancesOutput, bool) bool, opts ...request.Option) error {
r := ec2.Reservation{}
r.SetInstances(m.ec2Data.instances)
r.SetOwnerId(m.ec2Data.ownerID)
o := ec2.DescribeInstancesOutput{}
o.SetReservations([]*ec2.Reservation{&r})
_ = fn(&o, true)
return nil
}

View file

@ -134,7 +134,7 @@ type LightsailDiscovery struct {
func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) { func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
m, ok := metrics.(*lightsailMetrics) m, ok := metrics.(*lightsailMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {

View file

@ -186,7 +186,7 @@ type Discovery struct {
func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*azureMetrics) m, ok := metrics.(*azureMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {

View file

@ -189,7 +189,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*consulMetrics) m, ok := metrics.(*consulMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {

View file

@ -15,6 +15,7 @@ package digitalocean
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"log/slog" "log/slog"
"net" "net"
@ -114,7 +115,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*digitaloceanMetrics) m, ok := metrics.(*digitaloceanMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
d := &Discovery{ d := &Discovery{

View file

@ -121,7 +121,7 @@ type Discovery struct {
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*dnsMetrics) m, ok := metrics.(*dnsMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {

View file

@ -15,7 +15,7 @@ package dns
import ( import (
"context" "context"
"fmt" "errors"
"log/slog" "log/slog"
"net" "net"
"testing" "testing"
@ -53,7 +53,7 @@ func TestDNS(t *testing.T) {
Type: "A", Type: "A",
}, },
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
return nil, fmt.Errorf("some error") return nil, errors.New("some error")
}, },
expected: []*targetgroup.Group{}, expected: []*targetgroup.Group{},
}, },

View file

@ -16,7 +16,6 @@ package eureka
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"log/slog" "log/slog"
"net" "net"
"net/http" "net/http"
@ -129,7 +128,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*eurekaMetrics) m, ok := metrics.(*eurekaMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd") rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd")

View file

@ -184,7 +184,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
fm, ok := metrics.(*fileMetrics) fm, ok := metrics.(*fileMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {

View file

@ -132,7 +132,7 @@ type Discovery struct {
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*gceMetrics) m, ok := metrics.(*gceMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
d := &Discovery{ d := &Discovery{

View file

@ -138,7 +138,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*hetznerMetrics) m, ok := metrics.(*hetznerMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
r, err := newRefresher(conf, logger) r, err := newRefresher(conf, logger)

View file

@ -41,8 +41,8 @@ import (
var ( var (
// DefaultSDConfig is the default HTTP SD configuration. // DefaultSDConfig is the default HTTP SD configuration.
DefaultSDConfig = SDConfig{ DefaultSDConfig = SDConfig{
RefreshInterval: model.Duration(60 * time.Second),
HTTPClientConfig: config.DefaultHTTPClientConfig, HTTPClientConfig: config.DefaultHTTPClientConfig,
RefreshInterval: model.Duration(60 * time.Second),
} }
userAgent = fmt.Sprintf("Prometheus/%s", version.Version) userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`) matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`)
@ -86,17 +86,17 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err return err
} }
if c.URL == "" { if c.URL == "" {
return fmt.Errorf("URL is missing") return errors.New("URL is missing")
} }
parsedURL, err := url.Parse(c.URL) parsedURL, err := url.Parse(c.URL)
if err != nil { if err != nil {
return err return err
} }
if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" { if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
return fmt.Errorf("URL scheme must be 'http' or 'https'") return errors.New("URL scheme must be 'http' or 'https'")
} }
if parsedURL.Host == "" { if parsedURL.Host == "" {
return fmt.Errorf("host is missing in URL") return errors.New("host is missing in URL")
} }
return c.HTTPClientConfig.Validate() return c.HTTPClientConfig.Validate()
} }
@ -118,7 +118,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*httpMetrics) m, ok := metrics.(*httpMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {

View file

@ -15,7 +15,6 @@ package ionos
import ( import (
"errors" "errors"
"fmt"
"log/slog" "log/slog"
"time" "time"
@ -46,7 +45,7 @@ type Discovery struct{}
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*ionosMetrics) m, ok := metrics.(*ionosMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
if conf.ionosEndpoint == "" { if conf.ionosEndpoint == "" {

View file

@ -102,10 +102,7 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node
return return
} }
ep := &apiv1.Endpoints{} obj, exists, err := e.endpointsStore.GetByKey(namespacedName(svc.Namespace, svc.Name))
ep.Namespace = svc.Namespace
ep.Name = svc.Name
obj, exists, err := e.endpointsStore.Get(ep)
if exists && err == nil { if exists && err == nil {
e.enqueue(obj.(*apiv1.Endpoints)) e.enqueue(obj.(*apiv1.Endpoints))
} }
@ -167,8 +164,11 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node
e.enqueueNode(node.Name) e.enqueueNode(node.Name)
}, },
DeleteFunc: func(o interface{}) { DeleteFunc: func(o interface{}) {
node := o.(*apiv1.Node) nodeName, err := nodeName(o)
e.enqueueNode(node.Name) if err != nil {
l.Error("Error getting Node name", "err", err)
}
e.enqueueNode(nodeName)
}, },
}) })
if err != nil { if err != nil {
@ -454,11 +454,8 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
if ref == nil || ref.Kind != "Pod" { if ref == nil || ref.Kind != "Pod" {
return nil return nil
} }
p := &apiv1.Pod{}
p.Namespace = ref.Namespace
p.Name = ref.Name
obj, exists, err := e.podStore.Get(p) obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name))
if err != nil { if err != nil {
e.logger.Error("resolving pod ref failed", "err", err) e.logger.Error("resolving pod ref failed", "err", err)
return nil return nil
@ -470,11 +467,7 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
} }
func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) { func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
svc := &apiv1.Service{} obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name))
svc.Namespace = ns
svc.Name = name
obj, exists, err := e.serviceStore.Get(svc)
if err != nil { if err != nil {
e.logger.Error("retrieving service failed", "err", err) e.logger.Error("retrieving service failed", "err", err)
return return
@ -482,7 +475,7 @@ func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
if !exists { if !exists {
return return
} }
svc = obj.(*apiv1.Service) svc := obj.(*apiv1.Service)
tg.Labels = tg.Labels.Merge(serviceLabels(svc)) tg.Labels = tg.Labels.Merge(serviceLabels(svc))
} }
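
These endpoints hunks replace `Get` on a throwaway stub object with `GetByKey` plus the new `namespacedName` helper. A self-contained sketch of the two lookup styles against a client-go store (object names are illustrative):

```go
package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	_ = store.Add(&apiv1.Service{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "my-service"},
	})

	// Old pattern: allocate a stub object solely so the store can
	// derive its "namespace/name" key from it.
	stub := &apiv1.Service{}
	stub.Namespace = "default"
	stub.Name = "my-service"
	obj, exists, _ := store.Get(stub)
	fmt.Println(exists, obj != nil) // true true

	// New pattern: build the key directly and skip the allocation,
	// which is what the benchmark below measures.
	obj, exists, _ = store.GetByKey("default/my-service")
	fmt.Println(exists, obj != nil) // true true
}
```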

View file

@ -18,10 +18,12 @@ import (
"testing" "testing"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
) )
@ -1257,3 +1259,22 @@ func TestEndpointsDiscoverySidecarContainer(t *testing.T) {
}, },
}.Run(t) }.Run(t)
} }
func BenchmarkResolvePodRef(b *testing.B) {
indexer := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, nil)
e := &Endpoints{
podStore: indexer,
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
p := e.resolvePodRef(&v1.ObjectReference{
Kind: "Pod",
Name: "testpod",
Namespace: "foo",
})
require.Nil(b, p)
}
}

View file

@ -145,8 +145,11 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
e.enqueueNode(node.Name) e.enqueueNode(node.Name)
}, },
DeleteFunc: func(o interface{}) { DeleteFunc: func(o interface{}) {
node := o.(*apiv1.Node) nodeName, err := nodeName(o)
e.enqueueNode(node.Name) if err != nil {
l.Error("Error getting Node name", "err", err)
}
e.enqueueNode(nodeName)
}, },
}) })
if err != nil { if err != nil {
@ -464,11 +467,8 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
if ref == nil || ref.Kind != "Pod" { if ref == nil || ref.Kind != "Pod" {
return nil return nil
} }
p := &apiv1.Pod{}
p.Namespace = ref.Namespace
p.Name = ref.Name
obj, exists, err := e.podStore.Get(p) obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name))
if err != nil { if err != nil {
e.logger.Error("resolving pod ref failed", "err", err) e.logger.Error("resolving pod ref failed", "err", err)
return nil return nil
@ -481,19 +481,19 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgroup.Group) { func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgroup.Group) {
var ( var (
svc = &apiv1.Service{}
found bool found bool
name string
) )
svc.Namespace = esa.namespace() ns := esa.namespace()
// Every EndpointSlice object has the Service they belong to in the // Every EndpointSlice object has the Service they belong to in the
// kubernetes.io/service-name label. // kubernetes.io/service-name label.
svc.Name, found = esa.labels()[esa.labelServiceName()] name, found = esa.labels()[esa.labelServiceName()]
if !found { if !found {
return return
} }
obj, exists, err := e.serviceStore.Get(svc) obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name))
if err != nil { if err != nil {
e.logger.Error("retrieving service failed", "err", err) e.logger.Error("retrieving service failed", "err", err)
return return
@ -501,7 +501,7 @@ func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgro
if !exists { if !exists {
return return
} }
svc = obj.(*apiv1.Service) svc := obj.(*apiv1.Service)
tg.Labels = tg.Labels.Merge(serviceLabels(svc)) tg.Labels = tg.Labels.Merge(serviceLabels(svc))
} }

View file

@ -173,7 +173,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err return err
} }
if c.Role == "" { if c.Role == "" {
return fmt.Errorf("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)") return errors.New("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
} }
err = c.HTTPClientConfig.Validate() err = c.HTTPClientConfig.Validate()
if err != nil { if err != nil {
@ -181,20 +181,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
} }
if c.APIServer.URL != nil && c.KubeConfig != "" { if c.APIServer.URL != nil && c.KubeConfig != "" {
// Api-server and kubeconfig_file are mutually exclusive // Api-server and kubeconfig_file are mutually exclusive
return fmt.Errorf("cannot use 'kubeconfig_file' and 'api_server' simultaneously") return errors.New("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
} }
if c.KubeConfig != "" && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) { if c.KubeConfig != "" && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
// Kubeconfig_file and custom http config are mutually exclusive // Kubeconfig_file and custom http config are mutually exclusive
return fmt.Errorf("cannot use a custom HTTP client configuration together with 'kubeconfig_file'") return errors.New("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
} }
if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) { if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
return fmt.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly") return errors.New("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
} }
if c.APIServer.URL != nil && c.NamespaceDiscovery.IncludeOwnNamespace { if c.APIServer.URL != nil && c.NamespaceDiscovery.IncludeOwnNamespace {
return fmt.Errorf("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously") return errors.New("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
} }
if c.KubeConfig != "" && c.NamespaceDiscovery.IncludeOwnNamespace { if c.KubeConfig != "" && c.NamespaceDiscovery.IncludeOwnNamespace {
return fmt.Errorf("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously") return errors.New("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
} }
foundSelectorRoles := make(map[Role]struct{}) foundSelectorRoles := make(map[Role]struct{})
@ -288,7 +288,7 @@ func (d *Discovery) getNamespaces() []string {
func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) { func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
m, ok := metrics.(*kubernetesMetrics) m, ok := metrics.(*kubernetesMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
if l == nil { if l == nil {
@ -672,7 +672,7 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde
indexers[nodeIndex] = func(obj interface{}) ([]string, error) { indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
pod, ok := obj.(*apiv1.Pod) pod, ok := obj.(*apiv1.Pod)
if !ok { if !ok {
return nil, fmt.Errorf("object is not a pod") return nil, errors.New("object is not a pod")
} }
return []string{pod.Spec.NodeName}, nil return []string{pod.Spec.NodeName}, nil
} }
@ -686,7 +686,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
indexers[podIndex] = func(obj interface{}) ([]string, error) { indexers[podIndex] = func(obj interface{}) ([]string, error) {
e, ok := obj.(*apiv1.Endpoints) e, ok := obj.(*apiv1.Endpoints)
if !ok { if !ok {
return nil, fmt.Errorf("object is not endpoints") return nil, errors.New("object is not endpoints")
} }
var pods []string var pods []string
for _, target := range e.Subsets { for _, target := range e.Subsets {
@ -705,7 +705,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
indexers[nodeIndex] = func(obj interface{}) ([]string, error) { indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
e, ok := obj.(*apiv1.Endpoints) e, ok := obj.(*apiv1.Endpoints)
if !ok { if !ok {
return nil, fmt.Errorf("object is not endpoints") return nil, errors.New("object is not endpoints")
} }
var nodes []string var nodes []string
for _, target := range e.Subsets { for _, target := range e.Subsets {
@ -751,7 +751,7 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
} }
} }
default: default:
return nil, fmt.Errorf("object is not an endpointslice") return nil, errors.New("object is not an endpointslice")
} }
return nodes, nil return nodes, nil
@ -804,3 +804,13 @@ func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta,
func namespacedName(namespace, name string) string { func namespacedName(namespace, name string) string {
return namespace + "/" + name return namespace + "/" + name
} }
// nodeName knows how to handle the cache.DeletedFinalStateUnknown tombstone.
// It assumes the MetaNamespaceKeyFunc keyFunc is used, which uses the node name as the tombstone key.
func nodeName(o interface{}) (string, error) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(o)
if err != nil {
return "", err
}
return key, nil
}

View file

@ -23,7 +23,9 @@ import (
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/promslog" "github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
apiv1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/version" "k8s.io/apimachinery/pkg/version"
"k8s.io/apimachinery/pkg/watch" "k8s.io/apimachinery/pkg/watch"
@ -320,3 +322,18 @@ func TestFailuresCountMetric(t *testing.T) {
}) })
} }
} }
func TestNodeName(t *testing.T) {
node := &apiv1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
}
name, err := nodeName(node)
require.NoError(t, err)
require.Equal(t, "foo", name)
name, err = nodeName(cache.DeletedFinalStateUnknown{Key: "bar"})
require.NoError(t, err)
require.Equal(t, "bar", name)
}

View file

@ -82,7 +82,7 @@ func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.Co
} }
func (n *Node) enqueue(obj interface{}) { func (n *Node) enqueue(obj interface{}) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) key, err := nodeName(obj)
if err != nil { if err != nil {
return return
} }

View file

@ -95,8 +95,11 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes cache.SharedIn
p.enqueuePodsForNode(node.Name) p.enqueuePodsForNode(node.Name)
}, },
DeleteFunc: func(o interface{}) { DeleteFunc: func(o interface{}) {
node := o.(*apiv1.Node) nodeName, err := nodeName(o)
p.enqueuePodsForNode(node.Name) if err != nil {
l.Error("Error getting Node name", "err", err)
}
p.enqueuePodsForNode(nodeName)
}, },
}) })
if err != nil { if err != nil {

View file

@ -141,7 +141,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*linodeMetrics) m, ok := metrics.(*linodeMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
d := &Discovery{ d := &Discovery{

View file

@ -15,6 +15,7 @@ package discovery
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"sort" "sort"
"strconv" "strconv"
@ -1209,9 +1210,9 @@ func TestGaugeFailedConfigs(t *testing.T) {
c := map[string]Configs{ c := map[string]Configs{
"prometheus": { "prometheus": {
errorConfig{fmt.Errorf("tests error 0")}, errorConfig{errors.New("tests error 0")},
errorConfig{fmt.Errorf("tests error 1")}, errorConfig{errors.New("tests error 1")},
errorConfig{fmt.Errorf("tests error 2")}, errorConfig{errors.New("tests error 2")},
}, },
} }
discoveryManager.ApplyConfig(c) discoveryManager.ApplyConfig(c)

View file

@ -143,7 +143,7 @@ type Discovery struct {
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*marathonMetrics) m, ok := metrics.(*marathonMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd") rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd")

View file

@ -15,6 +15,7 @@ package moby
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"log/slog" "log/slog"
"net" "net"
@ -110,7 +111,7 @@ func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error
return err return err
} }
if c.Host == "" { if c.Host == "" {
return fmt.Errorf("host missing") return errors.New("host missing")
} }
if _, err = url.Parse(c.Host); err != nil { if _, err = url.Parse(c.Host); err != nil {
return err return err
@ -131,7 +132,7 @@ type DockerDiscovery struct {
func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) { func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
m, ok := metrics.(*dockerMetrics) m, ok := metrics.(*dockerMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
d := &DockerDiscovery{ d := &DockerDiscovery{

View file

@ -15,6 +15,7 @@ package moby
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"log/slog" "log/slog"
"net/http" "net/http"
@ -99,7 +100,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e
return err return err
} }
if c.Host == "" { if c.Host == "" {
return fmt.Errorf("host missing") return errors.New("host missing")
} }
if _, err = url.Parse(c.Host); err != nil { if _, err = url.Parse(c.Host); err != nil {
return err return err
@ -107,7 +108,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e
switch c.Role { switch c.Role {
case "services", "nodes", "tasks": case "services", "nodes", "tasks":
case "": case "":
return fmt.Errorf("role missing (one of: tasks, services, nodes)") return errors.New("role missing (one of: tasks, services, nodes)")
default: default:
return fmt.Errorf("invalid role %s, expected tasks, services, or nodes", c.Role) return fmt.Errorf("invalid role %s, expected tasks, services, or nodes", c.Role)
} }
@ -128,7 +129,7 @@ type Discovery struct {
func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*dockerswarmMetrics) m, ok := metrics.(*dockerswarmMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
d := &Discovery{ d := &Discovery{

View file

@ -124,7 +124,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*nomadMetrics) m, ok := metrics.(*nomadMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
d := &Discovery{ d := &Discovery{

View file

@ -145,7 +145,7 @@ type refresher interface {
func NewDiscovery(conf *SDConfig, l *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { func NewDiscovery(conf *SDConfig, l *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*openstackMetrics) m, ok := metrics.(*openstackMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
r, err := newRefresher(conf, l) r, err := newRefresher(conf, l)

View file

@ -151,7 +151,7 @@ func newRefresher(conf *SDConfig, logger *slog.Logger) (refresher, error) {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*ovhcloudMetrics) m, ok := metrics.(*ovhcloudMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
r, err := newRefresher(conf, logger) r, err := newRefresher(conf, logger)

View file

@ -17,6 +17,7 @@ import (
"bytes" "bytes"
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"log/slog" "log/slog"
@ -109,20 +110,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err return err
} }
if c.URL == "" { if c.URL == "" {
return fmt.Errorf("URL is missing") return errors.New("URL is missing")
} }
parsedURL, err := url.Parse(c.URL) parsedURL, err := url.Parse(c.URL)
if err != nil { if err != nil {
return err return err
} }
if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" { if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
return fmt.Errorf("URL scheme must be 'http' or 'https'") return errors.New("URL scheme must be 'http' or 'https'")
} }
if parsedURL.Host == "" { if parsedURL.Host == "" {
return fmt.Errorf("host is missing in URL") return errors.New("host is missing in URL")
} }
if c.Query == "" { if c.Query == "" {
return fmt.Errorf("query missing") return errors.New("query missing")
} }
return c.HTTPClientConfig.Validate() return c.HTTPClientConfig.Validate()
} }
@ -142,7 +143,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*puppetdbMetrics) m, ok := metrics.(*puppetdbMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {

View file

@ -15,7 +15,7 @@ package refresh
import ( import (
"context" "context"
"fmt" "errors"
"testing" "testing"
"time" "time"
@ -64,7 +64,7 @@ func TestRefresh(t *testing.T) {
case 2: case 2:
return tg2, nil return tg2, nil
} }
return nil, fmt.Errorf("some error") return nil, errors.New("some error")
} }
interval := time.Millisecond interval := time.Millisecond

View file

@ -267,7 +267,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManager) (map[string]DiscovererMetrics, error) { func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManager) (map[string]DiscovererMetrics, error) {
err := rmm.Register() err := rmm.Register()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create service discovery refresh metrics") return nil, errors.New("failed to create service discovery refresh metrics")
} }
metrics := make(map[string]DiscovererMetrics) metrics := make(map[string]DiscovererMetrics)
@ -275,7 +275,7 @@ func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManag
currentSdMetrics := conf.NewDiscovererMetrics(registerer, rmm) currentSdMetrics := conf.NewDiscovererMetrics(registerer, rmm)
err = currentSdMetrics.Register() err = currentSdMetrics.Register()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create service discovery metrics") return nil, errors.New("failed to create service discovery metrics")
} }
metrics[conf.Name()] = currentSdMetrics metrics[conf.Name()] = currentSdMetrics
} }

View file

@ -188,7 +188,7 @@ type Discovery struct{}
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*scalewayMetrics) m, ok := metrics.(*scalewayMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
r, err := newRefresher(conf) r, err := newRefresher(conf)

View file

@ -149,7 +149,7 @@ type Discovery struct {
func New(logger *slog.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) { func New(logger *slog.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*tritonMetrics) m, ok := metrics.(*tritonMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
tls, err := config.NewTLSConfig(&conf.TLSConfig) tls, err := config.NewTLSConfig(&conf.TLSConfig)

View file

@ -215,7 +215,7 @@ func getEndpointInfoForSystems(
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*uyuniMetrics) m, ok := metrics.(*uyuniMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
apiURL, err := url.Parse(conf.Server) apiURL, err := url.Parse(conf.Server)

View file

@ -15,6 +15,7 @@ package vultr
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"log/slog" "log/slog"
"net" "net"
@ -117,7 +118,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*vultrMetrics) m, ok := metrics.(*vultrMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
d := &Discovery{ d := &Discovery{

View file

@ -14,6 +14,7 @@
package xds package xds
import ( import (
"errors"
"fmt" "fmt"
"log/slog" "log/slog"
"net/url" "net/url"
@ -161,7 +162,7 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L
func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) { func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) {
m, ok := metrics.(*xdsMetrics) m, ok := metrics.(*xdsMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, errors.New("invalid discovery metrics type")
} }
// Default to "prometheus" if hostname is unavailable. // Default to "prometheus" if hostname is unavailable.

View file

@ -17,6 +17,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--config.file</code> | Prometheus configuration file path. | `prometheus.yml` | | <code class="text-nowrap">--config.file</code> | Prometheus configuration file path. | `prometheus.yml` |
| <code class="text-nowrap">--config.auto-reload-interval</code> | Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes. | `30s` | | <code class="text-nowrap">--config.auto-reload-interval</code> | Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes. | `30s` |
| <code class="text-nowrap">--web.listen-address</code> <code class="text-nowrap">...<code class="text-nowrap"> | Address to listen on for UI, API, and telemetry. Can be repeated. | `0.0.0.0:9090` | | <code class="text-nowrap">--web.listen-address</code> <code class="text-nowrap">...<code class="text-nowrap"> | Address to listen on for UI, API, and telemetry. Can be repeated. | `0.0.0.0:9090` |
| <code class="text-nowrap">--auto-gomemlimit</code> | Automatically set GOMEMLIMIT to match Linux container or system memory limit | `true` |
| <code class="text-nowrap">--auto-gomemlimit.ratio</code> | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` | | <code class="text-nowrap">--auto-gomemlimit.ratio</code> | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` |
| <code class="text-nowrap">--web.config.file</code> | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | | | <code class="text-nowrap">--web.config.file</code> | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | |
| <code class="text-nowrap">--web.read-timeout</code> | Maximum duration before timing out read of the request, and closing idle connections. | `5m` | | <code class="text-nowrap">--web.read-timeout</code> | Maximum duration before timing out read of the request, and closing idle connections. | `5m` |
@ -58,7 +59,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | | <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--agent</code> | Run Prometheus in 'Agent mode'. | | | <code class="text-nowrap">--agent</code> | Run Prometheus in 'Agent mode'. | |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` | | <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |

View file

@ -462,6 +462,7 @@ Unit tests for rules.
| Flag | Description | Default | | Flag | Description | Default |
| --- | --- | --- | | --- | --- | --- |
| <code class="text-nowrap">--run</code> <code class="text-nowrap">...<code class="text-nowrap"> | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | | | <code class="text-nowrap">--run</code> <code class="text-nowrap">...<code class="text-nowrap"> | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | |
| <code class="text-nowrap">--debug</code> | Enable unit test debugging. | `false` |
| <code class="text-nowrap">--diff</code> | [Experimental] Print colored differential output between expected & received output. | `false` | | <code class="text-nowrap">--diff</code> | [Experimental] Print colored differential output between expected & received output. | `false` |
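
For instance, the new flag composes with the existing ones; a hedged usage sketch (the test file name and group pattern are illustrative):

```bash
# Run only the matching test groups, with unit-test debugging enabled
# and the experimental colored diff output.
promtool test rules --run 'alerts_.*' --debug --diff my_tests.yml
```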

View file

@ -21,10 +21,13 @@ An example rules file with an alert would be:
```yaml ```yaml
groups: groups:
- name: example - name: example
labels:
team: myteam
rules: rules:
- alert: HighRequestLatency - alert: HighRequestLatency
expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5 expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5
for: 10m for: 10m
keep_firing_for: 5m
labels: labels:
severity: page severity: page
annotations: annotations:
@ -38,6 +41,13 @@ the alert continues to be active during each evaluation for 10 minutes before
firing the alert. Elements that are active, but not firing yet, are in the pending state. firing the alert. Elements that are active, but not firing yet, are in the pending state.
Alerting rules without the `for` clause will become active on the first evaluation. Alerting rules without the `for` clause will become active on the first evaluation.
There is also an optional `keep_firing_for` clause that tells Prometheus to keep
this alert firing for the specified duration after the firing condition was last met.
This can be used to prevent situations such as flapping alerts, false resolutions
due to lack of data, etc. Alerting rules without the `keep_firing_for` clause
will deactivate on the first evaluation where the condition is not met (assuming
any optional `for` duration described above has been satisfied).
The `labels` clause allows specifying a set of additional labels to be attached The `labels` clause allows specifying a set of additional labels to be attached
to the alert. Any existing conflicting labels will be overwritten. The label to the alert. Any existing conflicting labels will be overwritten. The label
values can be templated. values can be templated.

View file

@ -79,7 +79,11 @@ global:
[ rule_query_offset: <duration> | default = 0s ] [ rule_query_offset: <duration> | default = 0s ]
# The labels to add to any time series or alerts when communicating with # The labels to add to any time series or alerts when communicating with
# external systems (federation, remote storage, Alertmanager). # external systems (federation, remote storage, Alertmanager).
# Environment variable references `${var}` or `$var` are replaced according
# to the values of the current environment variables.
# References to undefined variables are replaced by the empty string.
# The `$` character can be escaped by using `$$`.
external_labels: external_labels:
[ <labelname>: <labelvalue> ... ] [ <labelname>: <labelvalue> ... ]
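
A minimal sketch of the expansion rules described in the comment above (the variable and label names are illustrative):

```yaml
global:
  external_labels:
    # Replaced with the value of $REGION, or "" if it is undefined.
    region: ${REGION}
    # "$$" yields a literal "$" in the label value.
    team: oncall$$primary
```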
@ -208,12 +212,18 @@ job_name: <job_name>
# The protocols to negotiate during a scrape with the client.
# Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
# OpenMetricsText1.0.0, PrometheusText0.0.4, PrometheusText1.0.0.
[ scrape_protocols: [<string>, ...] | default = <global_config.scrape_protocols> ]

# Fallback protocol to use if a scrape returns blank, unparseable, or otherwise
# invalid Content-Type.
# Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
# OpenMetricsText1.0.0, PrometheusText0.0.4, PrometheusText1.0.0.
[ fallback_scrape_protocol: <string> ]

# Whether to scrape a classic histogram, even if it is also exposed as a native
# histogram (has no effect without --enable-feature=native-histograms).
[ always_scrape_classic_histograms: <boolean> | default = false ]

# The HTTP resource path on which to fetch metrics from targets.
[ metrics_path: <path> | default = /metrics ]
@ -2879,6 +2889,7 @@ metadata_config:
# HTTP client settings, including authentication methods (such as basic auth and # HTTP client settings, including authentication methods (such as basic auth and
# authorization), proxy configurations, TLS options, custom HTTP headers, etc. # authorization), proxy configurations, TLS options, custom HTTP headers, etc.
# enable_http2 defaults to false for remote-write.
[ <http_config> ] [ <http_config> ]
``` ```
@ -2930,8 +2941,6 @@ with this feature.
`tsdb` lets you configure the runtime-reloadable configuration settings of the TSDB. `tsdb` lets you configure the runtime-reloadable configuration settings of the TSDB.
NOTE: Out-of-order ingestion is an experimental feature, but you do not need any additional flag to enable it. Setting `out_of_order_time_window` to a positive duration enables it.
```yaml ```yaml
# Configures how old an out-of-order/out-of-bounds sample can be w.r.t. the TSDB max time. # Configures how old an out-of-order/out-of-bounds sample can be w.r.t. the TSDB max time.
# An out-of-order/out-of-bounds sample is ingested into the TSDB as long as the timestamp # An out-of-order/out-of-bounds sample is ingested into the TSDB as long as the timestamp

View file

@ -89,6 +89,11 @@ name: <string>
# Offset the rule evaluation timestamp of this particular group by the specified duration into the past. # Offset the rule evaluation timestamp of this particular group by the specified duration into the past.
[ query_offset: <duration> | default = global.rule_query_offset ] [ query_offset: <duration> | default = global.rule_query_offset ]
# Labels to add or overwrite before storing the result for its rules.
# Labels defined in <rule> will override the key if it has a collision.
labels:
[ <labelname>: <labelvalue> ]
rules: rules:
[ - <rule> ... ] [ - <rule> ... ]
``` ```
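
A minimal sketch of how group-level labels combine with rule-level labels (metric and record names are illustrative):

```yaml
groups:
  - name: example
    labels:
      team: myteam              # attached to every result in the group
    rules:
      - record: job:http_requests:rate5m
        expr: sum by (job) (rate(http_requests_total[5m]))
        labels:
          team: otherteam       # rule-level labels win on collision
```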

View file

@ -11,15 +11,6 @@ Their behaviour can change in future releases which will be communicated via the
You can enable them using the `--enable-feature` flag with a comma separated list of features. You can enable them using the `--enable-feature` flag with a comma separated list of features.
They may be enabled by default in future versions. They may be enabled by default in future versions.
## Expand environment variables in external labels
`--enable-feature=expand-external-labels`
Replace `${var}` or `$var` in the [`external_labels`](configuration/configuration.md#configuration-file)
values according to the values of the current environment variables. References
to undefined variables are replaced by the empty string.
The `$` character can be escaped by using `$$`.
## Exemplars storage ## Exemplars storage
`--enable-feature=exemplar-storage` `--enable-feature=exemplar-storage`
@ -32,9 +23,8 @@ Exemplar storage is implemented as a fixed size circular buffer that stores exem
`--enable-feature=memory-snapshot-on-shutdown` `--enable-feature=memory-snapshot-on-shutdown`
This takes a snapshot of the chunks that are in memory along with the series information when shutting down and stores it on disk. This will reduce the startup time since the memory state can now be restored with this snapshot
and m-mapped chunks, while a WAL replay from disk is only needed for the parts of the WAL that are not part of the snapshot.
## Extra scrape metrics ## Extra scrape metrics
@ -63,14 +53,6 @@ computed at all.
When enabled, GOMAXPROCS variable is automatically set to match Linux container CPU quota. When enabled, GOMAXPROCS variable is automatically set to match Linux container CPU quota.
## Auto GOMEMLIMIT
`--enable-feature=auto-gomemlimit`
When enabled, the GOMEMLIMIT variable is automatically set to match the Linux container memory limit. If there is no container limit, or the process is running outside of containers, the system memory total is used.
There is also an additional tuning flag, `--auto-gomemlimit.ratio`, which allows controlling how much of the memory is used for Prometheus. The remainder is reserved for memory outside the process. For example, kernel page cache. Page cache is important for Prometheus TSDB query performance. The default is `0.9`, which means 90% of the memory limit will be used for Prometheus.
## Native Histograms ## Native Histograms
`--enable-feature=native-histograms` `--enable-feature=native-histograms`
@ -93,59 +75,7 @@ those classic histograms that do not come with a corresponding native
histogram. However, if a native histogram is present, Prometheus will ignore histogram. However, if a native histogram is present, Prometheus will ignore
the corresponding classic histogram, with the notable exception of exemplars, the corresponding classic histogram, with the notable exception of exemplars,
which are always ingested. To keep the classic histograms as well, enable which are always ingested. To keep the classic histograms as well, enable
`scrape_classic_histograms` in the scrape job. `always_scrape_classic_histograms` in the scrape job.
_Note about the format of `le` and `quantile` label values:_
In certain situations, the protobuf parsing changes the number formatting of
the `le` labels of classic histograms and the `quantile` labels of
summaries. Typically, this happens if the scraped target is instrumented with
[client_golang](https://github.com/prometheus/client_golang) provided that
[promhttp.HandlerOpts.EnableOpenMetrics](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus/promhttp#HandlerOpts)
is set to `false`. In such a case, integer label values are represented in the
text format as such, e.g. `quantile="1"` or `le="2"`. However, the protobuf parsing
changes the representation to float-like (following the OpenMetrics
specification), so the examples above become `quantile="1.0"` and `le="2.0"` after
ingestion into Prometheus, which changes the identity of the metric compared to
what was ingested before via the text format.
The effect of this change is that alerts, recording rules and dashboards that
directly reference label values as whole numbers such as `le="1"` will stop
working.
Aggregation by the `le` and `quantile` labels for vectors that contain the old and
new formatting will lead to unexpected results, and range vectors that span the
transition between the different formatting will contain additional series.
The most common use case for both is the quantile calculation via
`histogram_quantile`, e.g.
`histogram_quantile(0.95, sum by (le) (rate(histogram_bucket[10m])))`.
The `histogram_quantile` function already tries to mitigate the effects to some
extent, but there will be inaccuracies, in particular for shorter ranges that
cover only a few samples.
Ways to deal with this change either globally or on a per metric basis:
- Fix references to integer `le`, `quantile` label values, but otherwise do
nothing and accept that some queries that span the transition time will produce
inaccurate or unexpected results.
_This is the recommended solution, to get consistently normalized label values._
Also Prometheus 3.0 is expected to enforce normalization of these label values.
- Use `metric_relabel_config` to retain the old labels when scraping targets.
This should **only** be applied to metrics that currently produce such labels.
<!-- The following config snippet is unit tested in scrape/scrape_test.go. -->
```yaml
metric_relabel_configs:
- source_labels:
- quantile
target_label: quantile
regex: (\d+)\.0+
- source_labels:
- le
- __name__
target_label: le
regex: (\d+)\.0+;.*_bucket
```
## Experimental PromQL functions ## Experimental PromQL functions

View file

@ -3,198 +3,198 @@ title: Migration
sort_rank: 10
---

# Prometheus 2.0 migration guide

In line with our [stability promise](https://prometheus.io/blog/2016/07/18/prometheus-1-0-released/#fine-print),
the Prometheus 2.0 release contains a number of backwards incompatible changes.
This document offers guidance on migrating from Prometheus 1.8 to Prometheus 2.0 and newer versions.

## Flags

The format of Prometheus command line flags has changed. Instead of a
single dash, all flags now use a double dash. Common flags (`--config.file`,
`--web.listen-address` and `--web.external-url`) remain but
almost all storage-related flags have been removed.

Some notable flags which have been removed:

- `-alertmanager.url` In Prometheus 2.0, the command line flags for configuring
  a static Alertmanager URL have been removed. Alertmanager must now be
  discovered via service discovery, see [Alertmanager service discovery](#alertmanager-service-discovery).
- `-log.format` In Prometheus 2.0 logs can only be streamed to standard error.
- `-query.staleness-delta` has been renamed to `--query.lookback-delta`; Prometheus
  2.0 introduces a new mechanism for handling staleness, see [staleness](querying/basics.md#staleness).
- `-storage.local.*` Prometheus 2.0 introduces a new storage engine; as such all
  flags relating to the old engine have been removed. For information on the
  new engine, see [Storage](#storage).
- `-storage.remote.*` Prometheus 2.0 has removed the deprecated remote
  storage flags, and will fail to start if they are supplied. To write to
  InfluxDB, Graphite, or OpenTSDB use the relevant storage adapter.

# Prometheus 3.0 migration guide

In line with our [stability promise](https://prometheus.io/docs/prometheus/latest/stability/),
the Prometheus 3.0 release contains a number of backwards incompatible changes.
This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0 and newer versions.

## Flags

- The following feature flags have been removed and they have been added to the
  default behavior of Prometheus v3:
  - `promql-at-modifier`
  - `promql-negative-offset`
  - `remote-write-receiver`
  - `new-service-discovery-manager`
  - `expand-external-labels`

    Environment variable references `${var}` or `$var` in external label values
    are replaced according to the values of the current environment variables.
    References to undefined variables are replaced by the empty string.
    The `$` character can be escaped by using `$$`.
  - `no-default-scrape-port`

    Prometheus v3 will no longer add ports to scrape targets according to the
    specified scheme. Targets will now appear in labels as configured.
    If you rely on scrape targets like
    `https://example.com/metrics` or `http://example.com/metrics` to be
    represented as `https://example.com/metrics:443` and
    `http://example.com/metrics:80` respectively, add the ports to your target
    URLs explicitly.
  - `agent`

    Instead use the dedicated `--agent` CLI flag.

  Prometheus v3 will log a warning if you continue to pass these to
  `--enable-feature`.

## Configuration

- The scrape job level configuration option `scrape_classic_histograms` has been
  renamed to `always_scrape_classic_histograms`. If you use the
  `--enable-feature=native-histograms` feature flag to ingest native histograms
  and you also want to ingest classic histograms that an endpoint might expose
  along with native histograms, be sure to add this configuration or change your
  configuration from the old name.
- The default for `http_config.enable_http2` in `remote_write` items has been
  changed to `false`. In Prometheus v2 the remote write HTTP client would
  default to using HTTP/2. In order to parallelize multiple remote write queues
  across multiple sockets it is preferable not to default to HTTP/2.
  If you prefer to use HTTP/2 for remote write you must now set
  `http_config.enable_http2: true` in your `remote_write` configuration section.
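
A minimal sketch of opting a single endpoint back into HTTP/2 under the new default (the URL is a placeholder):

```yaml
remote_write:
  - url: "https://remote-storage.example.com/api/v1/write"
    # HTTP client options are inlined into each remote_write entry;
    # enable_http2 now defaults to false here.
    enable_http2: true
```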
## Alertmanager service discovery
Alertmanager service discovery was introduced in Prometheus 1.4, allowing Prometheus
to dynamically discover Alertmanager replicas using the same mechanism as scrape
targets. In Prometheus 2.0, the command line flags for static Alertmanager config
have been removed, so the following command line flag:
```
./prometheus -alertmanager.url=http://alertmanager:9093/
```
Would be replaced with the following in the `prometheus.yml` config file:
```yaml
alerting:
alertmanagers:
- static_configs:
- targets:
- alertmanager:9093
```
You can also use all the usual Prometheus service discovery integrations and
relabeling in your Alertmanager configuration. This snippet instructs
Prometheus to search for Kubernetes pods, in the `default` namespace, with the
label `name: alertmanager` and with a non-empty port.
```yaml
alerting:
alertmanagers:
- kubernetes_sd_configs:
- role: pod
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels: [__meta_kubernetes_pod_label_name]
regex: alertmanager
action: keep
- source_labels: [__meta_kubernetes_namespace]
regex: default
action: keep
- source_labels: [__meta_kubernetes_pod_container_port_number]
regex:
action: drop
```
## Recording rules and alerts
The format for configuring alerting and recording rules has been changed to YAML.
An example of a recording rule and alert in the old format:
```
job:request_duration_seconds:histogram_quantile99 =
histogram_quantile(0.99, sum by (le, job) (rate(request_duration_seconds_bucket[1m])))
ALERT FrontendRequestLatency
IF job:request_duration_seconds:histogram_quantile99{job="frontend"} > 0.1
FOR 5m
ANNOTATIONS {
summary = "High frontend request latency",
}
```
Would look like this:
```yaml
groups:
- name: example.rules
rules:
- record: job:request_duration_seconds:histogram_quantile99
expr: histogram_quantile(0.99, sum by (le, job) (rate(request_duration_seconds_bucket[1m])))
- alert: FrontendRequestLatency
expr: job:request_duration_seconds:histogram_quantile99{job="frontend"} > 0.1
for: 5m
annotations:
summary: High frontend request latency
```
To help with the change, the `promtool` tool has a mode to automate the rules conversion. Given a `.rules` file, it will output a `.rules.yml` file in the
new format. For example:
```
$ promtool update rules example.rules
```
You will need to use `promtool` from [Prometheus 2.5](https://github.com/prometheus/prometheus/releases/tag/v2.5.0) as later versions no longer contain the above subcommand.
## Storage
The data format in Prometheus 2.0 has completely changed and is not backwards
compatible with 1.8 and older versions. To retain access to your historic monitoring data we
recommend you run a non-scraping Prometheus instance running at least version
1.8.1 in parallel with your Prometheus 2.0 instance, and have the new server
read existing data from the old one via the remote read protocol.
Your Prometheus 1.8 instance should be started with the following flags and an
config file containing only the `external_labels` setting (if any):
```
$ ./prometheus-1.8.1.linux-amd64/prometheus -web.listen-address ":9094" -config.file old.yml
```
Prometheus 2.0 can then be started (on the same machine) with the following flags:
```
$ ./prometheus-2.0.0.linux-amd64/prometheus --config.file prometheus.yml
```
Where `prometheus.yml` contains in addition to your full existing configuration, the stanza:
```yaml
remote_read:
- url: "http://localhost:9094/api/v1/read"
```
## PromQL

The following features have been removed from PromQL:

- `drop_common_labels` function - the `without` aggregation modifier should be used
  instead.
- `keep_common` aggregation modifier - the `by` modifier should be used instead.
- `count_scalar` function - use cases are better handled by `absent()` or correct
  propagation of labels in operations.

See [issue #3060](https://github.com/prometheus/prometheus/issues/3060) for more
details.

## PromQL

- The `.` pattern in regular expressions in PromQL matches newline characters.
  With this change a regular expression like `.*` matches strings that include
  `\n`. This applies to matchers in queries and relabel configs. For example the
  following regular expressions now match the accompanying strings, whereas in
  Prometheus v2 these combinations didn't match.

  | Regex | Additional matches |
  | ----- | ------ |
  | ".*" | "foo\n", "Foo\nBar" |
  | "foo.?bar" | "foo\nbar" |
  | "foo.+bar" | "foo\nbar" |

  If you want Prometheus v3 to behave like v2 did, you will have to change your
  regular expressions by replacing all `.` patterns with `[^\n]`, e.g.
  `foo[^\n]*`.
- Lookback and range selectors are left-open and right-closed (previously
  left-closed and right-closed). This change affects queries when the evaluation
  time perfectly aligns with the sample timestamps. For example, assume querying a
  timeseries with evenly spaced samples exactly 1 minute apart. Before Prometheus
  3.x, a range query with `5m` would mostly return 5 samples. But if the query
  evaluation aligned perfectly with a scrape, it would return 6 samples. In
  Prometheus 3.x queries like this will always return 5 samples.
  This change has likely few effects for everyday use, except for some subquery
  use cases.
  Query front-ends that align queries usually align subqueries to multiples of
  the step size. These subqueries will likely be affected.
  Tests are more likely to be affected. To fix those, either adjust the expected
  number of samples or extend the range by less than one sample interval.
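  As a sketch, a test that expected 6 samples from an aligned `5m` window could
  extend the range instead (metric name and 1-minute scrape interval are
  assumptions for illustration):

  ```
  # v2: returned 6 samples when evaluation aligned with a scrape; v3: 5 samples.
  count_over_time(my_metric[5m])

  # Extending the range by less than one sample interval returns 6 samples again.
  count_over_time(my_metric[5m1s])
  ```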
- The `holt_winters` function has been renamed to `double_exponential_smoothing`
  and is now guarded by the `promql-experimental-functions` feature flag.
  If you want to keep using `holt_winters`, you have to do both of these things:
  - Rename `holt_winters` to `double_exponential_smoothing` in your queries.
  - Pass `--enable-feature=promql-experimental-functions` in your Prometheus
    CLI invocation.
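  For example, a smoothing expression would be renamed like this (the metric
  name is illustrative; the smoothing and trend factor arguments are unchanged):

  ```
  # Prometheus v2:
  holt_winters(my_metric[10m], 0.5, 0.5)

  # Prometheus v3, with --enable-feature=promql-experimental-functions:
  double_exponential_smoothing(my_metric[10m], 0.5, 0.5)
  ```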
## Scrape protocols
Prometheus v3 is more strict concerning the Content-Type header received when
scraping. Prometheus v2 would default to the standard Prometheus text protocol
if the target being scraped did not specify a Content-Type header or if the
header was unparsable or unrecognised. This could lead to incorrect data being
parsed in the scrape. Prometheus v3 will now fail the scrape in such cases.
If a scrape target is not providing the correct Content-Type header, the
fallback protocol can be specified using the `fallback_scrape_protocol`
parameter. See the [Prometheus scrape_config documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).
This is a breaking change, as scrapes that may have succeeded with Prometheus v2
may now fail if this fallback protocol is not specified.
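A minimal sketch of such a configuration, assuming the target speaks the
classic Prometheus text format (the job name and target address are
illustrative):

```yaml
scrape_configs:
  - job_name: legacy-exporter
    # Used when the target sends a missing or unparsable Content-Type header.
    fallback_scrape_protocol: PrometheusText0.0.4
    static_configs:
      - targets: ["localhost:9100"]
```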
## Miscellaneous

### TSDB format and downgrade

The TSDB format has been changed in Prometheus v2.55 in preparation for changes
to the index format. Consequently, a Prometheus v3 TSDB can only be read by
Prometheus v2.55 or newer.
Before upgrading to Prometheus v3, please upgrade to v2.55 first and confirm
Prometheus works as expected. Only then continue with the upgrade to v3.

### TSDB Storage contract

TSDB-compatible storage is now expected to return results matching the specified
selectors. This might impact some third-party implementations, most likely those
implementing `remote_read`.
This contract is not explicitly enforced, but can cause undefined behavior.
### UTF-8 names
Prometheus v3 supports UTF-8 in metric and label names. This means metric and
label names can change after upgrading according to what is exposed by
endpoints. Furthermore, metric and label names that would have previously been
flagged as invalid no longer will be.
Users wishing to preserve the original validation behavior can update their
Prometheus YAML configuration to specify the legacy validation scheme:

```yaml
global:
  metric_name_validation_scheme: legacy
```

Or on a per-scrape basis:

```yaml
scrape_configs:
  - job_name: job1
    metric_name_validation_scheme: utf8
  - job_name: job2
    metric_name_validation_scheme: legacy
```
### Log message format
Prometheus v3 has adopted `log/slog` over the previous `go-kit/log`. This
results in a change of log message format. An example of the old log format is:
```
ts=2024-10-23T22:01:06.074Z caller=main.go:627 level=info msg="No time or size retention was set so using the default time retention" duration=15d
ts=2024-10-23T22:01:06.074Z caller=main.go:671 level=info msg="Starting Prometheus Server" mode=server version="(version=, branch=, revision=91d80252c3e528728b0f88d254dd720f6be07cb8-modified)"
ts=2024-10-23T22:01:06.074Z caller=main.go:676 level=info build_context="(go=go1.23.0, platform=linux/amd64, user=, date=, tags=unknown)"
ts=2024-10-23T22:01:06.074Z caller=main.go:677 level=info host_details="(Linux 5.15.0-124-generic #134-Ubuntu SMP Fri Sep 27 20:20:17 UTC 2024 x86_64 gigafips (none))"
```
A similar sequence in the new log format looks like this:
```
time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:640 msg="No time or size retention was set so using the default time retention" duration=15d
time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:681 msg="Starting Prometheus Server" mode=server version="(version=, branch=, revision=7c7116fea8343795cae6da42960cacd0207a2af8)"
time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:686 msg="operational information" build_context="(go=go1.23.0, platform=linux/amd64, user=, date=, tags=unknown)" host_details="(Linux 5.15.0-124-generic #134-Ubuntu SMP Fri Sep 27 20:20:17 UTC 2024 x86_64 gigafips (none))" fd_limits="(soft=1048576, hard=1048576)" vm_limits="(soft=unlimited, hard=unlimited)"
```
### `le` and `quantile` label values
In Prometheus v3, the values of the `le` label of classic histograms and the
`quantile` label of summaries are normalized upon ingestion. In Prometheus v2
the values of these labels depended on the scrape protocol (protobuf vs text
format) in some situations. This led to label values changing based on the
scrape protocol. E.g. a metric exposed as `my_classic_hist{le="1"}` would be
ingested as `my_classic_hist{le="1"}` via the text format, but as
`my_classic_hist{le="1.0"}` via protobuf. This changed the identity of the
metric and caused problems when querying the metric.
In Prometheus v3 these label values will always be normalized to a float-like
representation. I.e. the above example will always result in
`my_classic_hist{le="1.0"}` being ingested into Prometheus, regardless of the
protocol. The effect of this change is that alerts, recording rules and
dashboards that directly reference label values as whole numbers such as
`le="1"` will stop working.

Ways to deal with this change either globally or on a per-metric basis:

- Fix references to integer `le`, `quantile` label values (see the sketch after
  this list), but otherwise do nothing and accept that some queries that span
  the transition time will produce inaccurate or unexpected results.
  _This is the recommended solution._
- Use `metric_relabel_configs` to retain the old labels when scraping targets.
  This should **only** be applied to metrics that currently produce such labels.
  ```yaml
  metric_relabel_configs:
    - source_labels:
        - quantile
      target_label: quantile
      regex: (\d+)\.0+
    - source_labels:
        - le
        - __name__
      target_label: le
      regex: (\d+)\.0+;.*_bucket
  ```
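As a sketch of the first option, a query or alert expression referencing an
integer `le` value would be updated like this (reusing the metric from the
example above):

```
# Before (matches what Prometheus v2 ingested via the text format):
my_classic_hist{le="1"}

# After (matches the normalized v3 label value):
my_classic_hist{le="1.0"}
```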
# Prometheus 2.0 migration guide

For the Prometheus 1.8 to 2.0 migration, please refer to the [Prometheus v2.55 documentation](https://prometheus.io/docs/prometheus/2.55/migration/).
View file
@@ -568,7 +568,7 @@ Instant vectors are returned as result type `vector`. The corresponding
Each series could have the `"value"` key, or the `"histogram"` key, but not both.
Series are not guaranteed to be returned in any particular order unless a function
such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label)
is used.
### Scalars
@@ -764,6 +764,8 @@ URL query parameters:
- `file[]=<string>`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done.
- `exclude_alerts=<bool>`: only return rules, do not return active alerts.
- `match[]=<label_selector>`: only return rules that have configured labels that satisfy the label selectors. If the parameter is repeated, rules that match any of the sets of label selectors are returned. Note that matching is on the labels in the definition of each rule, not on the values after template expansion (for alerting rules). Optional.
- `group_limit=<number>`: The `group_limit` parameter allows you to specify a limit for the number of rule groups returned in a single response. If the total number of rule groups exceeds the specified `group_limit` value, the response will include a `groupNextToken` property. You can use the value of this `groupNextToken` property in subsequent requests in the `group_next_token` parameter to paginate over the remaining rule groups. The `groupNextToken` property will not be present in the final response, indicating that you have retrieved all the available rule groups. Please note that there are no guarantees regarding the consistency of the response if the rule groups are being modified during the pagination process.
- `group_next_token`: the pagination token that was returned in the previous request when the `group_limit` property is set. The pagination token is used to iteratively paginate over a large number of rule groups. To use the `group_next_token` parameter, the `group_limit` parameter also needs to be present (see the sketch below). If a rule group that coincides with the next token is removed while you are paginating over the rule groups, a response with status code 400 will be returned.
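A sketch of paginating with these parameters (the token value is illustrative;
use the `groupNextToken` value returned by your own first response):

```
$ curl 'http://localhost:9090/api/v1/rules?group_limit=10'
# ... response includes "groupNextToken":"Z3JvdXBfbmV4dF90b2tlbg==" ...
$ curl 'http://localhost:9090/api/v1/rules?group_limit=10&group_next_token=Z3JvdXBfbmV4dF90b2tlbg=='
```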
```json
$ curl http://localhost:9090/api/v1/rules
@@ -903,7 +905,7 @@ curl -G http://localhost:9091/api/v1/targets/metadata \
```

The following example returns metadata for all metrics for all targets with
label `instance="127.0.0.1:9090"`.

```json
curl -G http://localhost:9091/api/v1/targets/metadata \
@@ -1188,9 +1190,11 @@ The following endpoint returns various cardinality statistics about the Promethe
GET /api/v1/status/tsdb
```

URL query parameters:

- `limit=<number>`: Limit the number of returned items to a given number for each set of statistics. By default, 10 items are returned.

The `data` section of the query result consists of:

- **headStats**: This provides the following data about the head block of the TSDB:
  - **numSeries**: The number of series.
  - **chunkCount**: The number of chunks.
@@ -1266,13 +1270,13 @@ The following endpoint returns information about the WAL replay:
GET /api/v1/status/walreplay
```

- **read**: The number of segments replayed so far.
- **total**: The total number of segments needed to be replayed.
- **progress**: The progress of the replay (0 - 100%).
- **state**: The state of the replay. Possible states:
  - **waiting**: Waiting for the replay to start.
  - **in progress**: The replay is in progress.
  - **done**: The replay has finished.

```json
$ curl http://localhost:9090/api/v1/status/walreplay
View file
@@ -464,6 +464,97 @@ by the number of seconds under the specified time range window, and should be
used primarily for human readability. Use `rate` in recording rules so that
increases are tracked consistently on a per-second basis.
## `info()` (experimental)
_The `info` function is an experiment to improve UX
around including labels from [info metrics](https://grafana.com/blog/2021/08/04/how-to-use-promql-joins-for-more-effective-queries-of-prometheus-metrics-at-scale/#info-metrics).
The behavior of this function may change in future versions of Prometheus,
including its removal from PromQL. `info` has to be enabled via the
[feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`._
`info(v instant-vector, [data-label-selector instant-vector])` finds, for each time
series in `v`, all info series with matching _identifying_ labels (more on
this later), and adds the union of their _data_ (i.e., non-identifying) labels
to the time series. The second argument `data-label-selector` is optional.
It is not a real instant vector, but uses a subset of its syntax.
It must start and end with curly braces (`{ ... }`) and may only contain label matchers.
The label matchers are used to constrain which info series to consider
and which data labels to add to `v`.
Identifying labels of an info series are the subset of labels that uniquely
identify the info series. The remaining labels are considered
_data labels_ (also called non-identifying). (Note that Prometheus's concept
of time series identity always includes _all_ the labels. For the sake of the `info`
function, we “logically” define info series identity in a different way than
in the conventional Prometheus view.) The identifying labels of an info series
are used to join it to regular (non-info) series, i.e. those series that have
the same labels as the identifying labels of the info series. The data labels, which are
the ones added to the regular series by the `info` function, effectively encode
metadata key-value pairs. (This implies that a change in the data labels
in the conventional Prometheus view constitutes the end of one info series and
the beginning of a new info series, while the “logical” view of the `info` function is
that the same info series continues to exist, just with different “data”.)
The conventional approach of adding data labels is sometimes called a “join query”,
as illustrated by the following example:
```
rate(http_server_request_duration_seconds_count[2m])
* on (job, instance) group_left (k8s_cluster_name)
target_info
```
The core of the query is the expression `rate(http_server_request_duration_seconds_count[2m])`.
But to add data labels from an info metric, the user has to use elaborate
(and not very obvious) syntax to specify which info metric to use (`target_info`), what the
identifying labels are (`on (job, instance)`), and which data labels to add
(`group_left (k8s_cluster_name)`).
This query is not only verbose and hard to write, it might also run into an “identity crisis”:
If any of the data labels of `target_info` changes, Prometheus sees that as a change of series
(as alluded to above, Prometheus just has no native concept of non-identifying labels).
If the old `target_info` series is not properly marked as stale (which can happen with certain ingestion paths),
the query above will fail for up to 5m (the lookback delta) because it will find a conflicting
match with both the old and the new version of `target_info`.
The `info` function not only resolves this conflict in favor of the newer series, it also simplifies the syntax
because it knows about the available info series and what their identifying labels are. The example query
looks like this with the `info` function:
```
info(
rate(http_server_request_duration_seconds_count[2m]),
{k8s_cluster_name=~".+"}
)
```
The common case of adding _all_ data labels can be achieved by
omitting the 2nd argument of the `info` function entirely, simplifying
the example even more:
```
info(rate(http_server_request_duration_seconds_count[2m]))
```
While `info` normally automatically finds all matching info series, it's possible to
restrict them by providing a `__name__` label matcher, e.g.
`{__name__="target_info"}`.
### Limitations
In its current iteration, `info` defaults to considering only info series with
the name `target_info`. It also assumes that the identifying info series labels are
`instance` and `job`. `info` does support other info series names however, through
`__name__` label matchers. E.g., one can explicitly say to consider both
`target_info` and `build_info` as follows:
`{__name__=~"(target|build)_info"}`. However, the identifying labels always
have to be `instance` and `job`.
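For example, a call that considers both of these info metrics could look like
this (reusing the request-rate expression from above):

```
info(
  rate(http_server_request_duration_seconds_count[2m]),
  {__name__=~"(target|build)_info"}
)
```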
These limitations partially defeat the purpose of the `info` function.
At the current stage, this is an experiment to find out how useful the approach
turns out to be in practice. A final version of the `info` function will indeed
consider all matching info series with their appropriate identifying labels.
## `irate()`

`irate(v range-vector)` calculates the per-second instant rate of increase of
View file
@@ -9,7 +9,7 @@ Prometheus promises API stability within a major version, and strives to avoid
breaking changes for key features. Some features, which are cosmetic, still
under development, or depend on 3rd party services, are not covered by this.

Things considered stable for 3.x:

* The query language and data model
* Alerting and recording rules
@@ -18,21 +18,25 @@ Things considered stable for 2.x:
* Configuration file format (minus the service discovery remote read/write, see below)
* Rule/alert file format
* Console template syntax and semantics
* Remote write sending and receiving, per the [1.0 specification](https://prometheus.io/docs/concepts/remote_write_spec/)
* Agent mode
* OTLP receiver endpoint

Things considered unstable for 3.x:

* Any feature listed as experimental or subject to change, including:
  * The [`double_exponential_smoothing` PromQL function](https://github.com/prometheus/prometheus/issues/2458)
* Remote read and the remote read endpoint
* Server-side HTTPS and basic authentication
* Service discovery integrations, with the exception of `static_configs`, `file_sd_configs` and `http_sd_config`
* Go APIs of packages that are part of the server
* HTML generated by the web UI
* The metrics in the /metrics endpoint of Prometheus itself
* Exact on-disk format. Potential changes, however, will be forward compatible and transparently handled by Prometheus
* The format of the logs
Prometheus 2.x stability guarantees can be found [in the 2.x documentation](https://prometheus.io/docs/prometheus/2.55/stability/).
As long as you are not using any features marked as experimental/unstable, an
upgrade within a major version can usually be performed without any operational
adjustments and very little risk that anything will break. Any breaking changes
View file
@@ -0,0 +1,31 @@
# my global config
global:
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.

otlp:
  # Recommended attributes to be promoted to labels.
  promote_resource_attributes:
    - service.instance.id
    - service.name
    - service.namespace
    - cloud.availability_zone
    - cloud.region
    - container.name
    - deployment.environment.name
    - k8s.cluster.name
    - k8s.container.name
    - k8s.cronjob.name
    - k8s.daemonset.name
    - k8s.deployment.name
    - k8s.job.name
    - k8s.namespace.name
    - k8s.pod.name
    - k8s.replicaset.name
    - k8s.statefulset.name
  # Ingest OTLP data keeping UTF-8 characters in metric/label names.
  translation_strategy: NoUTF8EscapingWithSuffixes

storage:
  # OTLP is a push-based protocol; out-of-order samples are a common scenario.
  tsdb:
    out_of_order_time_window: 30m
View file
@@ -6,9 +6,9 @@ require (
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
-github.com/influxdata/influxdb v1.11.6
+github.com/influxdata/influxdb v1.11.7
-github.com/prometheus/client_golang v1.20.4
+github.com/prometheus/client_golang v1.20.5
-github.com/prometheus/common v0.60.0
+github.com/prometheus/common v0.60.1
github.com/prometheus/prometheus v0.53.1
github.com/stretchr/testify v1.9.0
)
View file
@@ -166,8 +166,8 @@ github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MI
github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
-github.com/influxdata/influxdb v1.11.6 h1:zS5MRY+RQ5/XFTer5R8xQRnY17JYSbacvO6OaP164wU=
+github.com/influxdata/influxdb v1.11.7 h1:C31A+S9YfjTCOuAv9Qs0ZdQufslOZZBtejjxiV8QNQw=
-github.com/influxdata/influxdb v1.11.6/go.mod h1:F10NoQb9qa04lME3pTPWQrYt4JZ/ke1Eei+1ttgHHrg=
+github.com/influxdata/influxdb v1.11.7/go.mod h1:zRTAuk/Ie/V1LGxJUv8jfDmfv+ypz22lxfhc1MxC3rI=
github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@@ -253,8 +253,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
-github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -264,8 +264,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
+github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
-github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
+github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
go.mod
View file
@@ -5,8 +5,8 @@ go 1.22.0
toolchain go1.23.0

require (
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
github.com/Code-Hex/go-generics-cache v1.5.1
@@ -17,10 +17,10 @@ require (
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
-github.com/digitalocean/godo v1.126.0
+github.com/digitalocean/godo v1.128.0
github.com/docker/docker v27.3.1+incompatible
-github.com/edsrzf/mmap-go v1.1.0
+github.com/edsrzf/mmap-go v1.2.0
-github.com/envoyproxy/go-control-plane v0.13.0
+github.com/envoyproxy/go-control-plane v0.13.1
github.com/envoyproxy/protoc-gen-validate v1.1.0
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
github.com/fsnotify/fsnotify v1.7.0
@@ -34,14 +34,14 @@ require (
github.com/gophercloud/gophercloud v1.14.1
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
github.com/grpc-ecosystem/grpc-gateway v1.16.0
-github.com/hashicorp/consul/api v1.29.4
+github.com/hashicorp/consul/api v1.30.0
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3
-github.com/hetznercloud/hcloud-go/v2 v2.13.1
+github.com/hetznercloud/hcloud-go/v2 v2.15.0
github.com/ionos-cloud/sdk-go/v6 v6.2.1
github.com/json-iterator/go v1.1.12
-github.com/klauspost/compress v1.17.10
+github.com/klauspost/compress v1.17.11
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
-github.com/linode/linodego v1.41.0
+github.com/linode/linodego v1.42.0
github.com/miekg/dns v1.1.62
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
@@ -50,25 +50,26 @@ require (
github.com/oklog/ulid v1.3.1
github.com/ovh/go-ovh v1.6.0
github.com/prometheus/alertmanager v0.27.0
-github.com/prometheus/client_golang v1.20.4
+github.com/prometheus/client_golang v1.20.5
github.com/prometheus/client_model v0.6.1
-github.com/prometheus/common v0.60.0
+github.com/prometheus/common v0.60.1
github.com/prometheus/common/assets v0.2.0
github.com/prometheus/common/sigv4 v0.1.0
-github.com/prometheus/exporter-toolkit v0.13.0
+github.com/prometheus/exporter-toolkit v0.13.1
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
github.com/stretchr/testify v1.9.0
github.com/vultr/govultr/v2 v2.17.2
-go.opentelemetry.io/collector/pdata v1.16.0
+go.opentelemetry.io/collector/pdata v1.18.0
-go.opentelemetry.io/collector/semconv v0.110.0
+go.opentelemetry.io/collector/semconv v0.112.0
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0
-go.opentelemetry.io/otel v1.30.0
+go.opentelemetry.io/otel v1.31.0
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0
-go.opentelemetry.io/otel/sdk v1.30.0
+go.opentelemetry.io/otel/sdk v1.31.0
-go.opentelemetry.io/otel/trace v1.30.0
+go.opentelemetry.io/otel/trace v1.31.0
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.6.0
go.uber.org/goleak v1.3.0
@@ -78,10 +79,10 @@ require (
golang.org/x/sys v0.26.0
golang.org/x/text v0.19.0
golang.org/x/tools v0.26.0
-google.golang.org/api v0.199.0
+google.golang.org/api v0.204.0
-google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1
+google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53
google.golang.org/grpc v1.67.1
-google.golang.org/protobuf v1.34.2
+google.golang.org/protobuf v1.35.1
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.31.1
@@ -92,8 +93,8 @@ require (
)

require (
-cloud.google.com/go/auth v0.9.5 // indirect
+cloud.google.com/go/auth v0.10.0 // indirect
-cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
+cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect
cloud.google.com/go/compute/metadata v0.5.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
@@ -184,15 +185,15 @@ require (
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opencensus.io v0.24.0 // indirect
-go.opentelemetry.io/otel/metric v1.30.0 // indirect
+go.opentelemetry.io/otel/metric v1.31.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
golang.org/x/crypto v0.28.0 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/term v0.25.0 // indirect
-golang.org/x/time v0.6.0 // indirect
+golang.org/x/time v0.7.0 // indirect
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
go.sum
View file
@@ -12,10 +12,10 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw=
+cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo=
-cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM=
+cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI=
-cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
+cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk=
-cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
+cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -36,10 +36,12 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
@@ -52,6 +54,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -121,8 +125,10 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/digitalocean/godo v1.126.0 h1:+Znh7VMQj/E8ArbjWnc7OKGjWfzC+I8OCSRp7r1MdD8=
-github.com/digitalocean/godo v1.126.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/digitalocean/godo v1.128.0 h1:cGn/ibMSRZ9+8etbzMv2MnnCEPTTGlEnx3HHTPwdk1U=
+github.com/digitalocean/godo v1.128.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
@@ -133,15 +139,15 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
+github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
-github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
+github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les=
+github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE=
-github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
+github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
@@ -300,10 +306,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
-github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
+github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ=
-github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg=
+github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM=
-github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
-github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
@@ -353,8 +357,8 @@ github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1av
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ=
+github.com/hetznercloud/hcloud-go/v2 v2.15.0 h1:6mpMJ/RuX1woZj+MCJdyKNEX9129KDkEIDeeyfr4GD4=
-github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0=
+github.com/hetznercloud/hcloud-go/v2 v2.15.0/go.mod h1:h8sHav+27Xa+48cVMAvAUMELov5h298Ilg2vflyTHgg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
@@ -381,10 +385,12 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
+github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
-github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -399,8 +405,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY=
+github.com/linode/linodego v1.42.0 h1:ZSbi4MtvwrfB9Y6bknesorvvueBGGilcmh2D5dq76RM=
-github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY=
+github.com/linode/linodego v1.42.0/go.mod h1:2yzmY6pegPBDgx2HDllmt0eIk2IlzqcgK6NR0wFCFRY=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@@ -500,8 +506,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
-github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -513,14 +519,14 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
+github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
-github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
+github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
-github.com/prometheus/exporter-toolkit v0.13.0 h1:lmA0Q+8IaXgmFRKw09RldZmZdnvu9wwcDLIXGmTPw1c=
+github.com/prometheus/exporter-toolkit v0.13.1 h1:Evsh0gWQo2bdOHlnz9+0Nm7/OFfIwhE2Ws4A2jIlR04=
-github.com/prometheus/exporter-toolkit v0.13.0/go.mod h1:2uop99EZl80KdXhv/MxVI2181fMcwlsumFOqBecGkG0=
+github.com/prometheus/exporter-toolkit v0.13.1/go.mod h1:ujdv2YIOxtdFxxqtloLpbqmxd5J0Le6IITUvIRSWjj0=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
@@ -528,10 +534,12 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
+github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8=
@ -587,26 +595,28 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/pdata v1.16.0 h1:g02K8jlRnmQ7TQDuXpdgVL6vIxIVqr5Gbb1qIR27rto= go.opentelemetry.io/collector/pdata v1.18.0 h1:/yg2rO2dxqDM2p6GutsMCxXN6sKlXwyIz/ZYyUPONBg=
go.opentelemetry.io/collector/pdata v1.16.0/go.mod h1:YZZJIt2ehxosYf/Y1pbvexjNWsIGNNrzzlCTO9jC1F4= go.opentelemetry.io/collector/pdata v1.18.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
go.opentelemetry.io/collector/semconv v0.110.0 h1:KHQnOHe3gUz0zsxe8ph9kN5OTypCFD4V+06AiBTfeNk= go.opentelemetry.io/collector/semconv v0.112.0 h1:JPQyvZhlNLVSuVI+FScONaiFygB7h7NTZceUEKIQUEc=
go.opentelemetry.io/collector/semconv v0.110.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= go.opentelemetry.io/collector/semconv v0.112.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko=
go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU=
go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4=
go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@ -816,8 +826,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@ -882,8 +892,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4=
google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -918,10 +928,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U=
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -948,8 +958,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -14,6 +14,7 @@
 package histogram

 import (
+	"errors"
 	"fmt"
 	"math"
 	"strings"
@@ -784,16 +785,16 @@ func (h *FloatHistogram) Validate() error {
 			return fmt.Errorf("custom buckets: %w", err)
 		}
 		if h.ZeroCount != 0 {
-			return fmt.Errorf("custom buckets: must have zero count of 0")
+			return errors.New("custom buckets: must have zero count of 0")
 		}
 		if h.ZeroThreshold != 0 {
-			return fmt.Errorf("custom buckets: must have zero threshold of 0")
+			return errors.New("custom buckets: must have zero threshold of 0")
 		}
 		if len(h.NegativeSpans) > 0 {
-			return fmt.Errorf("custom buckets: must not have negative spans")
+			return errors.New("custom buckets: must not have negative spans")
 		}
 		if len(h.NegativeBuckets) > 0 {
-			return fmt.Errorf("custom buckets: must not have negative buckets")
+			return errors.New("custom buckets: must not have negative buckets")
 		}
 	} else {
 		if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
@@ -807,7 +808,7 @@ func (h *FloatHistogram) Validate() error {
 			return fmt.Errorf("negative side: %w", err)
 		}
 		if h.CustomValues != nil {
-			return fmt.Errorf("histogram with exponential schema must not have custom bounds")
+			return errors.New("histogram with exponential schema must not have custom bounds")
 		}
 	}
 	err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
@@ -948,10 +949,10 @@ func (h *FloatHistogram) floatBucketIterator(
 	positive bool, absoluteStartValue float64, targetSchema int32,
 ) floatBucketIterator {
 	if h.UsesCustomBuckets() && targetSchema != h.Schema {
-		panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema"))
+		panic(errors.New("cannot merge from custom buckets schema to exponential schema"))
 	}
 	if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) {
-		panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema"))
+		panic(errors.New("cannot merge from exponential buckets schema to custom schema"))
 	}
 	if targetSchema > h.Schema {
 		panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))
@@ -14,6 +14,7 @@
 package histogram

 import (
+	"errors"
 	"fmt"
 	"math"
 	"slices"
@@ -432,16 +433,16 @@ func (h *Histogram) Validate() error {
 			return fmt.Errorf("custom buckets: %w", err)
 		}
 		if h.ZeroCount != 0 {
-			return fmt.Errorf("custom buckets: must have zero count of 0")
+			return errors.New("custom buckets: must have zero count of 0")
 		}
 		if h.ZeroThreshold != 0 {
-			return fmt.Errorf("custom buckets: must have zero threshold of 0")
+			return errors.New("custom buckets: must have zero threshold of 0")
 		}
 		if len(h.NegativeSpans) > 0 {
-			return fmt.Errorf("custom buckets: must not have negative spans")
+			return errors.New("custom buckets: must not have negative spans")
 		}
 		if len(h.NegativeBuckets) > 0 {
-			return fmt.Errorf("custom buckets: must not have negative buckets")
+			return errors.New("custom buckets: must not have negative buckets")
 		}
 	} else {
 		if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
@@ -455,7 +456,7 @@ func (h *Histogram) Validate() error {
 			return fmt.Errorf("negative side: %w", err)
 		}
 		if h.CustomValues != nil {
-			return fmt.Errorf("histogram with exponential schema must not have custom bounds")
+			return errors.New("histogram with exponential schema must not have custom bounds")
 		}
 	}
 	err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
@@ -16,6 +16,7 @@ package relabel
 import (
 	"crypto/md5"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"strconv"
 	"strings"
@@ -114,10 +115,10 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {

 func (c *Config) Validate() error {
 	if c.Action == "" {
-		return fmt.Errorf("relabel action cannot be empty")
+		return errors.New("relabel action cannot be empty")
 	}
 	if c.Modulus == 0 && c.Action == HashMod {
-		return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus")
+		return errors.New("relabel configuration for hashmod requires non-zero modulus")
 	}
 	if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" {
 		return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)
@@ -111,6 +111,20 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
 			)
 		}

+		for k, v := range g.Labels {
+			if !model.LabelName(k).IsValid() || k == model.MetricNameLabel {
+				errs = append(
+					errs, fmt.Errorf("invalid label name: %s", k),
+				)
+			}
+
+			if !model.LabelValue(v).IsValid() {
+				errs = append(
+					errs, fmt.Errorf("invalid label value: %s", v),
+				)
+			}
+		}
+
 		set[g.Name] = struct{}{}

 		for i, r := range g.Rules {
@@ -136,11 +150,12 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
 // RuleGroup is a list of sequentially evaluated recording and alerting rules.
 type RuleGroup struct {
 	Name        string            `yaml:"name"`
 	Interval    model.Duration    `yaml:"interval,omitempty"`
 	QueryOffset *model.Duration   `yaml:"query_offset,omitempty"`
 	Limit       int               `yaml:"limit,omitempty"`
 	Rules       []RuleNode        `yaml:"rules"`
+	Labels      map[string]string `yaml:"labels,omitempty"`
 }

 // Rule describes an alerting or recording rule.
@@ -169,14 +184,14 @@ type RuleNode struct {
 func (r *RuleNode) Validate() (nodes []WrappedError) {
 	if r.Record.Value != "" && r.Alert.Value != "" {
 		nodes = append(nodes, WrappedError{
-			err:     fmt.Errorf("only one of 'record' and 'alert' must be set"),
+			err:     errors.New("only one of 'record' and 'alert' must be set"),
 			node:    &r.Record,
 			nodeAlt: &r.Alert,
 		})
 	}
 	if r.Record.Value == "" && r.Alert.Value == "" {
 		nodes = append(nodes, WrappedError{
-			err:     fmt.Errorf("one of 'record' or 'alert' must be set"),
+			err:     errors.New("one of 'record' or 'alert' must be set"),
 			node:    &r.Record,
 			nodeAlt: &r.Alert,
 		})
@@ -184,7 +199,7 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {

 	if r.Expr.Value == "" {
 		nodes = append(nodes, WrappedError{
-			err:  fmt.Errorf("field 'expr' must be set in rule"),
+			err:  errors.New("field 'expr' must be set in rule"),
 			node: &r.Expr,
 		})
 	} else if _, err := parser.ParseExpr(r.Expr.Value); err != nil {
@@ -196,19 +211,19 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
 	if r.Record.Value != "" {
 		if len(r.Annotations) > 0 {
 			nodes = append(nodes, WrappedError{
-				err:  fmt.Errorf("invalid field 'annotations' in recording rule"),
+				err:  errors.New("invalid field 'annotations' in recording rule"),
 				node: &r.Record,
 			})
 		}
 		if r.For != 0 {
 			nodes = append(nodes, WrappedError{
-				err:  fmt.Errorf("invalid field 'for' in recording rule"),
+				err:  errors.New("invalid field 'for' in recording rule"),
 				node: &r.Record,
 			})
 		}
 		if r.KeepFiringFor != 0 {
 			nodes = append(nodes, WrappedError{
-				err:  fmt.Errorf("invalid field 'keep_firing_for' in recording rule"),
+				err:  errors.New("invalid field 'keep_firing_for' in recording rule"),
 				node: &r.Record,
 			})
 		}
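The net effect of the rulefmt changes: a rule group may now carry a labels map, validated with the same rules as other label names (rejecting invalid names and __name__) and values. A hedged sketch of exercising this through the package's existing Parse entry point (assuming its Parse([]byte) (*RuleGroups, []error) signature):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/rulefmt"
)

func main() {
	groups, errs := rulefmt.Parse([]byte(`
groups:
  - name: example
    labels:
      team: myteam
    rules:
      - record: job:up:sum
        expr: sum by (job) (up)
`))
	// Validation errors (e.g. "invalid label name: ...") surface here.
	for _, err := range errs {
		fmt.Println("validation error:", err)
	}
	if len(errs) == 0 {
		fmt.Println(groups.Groups[0].Labels["team"]) // "myteam"
	}
}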
@@ -107,6 +107,23 @@ groups:
             severity: "page"
           annotations:
             summary: "Instance {{ $labels.instance }} down"
+`,
+			shouldPass: true,
+		},
+		{
+			ruleString: `
+groups:
+  - name: example
+    labels:
+      team: myteam
+    rules:
+      - alert: InstanceDown
+        expr: up == 0
+        for: 5m
+        labels:
+          severity: "page"
+        annotations:
+          summary: "Instance {{ $labels.instance }} down"
 `,
 			shouldPass: true,
 		},
@@ -40,6 +40,10 @@ var newTestParserFns = map[string]newParser{
 	"omtext": func(b []byte, st *labels.SymbolTable) Parser {
 		return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped())
 	},
+	"omtext_with_nhcb": func(b []byte, st *labels.SymbolTable) Parser {
+		p := NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped())
+		return NewNHCBParser(p, st, false)
+	},
 }

 // BenchmarkParse benchmarks parsing, mimicking how scrape/scrape.go#append use it.
@@ -78,6 +82,10 @@ func BenchmarkParse(b *testing.B) {
 		// We don't pass compareToExpfmtFormat: expfmt.TypeOpenMetrics as expfmt does not support OM exemplars, see https://github.com/prometheus/common/issues/703.
 		{dataFile: "omtestdata.txt", parser: "omtext"},
 		{dataFile: "promtestdata.txt", parser: "omtext"}, // Compare how omtext parser deals with Prometheus text format vs promtext.
+		// NHCB.
+		{dataFile: "omhistogramdata.txt", parser: "omtext"},           // Measure OM parser baseline for histograms.
+		{dataFile: "omhistogramdata.txt", parser: "omtext_with_nhcb"}, // Measure NHCB over OM parser.
 	} {
 		var buf []byte
 		dataCase := bcase.dataFile
@@ -14,6 +14,8 @@
 package textparse

 import (
+	"errors"
+	"fmt"
 	"mime"

 	"github.com/prometheus/common/model"
@@ -23,8 +25,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 )

-// Parser parses samples from a byte slice of samples in the official
-// Prometheus and OpenMetrics text exposition formats.
+// Parser parses samples from a byte slice of samples in different exposition formats.
 type Parser interface {
 	// Series returns the bytes of a series with a simple float64 as a
 	// value, the timestamp if set, and the value of the current sample.
@@ -58,6 +59,8 @@ type Parser interface {

 	// Metric writes the labels of the current sample into the passed labels.
 	// It returns the string from which the metric was parsed.
+	// The values of the "le" labels of classic histograms and "quantile" labels
+	// of summaries should follow the OpenMetrics formatting rules.
 	Metric(l *labels.Labels) string

 	// Exemplar writes the exemplar of the current sample into the passed
@@ -78,28 +81,65 @@ type Parser interface {
 	Next() (Entry, error)
 }

-// New returns a new parser of the byte slice.
-//
-// This function always returns a valid parser, but might additionally
-// return an error if the content type cannot be parsed.
-func New(b []byte, contentType string, parseClassicHistograms, skipOMCTSeries bool, st *labels.SymbolTable) (Parser, error) {
+// extractMediaType returns the mediaType of a required parser. It tries first to
+// extract a valid and supported mediaType from contentType. If that fails,
+// the provided fallbackType (possibly an empty string) is returned, together with
+// an error. fallbackType is used as-is without further validation.
+func extractMediaType(contentType, fallbackType string) (string, error) {
 	if contentType == "" {
-		return NewPromParser(b, st), nil
+		if fallbackType == "" {
+			return "", errors.New("non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target")
+		}
+		return fallbackType, fmt.Errorf("non-compliant scrape target sending blank Content-Type, using fallback_scrape_protocol %q", fallbackType)
 	}

+	// We have a contentType, parse it.
 	mediaType, _, err := mime.ParseMediaType(contentType)
 	if err != nil {
-		return NewPromParser(b, st), err
+		if fallbackType == "" {
+			retErr := fmt.Errorf("cannot parse Content-Type %q and no fallback_scrape_protocol for target", contentType)
+			return "", errors.Join(retErr, err)
+		}
+		retErr := fmt.Errorf("could not parse received Content-Type %q, using fallback_scrape_protocol %q", contentType, fallbackType)
+		return fallbackType, errors.Join(retErr, err)
 	}
+
+	// We have a valid media type, either we recognise it and can use it
+	// or we have to error.
+	switch mediaType {
+	case "application/openmetrics-text", "application/vnd.google.protobuf", "text/plain":
+		return mediaType, nil
+	}
+	// We're here because we have no recognised mediaType.
+	if fallbackType == "" {
+		return "", fmt.Errorf("received unsupported Content-Type %q and no fallback_scrape_protocol specified for target", contentType)
+	}
+	return fallbackType, fmt.Errorf("received unsupported Content-Type %q, using fallback_scrape_protocol %q", contentType, fallbackType)
+}
+
+// New returns a new parser of the byte slice.
+//
+// This function no longer guarantees to return a valid parser.
+//
+// It only returns a valid parser if the supplied contentType and fallbackType allow.
+// An error may also be returned if fallbackType had to be used or there was some
+// other error parsing the supplied Content-Type.
+// If the returned parser is nil then the scrape must fail.
+func New(b []byte, contentType, fallbackType string, parseClassicHistograms, skipOMCTSeries bool, st *labels.SymbolTable) (Parser, error) {
+	mediaType, err := extractMediaType(contentType, fallbackType)
+	// err may be nil or something we want to warn about.
 	switch mediaType {
 	case "application/openmetrics-text":
 		return NewOpenMetricsParser(b, st, func(o *openMetricsParserOptions) {
 			o.SkipCTSeries = skipOMCTSeries
-		}), nil
+		}), err
 	case "application/vnd.google.protobuf":
-		return NewProtobufParser(b, parseClassicHistograms, st), nil
+		return NewProtobufParser(b, parseClassicHistograms, st), err
+	case "text/plain":
+		return NewPromParser(b, st), err
 	default:
-		return NewPromParser(b, st), nil
+		return nil, err
 	}
 }
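Because New can now return a nil parser, every caller has to treat nil as a hard scrape failure, and a non-nil error alongside a non-nil parser as a soft warning. A sketch of that contract, under the assumption (borrowed from the tests below) that config.PrometheusText0_0_4.HeaderMediaType() yields "text/plain"; parseScrape and its logging are hypothetical:

package scrapeutil // hypothetical helper package, for illustration only

import (
	"errors"
	"io"
	"log"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

// parseScrape shows the shape of the new contract: nil parser == failed scrape.
func parseScrape(body []byte, contentType string) error {
	fallback := config.PrometheusText0_0_4.HeaderMediaType() // "text/plain"
	p, err := textparse.New(body, contentType, fallback, false, false, labels.NewSymbolTable())
	if p == nil {
		return err // unusable Content-Type and no fallback: fail the scrape.
	}
	if err != nil {
		log.Println("using fallback_scrape_protocol:", err) // soft warning, keep going.
	}
	for {
		entry, err := p.Next()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		_ = entry // dispatch on the entry type in a real append loop.
	}
}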
@@ -22,6 +22,7 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"

+	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
@@ -31,6 +32,10 @@ import (
 func TestNewParser(t *testing.T) {
 	t.Parallel()

+	requireNilParser := func(t *testing.T, p Parser) {
+		require.Nil(t, p)
+	}
+
 	requirePromParser := func(t *testing.T, p Parser) {
 		require.NotNil(t, p)
 		_, ok := p.(*PromParser)
@@ -43,34 +48,83 @@ func TestNewParser(t *testing.T) {
 		require.True(t, ok)
 	}

+	requireProtobufParser := func(t *testing.T, p Parser) {
+		require.NotNil(t, p)
+		_, ok := p.(*ProtobufParser)
+		require.True(t, ok)
+	}
+
 	for name, tt := range map[string]*struct {
-		contentType    string
-		validateParser func(*testing.T, Parser)
-		err            string
+		contentType            string
+		fallbackScrapeProtocol config.ScrapeProtocol
+		validateParser         func(*testing.T, Parser)
+		err                    string
 	}{
 		"empty-string": {
-			validateParser: requirePromParser,
+			validateParser: requireNilParser,
+			err:            "non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target",
+		},
+		"empty-string-fallback-text-plain": {
+			validateParser:         requirePromParser,
+			fallbackScrapeProtocol: config.PrometheusText0_0_4,
+			err:                    "non-compliant scrape target sending blank Content-Type, using fallback_scrape_protocol \"text/plain\"",
 		},
 		"invalid-content-type-1": {
 			contentType:    "invalid/",
-			validateParser: requirePromParser,
+			validateParser: requireNilParser,
 			err:            "expected token after slash",
 		},
+		"invalid-content-type-1-fallback-text-plain": {
+			contentType:            "invalid/",
+			validateParser:         requirePromParser,
+			fallbackScrapeProtocol: config.PrometheusText0_0_4,
+			err:                    "expected token after slash",
+		},
+		"invalid-content-type-1-fallback-openmetrics": {
+			contentType:            "invalid/",
+			validateParser:         requireOpenMetricsParser,
+			fallbackScrapeProtocol: config.OpenMetricsText0_0_1,
+			err:                    "expected token after slash",
+		},
+		"invalid-content-type-1-fallback-protobuf": {
+			contentType:            "invalid/",
+			validateParser:         requireProtobufParser,
+			fallbackScrapeProtocol: config.PrometheusProto,
+			err:                    "expected token after slash",
+		},
 		"invalid-content-type-2": {
 			contentType:    "invalid/invalid/invalid",
-			validateParser: requirePromParser,
+			validateParser: requireNilParser,
 			err:            "unexpected content after media subtype",
 		},
+		"invalid-content-type-2-fallback-text-plain": {
+			contentType:            "invalid/invalid/invalid",
+			validateParser:         requirePromParser,
+			fallbackScrapeProtocol: config.PrometheusText1_0_0,
+			err:                    "unexpected content after media subtype",
+		},
 		"invalid-content-type-3": {
 			contentType:    "/",
-			validateParser: requirePromParser,
+			validateParser: requireNilParser,
 			err:            "no media type",
 		},
+		"invalid-content-type-3-fallback-text-plain": {
+			contentType:            "/",
+			validateParser:         requirePromParser,
+			fallbackScrapeProtocol: config.PrometheusText1_0_0,
+			err:                    "no media type",
+		},
 		"invalid-content-type-4": {
 			contentType:    "application/openmetrics-text; charset=UTF-8; charset=utf-8",
-			validateParser: requirePromParser,
+			validateParser: requireNilParser,
 			err:            "duplicate parameter name",
 		},
+		"invalid-content-type-4-fallback-open-metrics": {
+			contentType:            "application/openmetrics-text; charset=UTF-8; charset=utf-8",
+			validateParser:         requireOpenMetricsParser,
+			fallbackScrapeProtocol: config.OpenMetricsText1_0_0,
+			err:                    "duplicate parameter name",
+		},
 		"openmetrics": {
 			contentType:    "application/openmetrics-text",
 			validateParser: requireOpenMetricsParser,
@@ -87,20 +141,33 @@ func TestNewParser(t *testing.T) {
 			contentType:    "text/plain",
 			validateParser: requirePromParser,
 		},
+		"protobuf": {
+			contentType:    "application/vnd.google.protobuf",
+			validateParser: requireProtobufParser,
+		},
 		"plain-text-with-version": {
 			contentType:    "text/plain; version=0.0.4",
 			validateParser: requirePromParser,
 		},
 		"some-other-valid-content-type": {
 			contentType:    "text/html",
-			validateParser: requirePromParser,
+			validateParser: requireNilParser,
+			err:            "received unsupported Content-Type \"text/html\" and no fallback_scrape_protocol specified for target",
+		},
+		"some-other-valid-content-type-fallback-text-plain": {
+			contentType:            "text/html",
+			validateParser:         requirePromParser,
+			fallbackScrapeProtocol: config.PrometheusText0_0_4,
+			err:                    "received unsupported Content-Type \"text/html\", using fallback_scrape_protocol \"text/plain\"",
 		},
 	} {
 		t.Run(name, func(t *testing.T) {
 			tt := tt // Copy to local variable before going parallel.
 			t.Parallel()

-			p, err := New([]byte{}, tt.contentType, false, false, labels.NewSymbolTable())
+			fallbackProtoMediaType := tt.fallbackScrapeProtocol.HeaderMediaType()
+
+			p, err := New([]byte{}, tt.contentType, fallbackProtoMediaType, false, false, labels.NewSymbolTable())
 			tt.validateParser(t, p)
 			if tt.err == "" {
 				require.NoError(t, err)
@@ -172,13 +239,13 @@ func testParse(t *testing.T, p Parser) (ret []parsedEntry) {
 			}
 			p.Metric(&got.lset)

-			for e := (exemplar.Exemplar{}); p.Exemplar(&e); {
-				got.es = append(got.es, e)
-			}
-
 			// Parser reuses int pointer.
 			if ct := p.CreatedTimestamp(); ct != nil {
 				got.ct = int64p(*ct)
 			}
+			for e := (exemplar.Exemplar{}); p.Exemplar(&e); {
+				got.es = append(got.es, e)
+			}
 		case EntryType:
 			m, got.typ = p.Type()
 			got.m = string(m)
@@ -0,0 +1,374 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package textparse
import (
"errors"
"io"
"math"
"strconv"
"strings"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/util/convertnhcb"
)
type collectionState int
const (
stateStart collectionState = iota
stateCollecting
stateEmitting
)
// The NHCBParser wraps a Parser and converts classic histograms to native
// histograms with custom buckets.
//
// Since the Parser interface is line based, this parser needs to keep track
// of the last classic histogram series it saw to collate them into a
// single native histogram.
//
// Note:
// - Only series that have the histogram metadata type are considered for
// conversion.
// - The classic series are also returned if keepClassicHistograms is true.
type NHCBParser struct {
// The parser we're wrapping.
parser Parser
// Option to keep classic histograms along with converted histograms.
keepClassicHistograms bool
// Labels builder.
builder labels.ScratchBuilder
// State of the parser.
state collectionState
// Caches the values from the underlying parser.
// For Series and Histogram.
bytes []byte
ts *int64
value float64
h *histogram.Histogram
fh *histogram.FloatHistogram
// For Metric.
lset labels.Labels
metricString string
// For Type.
bName []byte
typ model.MetricType
// Caches the entry itself if we are inserting a converted NHCB
// halfway through.
entry Entry
err error
// Caches the values and metric for the inserted converted NHCB.
bytesNHCB []byte
hNHCB *histogram.Histogram
fhNHCB *histogram.FloatHistogram
lsetNHCB labels.Labels
exemplars []exemplar.Exemplar
ctNHCB *int64
metricStringNHCB string
// Collates values from the classic histogram series to build
// the converted histogram later.
tempLsetNHCB labels.Labels
tempNHCB convertnhcb.TempHistogram
tempExemplars []exemplar.Exemplar
tempExemplarCount int
tempCT *int64
// Remembers the last base histogram metric name (assuming it's
// a classic histogram) so we can tell if the next float series
// is part of the same classic histogram.
lastHistogramName string
lastHistogramLabelsHash uint64
lastHistogramExponential bool
// Reused buffer for hashing labels.
hBuffer []byte
}
func NewNHCBParser(p Parser, st *labels.SymbolTable, keepClassicHistograms bool) Parser {
return &NHCBParser{
parser: p,
keepClassicHistograms: keepClassicHistograms,
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
tempNHCB: convertnhcb.NewTempHistogram(),
}
}
func (p *NHCBParser) Series() ([]byte, *int64, float64) {
return p.bytes, p.ts, p.value
}
func (p *NHCBParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
if p.state == stateEmitting {
return p.bytesNHCB, p.ts, p.hNHCB, p.fhNHCB
}
return p.bytes, p.ts, p.h, p.fh
}
func (p *NHCBParser) Help() ([]byte, []byte) {
return p.parser.Help()
}
func (p *NHCBParser) Type() ([]byte, model.MetricType) {
return p.bName, p.typ
}
func (p *NHCBParser) Unit() ([]byte, []byte) {
return p.parser.Unit()
}
func (p *NHCBParser) Comment() []byte {
return p.parser.Comment()
}
func (p *NHCBParser) Metric(l *labels.Labels) string {
if p.state == stateEmitting {
*l = p.lsetNHCB
return p.metricStringNHCB
}
*l = p.lset
return p.metricString
}
func (p *NHCBParser) Exemplar(ex *exemplar.Exemplar) bool {
if p.state == stateEmitting {
if len(p.exemplars) == 0 {
return false
}
*ex = p.exemplars[0]
p.exemplars = p.exemplars[1:]
return true
}
return p.parser.Exemplar(ex)
}
func (p *NHCBParser) CreatedTimestamp() *int64 {
switch p.state {
case stateStart:
if p.entry == EntrySeries || p.entry == EntryHistogram {
return p.parser.CreatedTimestamp()
}
case stateCollecting:
return p.tempCT
case stateEmitting:
return p.ctNHCB
}
return nil
}
func (p *NHCBParser) Next() (Entry, error) {
if p.state == stateEmitting {
p.state = stateStart
if p.entry == EntrySeries {
isNHCB := p.handleClassicHistogramSeries(p.lset)
if isNHCB && !p.keepClassicHistograms {
// Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms.
return p.Next()
}
}
return p.entry, p.err
}
p.entry, p.err = p.parser.Next()
if p.err != nil {
if errors.Is(p.err, io.EOF) && p.processNHCB() {
return EntryHistogram, nil
}
return EntryInvalid, p.err
}
switch p.entry {
case EntrySeries:
p.bytes, p.ts, p.value = p.parser.Series()
p.metricString = p.parser.Metric(&p.lset)
// Check the label set to see if we can continue or need to emit the NHCB.
var isNHCB bool
if p.compareLabels() {
// Labels differ. Check if we can emit the NHCB.
if p.processNHCB() {
return EntryHistogram, nil
}
isNHCB = p.handleClassicHistogramSeries(p.lset)
} else {
// Labels are the same. Check if after an exponential histogram.
if p.lastHistogramExponential {
isNHCB = false
} else {
isNHCB = p.handleClassicHistogramSeries(p.lset)
}
}
if isNHCB && !p.keepClassicHistograms {
// Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms.
return p.Next()
}
return p.entry, p.err
case EntryHistogram:
p.bytes, p.ts, p.h, p.fh = p.parser.Histogram()
p.metricString = p.parser.Metric(&p.lset)
p.storeExponentialLabels()
case EntryType:
p.bName, p.typ = p.parser.Type()
}
if p.processNHCB() {
return EntryHistogram, nil
}
return p.entry, p.err
}
// Return true if labels have changed and we should emit the NHCB.
func (p *NHCBParser) compareLabels() bool {
if p.state != stateCollecting {
return false
}
if p.typ != model.MetricTypeHistogram {
// Different metric type.
return true
}
if p.lastHistogramName != convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) {
// Different metric name.
return true
}
nextHash, _ := p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel)
// Different label values.
return p.lastHistogramLabelsHash != nextHash
}
// Save the label set of the classic histogram without suffix and bucket `le` label.
func (p *NHCBParser) storeClassicLabels() {
p.lastHistogramName = convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName))
p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel)
p.lastHistogramExponential = false
}
func (p *NHCBParser) storeExponentialLabels() {
p.lastHistogramName = p.lset.Get(labels.MetricName)
p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer)
p.lastHistogramExponential = true
}
// handleClassicHistogramSeries collates the classic histogram series to be converted to NHCB
// if it is actually a classic histogram series (and not a normal float series) and if there
// isn't already a native histogram with the same name (assuming it is always processed
// right before the classic histograms) and returns true if the collation was done.
func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool {
if p.typ != model.MetricTypeHistogram {
return false
}
mName := lset.Get(labels.MetricName)
// Sanity check to ensure that the TYPE metadata entry name is the same as the base name.
if convertnhcb.GetHistogramMetricBaseName(mName) != string(p.bName) {
return false
}
switch {
case strings.HasSuffix(mName, "_bucket") && lset.Has(labels.BucketLabel):
le, err := strconv.ParseFloat(lset.Get(labels.BucketLabel), 64)
if err == nil && !math.IsNaN(le) {
p.processClassicHistogramSeries(lset, "_bucket", func(hist *convertnhcb.TempHistogram) {
_ = hist.SetBucketCount(le, p.value)
})
return true
}
case strings.HasSuffix(mName, "_count"):
p.processClassicHistogramSeries(lset, "_count", func(hist *convertnhcb.TempHistogram) {
_ = hist.SetCount(p.value)
})
return true
case strings.HasSuffix(mName, "_sum"):
p.processClassicHistogramSeries(lset, "_sum", func(hist *convertnhcb.TempHistogram) {
_ = hist.SetSum(p.value)
})
return true
}
return false
}
func (p *NHCBParser) processClassicHistogramSeries(lset labels.Labels, suffix string, updateHist func(*convertnhcb.TempHistogram)) {
if p.state != stateCollecting {
p.storeClassicLabels()
p.tempCT = p.parser.CreatedTimestamp()
p.state = stateCollecting
p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, suffix)
}
p.storeExemplars()
updateHist(&p.tempNHCB)
}
func (p *NHCBParser) storeExemplars() {
for ex := p.nextExemplarPtr(); p.parser.Exemplar(ex); ex = p.nextExemplarPtr() {
p.tempExemplarCount++
}
}
func (p *NHCBParser) nextExemplarPtr() *exemplar.Exemplar {
switch {
case p.tempExemplarCount == len(p.tempExemplars)-1:
// Reuse the previously allocated exemplar, it was not filled up.
case len(p.tempExemplars) == cap(p.tempExemplars):
// Let the runtime grow the slice.
p.tempExemplars = append(p.tempExemplars, exemplar.Exemplar{})
default:
// Take the next element into use.
p.tempExemplars = p.tempExemplars[:len(p.tempExemplars)+1]
}
return &p.tempExemplars[len(p.tempExemplars)-1]
}
func (p *NHCBParser) swapExemplars() {
p.exemplars = p.tempExemplars[:p.tempExemplarCount]
p.tempExemplars = p.tempExemplars[:0]
}
// processNHCB converts the collated classic histogram series to NHCB and caches the info
// to be returned to callers. Retruns true if the conversion was successful.
func (p *NHCBParser) processNHCB() bool {
if p.state != stateCollecting {
return false
}
h, fh, err := p.tempNHCB.Convert()
if err == nil {
if h != nil {
if err := h.Validate(); err != nil {
return false
}
p.hNHCB = h
p.fhNHCB = nil
} else if fh != nil {
if err := fh.Validate(); err != nil {
return false
}
p.hNHCB = nil
p.fhNHCB = fh
}
p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + strings.ReplaceAll(p.tempLsetNHCB.DropMetricName().String(), ", ", ",")
p.bytesNHCB = []byte(p.metricStringNHCB)
p.lsetNHCB = p.tempLsetNHCB
p.swapExemplars()
p.ctNHCB = p.tempCT
p.state = stateEmitting
} else {
p.state = stateStart
}
p.tempNHCB.Reset()
p.tempExemplarCount = 0
p.tempCT = nil
return err == nil
}
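Because NHCBParser implements the same Parser interface, wrapping it around any underlying parser is a one-liner, exactly as the benchmark's omtext_with_nhcb case above does. A minimal, hedged sketch of standalone use; the omInput payload and metric names are made up for illustration:

package main

import (
	"errors"
	"fmt"
	"io"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	omInput := []byte(`# TYPE req_seconds histogram
req_seconds_bucket{le="1.0"} 3
req_seconds_bucket{le="+Inf"} 5
req_seconds_count 5
req_seconds_sum 7.5
# EOF
`)
	st := labels.NewSymbolTable()
	p := textparse.NewNHCBParser(textparse.NewOpenMetricsParser(omInput, st), st, false)
	for {
		entry, err := p.Next()
		if err != nil {
			if !errors.Is(err, io.EOF) {
				fmt.Println("parse error:", err)
			}
			break
		}
		if entry == textparse.EntryHistogram {
			// The classic _bucket/_count/_sum series arrive collated as one
			// native histogram with custom buckets.
			b, _, h, fh := p.Histogram()
			fmt.Println(string(b), h, fh)
		}
	}
}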
@@ -0,0 +1,984 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package textparse
import (
"bytes"
"encoding/binary"
"strconv"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/require"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
)
func TestNHCBParserOnOMParser(t *testing.T) {
// The input is taken originally from TestOpenMetricsParse, with additional tests for the NHCBParser.
input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
# UNIT go_gc_duration_seconds seconds
go_gc_duration_seconds{quantile="0"} 4.9351e-05
go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05
go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05
# HELP nohelp1
# HELP help2 escape \ \n \\ \" \x chars
# UNIT nounit
go_gc_duration_seconds{quantile="1.0",a="b"} 8.3835e-05
go_gc_duration_seconds_count 99
some:aggregate:rate5m{a_b="c"} 1
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 33 123.123
# TYPE hh histogram
hh_bucket{le="+Inf"} 1
# TYPE gh gaugehistogram
gh_bucket{le="+Inf"} 1
# TYPE hhh histogram
hhh_bucket{le="+Inf"} 1 # {id="histogram-bucket-test"} 4
hhh_count 1 # {id="histogram-count-test"} 4
# TYPE ggh gaugehistogram
ggh_bucket{le="+Inf"} 1 # {id="gaugehistogram-bucket-test",xx="yy"} 4 123.123
ggh_count 1 # {id="gaugehistogram-count-test",xx="yy"} 4 123.123
# TYPE smr_seconds summary
smr_seconds_count 2.0 # {id="summary-count-test"} 1 123.321
smr_seconds_sum 42.0 # {id="summary-sum-test"} 1 123.321
# TYPE ii info
ii{foo="bar"} 1
# TYPE ss stateset
ss{ss="foo"} 1
ss{ss="bar"} 0
ss{A="a"} 0
# TYPE un unknown
_metric_starting_with_underscore 1
testmetric{_label_starting_with_underscore="foo"} 1
testmetric{label="\"bar\""} 1
# HELP foo Counter with and without labels to certify CT is parsed for both cases
# TYPE foo counter
foo_total 17.0 1520879607.789 # {id="counter-test"} 5
foo_created 1520872607.123
foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5
foo_created{a="b"} 1520872607.123
# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines afar
# TYPE bar summary
bar_count 17.0
bar_sum 324789.3
bar{quantile="0.95"} 123.7
bar{quantile="0.99"} 150.0
bar_created 1520872608.124
# HELP baz Histogram with the same objective as above's summary
# TYPE baz histogram
baz_bucket{le="0.0"} 0
baz_bucket{le="+Inf"} 17
baz_count 17
baz_sum 324789.3
baz_created 1520872609.125
# HELP fizz_created Gauge which shouldn't be parsed as CT
# TYPE fizz_created gauge
fizz_created 17.0
# HELP something Histogram with _created between buckets and summary
# TYPE something histogram
something_count 18
something_sum 324789.4
something_created 1520430001
something_bucket{le="0.0"} 1
something_bucket{le="+Inf"} 18
something_count{a="b"} 9
something_sum{a="b"} 42123.0
something_bucket{a="b",le="0.0"} 8
something_bucket{a="b",le="+Inf"} 9
something_created{a="b"} 1520430002
# HELP yum Summary with _created between sum and quantiles
# TYPE yum summary
yum_count 20
yum_sum 324789.5
yum_created 1520430003
yum{quantile="0.95"} 123.7
yum{quantile="0.99"} 150.0
# HELP foobar Summary with _created as the first line
# TYPE foobar summary
foobar_count 21
foobar_created 1520430004
foobar_sum 324789.6
foobar{quantile="0.95"} 123.8
foobar{quantile="0.99"} 150.1`
input += "\n# HELP metric foo\x00bar"
input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
input += "\n# EOF\n"
exp := []parsedEntry{
{
m: "go_gc_duration_seconds",
help: "A summary of the GC invocation durations.",
}, {
m: "go_gc_duration_seconds",
typ: model.MetricTypeSummary,
}, {
m: "go_gc_duration_seconds",
unit: "seconds",
}, {
m: `go_gc_duration_seconds{quantile="0"}`,
v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"),
}, {
m: `go_gc_duration_seconds{quantile="0.25"}`,
v: 7.424100000000001e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
}, {
m: `go_gc_duration_seconds{quantile="0.5",a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"),
}, {
m: "nohelp1",
help: "",
}, {
m: "help2",
help: "escape \\ \n \\ \" \\x chars",
}, {
m: "nounit",
unit: "",
}, {
m: `go_gc_duration_seconds{quantile="1.0",a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
}, {
m: `go_gc_duration_seconds_count`,
v: 99,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds_count"),
}, {
m: `some:aggregate:rate5m{a_b="c"}`,
v: 1,
lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"),
}, {
m: "go_goroutines",
help: "Number of goroutines that currently exist.",
}, {
m: "go_goroutines",
typ: model.MetricTypeGauge,
}, {
m: `go_goroutines`,
v: 33,
t: int64p(123123),
lset: labels.FromStrings("__name__", "go_goroutines"),
}, {
m: "hh",
typ: model.MetricTypeHistogram,
}, {
m: `hh{}`,
shs: &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 1,
Sum: 0.0,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{1},
// Custom values are empty as we do not store the +Inf boundary.
},
lset: labels.FromStrings("__name__", "hh"),
}, {
m: "gh",
typ: model.MetricTypeGaugeHistogram,
}, {
m: `gh_bucket{le="+Inf"}`,
v: 1,
lset: labels.FromStrings("__name__", "gh_bucket", "le", "+Inf"),
}, {
m: "hhh",
typ: model.MetricTypeHistogram,
}, {
m: `hhh{}`,
shs: &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 1,
Sum: 0.0,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{1},
// Custom values are empty as we do not store the +Inf boundary.
},
lset: labels.FromStrings("__name__", "hhh"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4},
{Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4},
},
}, {
m: "ggh",
typ: model.MetricTypeGaugeHistogram,
}, {
m: `ggh_bucket{le="+Inf"}`,
v: 1,
lset: labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"),
es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}},
}, {
m: `ggh_count`,
v: 1,
lset: labels.FromStrings("__name__", "ggh_count"),
es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}},
}, {
m: "smr_seconds",
typ: model.MetricTypeSummary,
}, {
m: `smr_seconds_count`,
v: 2,
lset: labels.FromStrings("__name__", "smr_seconds_count"),
es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321}},
}, {
m: `smr_seconds_sum`,
v: 42,
lset: labels.FromStrings("__name__", "smr_seconds_sum"),
es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321}},
}, {
m: "ii",
typ: model.MetricTypeInfo,
}, {
m: `ii{foo="bar"}`,
v: 1,
lset: labels.FromStrings("__name__", "ii", "foo", "bar"),
}, {
m: "ss",
typ: model.MetricTypeStateset,
}, {
m: `ss{ss="foo"}`,
v: 1,
lset: labels.FromStrings("__name__", "ss", "ss", "foo"),
}, {
m: `ss{ss="bar"}`,
v: 0,
lset: labels.FromStrings("__name__", "ss", "ss", "bar"),
}, {
m: `ss{A="a"}`,
v: 0,
lset: labels.FromStrings("A", "a", "__name__", "ss"),
}, {
m: "un",
typ: model.MetricTypeUnknown,
}, {
m: "_metric_starting_with_underscore",
v: 1,
lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"),
}, {
m: "testmetric{_label_starting_with_underscore=\"foo\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"),
}, {
m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
}, {
m: "foo",
help: "Counter with and without labels to certify CT is parsed for both cases",
}, {
m: "foo",
typ: model.MetricTypeCounter,
}, {
m: "foo_total",
v: 17,
lset: labels.FromStrings("__name__", "foo_total"),
t: int64p(1520879607789),
es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "counter-test"), Value: 5}},
ct: int64p(1520872607123),
}, {
m: `foo_total{a="b"}`,
v: 17.0,
lset: labels.FromStrings("__name__", "foo_total", "a", "b"),
t: int64p(1520879607789),
es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "counter-test"), Value: 5}},
ct: int64p(1520872607123),
}, {
m: "bar",
help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far",
}, {
m: "bar",
typ: model.MetricTypeSummary,
}, {
m: "bar_count",
v: 17.0,
lset: labels.FromStrings("__name__", "bar_count"),
ct: int64p(1520872608124),
}, {
m: "bar_sum",
v: 324789.3,
lset: labels.FromStrings("__name__", "bar_sum"),
ct: int64p(1520872608124),
}, {
m: `bar{quantile="0.95"}`,
v: 123.7,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"),
ct: int64p(1520872608124),
}, {
m: `bar{quantile="0.99"}`,
v: 150.0,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"),
ct: int64p(1520872608124),
}, {
m: "baz",
help: "Histogram with the same objective as above's summary",
}, {
m: "baz",
typ: model.MetricTypeHistogram,
}, {
m: `baz{}`,
shs: &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 17,
Sum: 324789.3,
PositiveSpans: []histogram.Span{{Offset: 1, Length: 1}}, // The first bucket has a 0 count, so we don't store it and the Offset is 1.
PositiveBuckets: []int64{17},
CustomValues: []float64{0.0}, // We do not store the +Inf boundary.
},
lset: labels.FromStrings("__name__", "baz"),
ct: int64p(1520872609125),
}, {
m: "fizz_created",
help: "Gauge which shouldn't be parsed as CT",
}, {
m: "fizz_created",
typ: model.MetricTypeGauge,
}, {
m: `fizz_created`,
v: 17,
lset: labels.FromStrings("__name__", "fizz_created"),
}, {
m: "something",
help: "Histogram with _created between buckets and summary",
}, {
m: "something",
typ: model.MetricTypeHistogram,
}, {
m: `something{}`,
shs: &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 18,
Sum: 324789.4,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
PositiveBuckets: []int64{1, 16},
CustomValues: []float64{0.0}, // We do not store the +Inf boundary.
},
lset: labels.FromStrings("__name__", "something"),
ct: int64p(1520430001000),
}, {
m: `something{a="b"}`,
shs: &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 9,
Sum: 42123.0,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
PositiveBuckets: []int64{8, -7},
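// PositiveBuckets are delta-encoded: 8, -7 decodes to absolute counts 8 and 1, matching Count: 9.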
CustomValues: []float64{0.0}, // We do not store the +Inf boundary.
},
lset: labels.FromStrings("__name__", "something", "a", "b"),
ct: int64p(1520430002000),
}, {
m: "yum",
help: "Summary with _created between sum and quantiles",
}, {
m: "yum",
typ: model.MetricTypeSummary,
}, {
m: `yum_count`,
v: 20,
lset: labels.FromStrings("__name__", "yum_count"),
ct: int64p(1520430003000),
}, {
m: `yum_sum`,
v: 324789.5,
lset: labels.FromStrings("__name__", "yum_sum"),
ct: int64p(1520430003000),
}, {
m: `yum{quantile="0.95"}`,
v: 123.7,
lset: labels.FromStrings("__name__", "yum", "quantile", "0.95"),
ct: int64p(1520430003000),
}, {
m: `yum{quantile="0.99"}`,
v: 150.0,
lset: labels.FromStrings("__name__", "yum", "quantile", "0.99"),
ct: int64p(1520430003000),
}, {
m: "foobar",
help: "Summary with _created as the first line",
}, {
m: "foobar",
typ: model.MetricTypeSummary,
}, {
m: `foobar_count`,
v: 21,
lset: labels.FromStrings("__name__", "foobar_count"),
ct: int64p(1520430004000),
}, {
m: `foobar_sum`,
v: 324789.6,
lset: labels.FromStrings("__name__", "foobar_sum"),
ct: int64p(1520430004000),
}, {
m: `foobar{quantile="0.95"}`,
v: 123.8,
lset: labels.FromStrings("__name__", "foobar", "quantile", "0.95"),
ct: int64p(1520430004000),
}, {
m: `foobar{quantile="0.99"}`,
v: 150.1,
lset: labels.FromStrings("__name__", "foobar", "quantile", "0.99"),
ct: int64p(1520430004000),
}, {
m: "metric",
help: "foo\x00bar",
}, {
m: "null_byte_metric{a=\"abc\x00\"}",
v: 1,
lset: labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"),
},
}
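// Chain the parsers: NHCBParser wraps the OpenMetrics parser and converts the
// classic histogram series above into native histograms with custom buckets (NHCB).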
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
p = NewNHCBParser(p, labels.NewSymbolTable(), false)
got := testParse(t, p)
requireEntries(t, exp, got)
}
func TestNHCBParserOMParser_MultipleHistograms(t *testing.T) {
// The input is originally taken from TestOpenMetricsParse, with additional cases for the NHCBParser.
input := `# HELP something Histogram with _created between buckets and summary
# TYPE something histogram
something_count 18
something_sum 324789.4
something_bucket{le="0.0"} 1 # {id="something-test"} -2.0
something_bucket{le="1.0"} 16 # {id="something-test"} 0.5
something_bucket{le="+Inf"} 18 # {id="something-test"} 8
something_count{a="b"} 9
something_sum{a="b"} 42123.0
something_bucket{a="b",le="0.0"} 8 # {id="something-test"} 0.0 123.321
something_bucket{a="b",le="1.0"} 8
something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000
# EOF
`
exp := []parsedEntry{
{
m: "something",
help: "Histogram with _created between buckets and summary",
}, {
m: "something",
typ: model.MetricTypeHistogram,
}, {
m: `something{}`,
shs: &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 18,
Sum: 324789.4,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}},
PositiveBuckets: []int64{1, 14, -13},
CustomValues: []float64{0.0, 1.0}, // We do not store the +Inf boundary.
},
lset: labels.FromStrings("__name__", "something"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "something-test"), Value: -2.0},
{Labels: labels.FromStrings("id", "something-test"), Value: 0.5},
{Labels: labels.FromStrings("id", "something-test"), Value: 8.0},
},
}, {
m: `something{a="b"}`,
shs: &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 9,
Sum: 42123.0,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}},
PositiveBuckets: []int64{8, -8, 1},
CustomValues: []float64{0.0, 1.0}, // We do not store the +Inf boundary.
},
lset: labels.FromStrings("__name__", "something", "a", "b"),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("id", "something-test"), Value: 0.0, HasTs: true, Ts: 123321},
{Labels: labels.FromStrings("id", "something-test"), Value: 2e100, HasTs: true, Ts: 123000},
},
},
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
p = NewNHCBParser(p, labels.NewSymbolTable(), false)
got := testParse(t, p)
requireEntries(t, exp, got)
}
// Verify the requirement tables from
// https://github.com/prometheus/prometheus/issues/13532.
// "classic" means the option "always_scrape_classic_histograms".
// "nhcb" means the option "convert_classic_histograms_to_nhcb".
//
// Case 1. Only classic histogram is exposed.
//
// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |.
// | classic=false, nhcb=false | YES | NO | NO |.
// | classic=true, nhcb=false | YES | NO | NO |.
// | classic=false, nhcb=true | NO | NO | YES |.
// | classic=true, nhcb=true | YES | NO | YES |.
//
// Case 2. Both classic and exponential histograms are exposed.
//
// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |.
// | classic=false, nhcb=false | NO | YES | NO |.
// | classic=true, nhcb=false | YES | YES | NO |.
// | classic=false, nhcb=true | NO | YES | NO |.
// | classic=true, nhcb=true | YES | YES | NO |.
//
// Case 3. Only exponential histogram is exposed.
//
// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |.
// | classic=false, nhcb=false | NO | YES | NO |.
// | classic=true, nhcb=false | NO | YES | NO |.
// | classic=false, nhcb=true | NO | YES | NO |.
// | classic=true, nhcb=true | NO | YES | NO |.
func TestNHCBParser_NoNHCBWhenExponential(t *testing.T) {
type requirement struct {
expectClassic bool
expectExponential bool
expectNHCB bool
}
cases := []map[string]requirement{
// Case 1.
{
"classic=false, nhcb=false": {expectClassic: true, expectExponential: false, expectNHCB: false},
"classic=true, nhcb=false": {expectClassic: true, expectExponential: false, expectNHCB: false},
"classic=false, nhcb=true": {expectClassic: false, expectExponential: false, expectNHCB: true},
"classic=true, nhcb=true": {expectClassic: true, expectExponential: false, expectNHCB: true},
},
// Case 2.
{
"classic=false, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false},
"classic=true, nhcb=false": {expectClassic: true, expectExponential: true, expectNHCB: false},
"classic=false, nhcb=true": {expectClassic: false, expectExponential: true, expectNHCB: false},
"classic=true, nhcb=true": {expectClassic: true, expectExponential: true, expectNHCB: false},
},
// Case 3.
{
"classic=false, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false},
"classic=true, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false},
"classic=false, nhcb=true": {expectClassic: false, expectExponential: true, expectNHCB: false},
"classic=true, nhcb=true": {expectClassic: false, expectExponential: true, expectNHCB: false},
},
}
// Create a parser from the keep-classic option.
type parserFactory func(bool) Parser
type testCase struct {
name string
parser parserFactory
classic bool
nhcb bool
exp []parsedEntry
}
type parserOptions struct {
useUTF8sep bool
hasCreatedTimeStamp bool
}
// Each entry defines the parser name, the Parser factory, the test cases the
// parser supports, and the parser options.
parsers := []func() (string, parserFactory, []int, parserOptions){
func() (string, parserFactory, []int, parserOptions) {
factory := func(keepClassic bool) Parser {
inputBuf := createTestProtoBufHistogram(t)
return NewProtobufParser(inputBuf.Bytes(), keepClassic, labels.NewSymbolTable())
}
return "ProtoBuf", factory, []int{1, 2, 3}, parserOptions{useUTF8sep: true, hasCreatedTimeStamp: true}
},
func() (string, parserFactory, []int, parserOptions) {
factory := func(keepClassic bool) Parser {
input := createTestOpenMetricsHistogram()
return NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
}
return "OpenMetrics", factory, []int{1}, parserOptions{hasCreatedTimeStamp: true}
},
func() (string, parserFactory, []int, parserOptions) {
factory := func(keepClassic bool) Parser {
input := createTestPromHistogram()
return NewPromParser([]byte(input), labels.NewSymbolTable())
}
return "Prometheus", factory, []int{1}, parserOptions{}
},
}
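// Build the cross product of parser x classic x nhcb, assembling the expected
// series for each supported case from the requirement tables above.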
testCases := []testCase{}
for _, parser := range parsers {
for _, classic := range []bool{false, true} {
for _, nhcb := range []bool{false, true} {
parserName, parser, supportedCases, options := parser()
requirementName := "classic=" + strconv.FormatBool(classic) + ", nhcb=" + strconv.FormatBool(nhcb)
tc := testCase{
name: "parser=" + parserName + ", " + requirementName,
parser: parser,
classic: classic,
nhcb: nhcb,
exp: []parsedEntry{},
}
for _, caseNumber := range supportedCases {
caseI := cases[caseNumber-1]
req, ok := caseI[requirementName]
require.True(t, ok, "Case %d does not have requirement %s", caseNumber, requirementName)
metric := "test_histogram" + strconv.Itoa(caseNumber)
tc.exp = append(tc.exp, parsedEntry{
m: metric,
help: "Test histogram " + strconv.Itoa(caseNumber),
})
tc.exp = append(tc.exp, parsedEntry{
m: metric,
typ: model.MetricTypeHistogram,
})
var ct *int64
if options.hasCreatedTimeStamp {
ct = int64p(1000)
}
var bucketForMetric func(string) string
if options.useUTF8sep {
bucketForMetric = func(s string) string {
return "_bucket\xffle\xff" + s
}
} else {
bucketForMetric = func(s string) string {
return "_bucket{le=\"" + s + "\"}"
}
}
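// The protobuf parser identifies bucket series with UTF-8 (0xff) label
// separators rather than the classic {le="..."} text syntax.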
if req.expectExponential {
// Always expect exponential histogram first.
exponentialSeries := []parsedEntry{
{
m: metric,
shs: &histogram.Histogram{
Schema: 3,
Count: 175,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
ZeroCount: 2,
PositiveSpans: []histogram.Span{{Offset: -161, Length: 1}, {Offset: 8, Length: 3}},
NegativeSpans: []histogram.Span{{Offset: -162, Length: 1}, {Offset: 23, Length: 4}},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings("__name__", metric),
t: int64p(1234568),
ct: ct,
},
}
tc.exp = append(tc.exp, exponentialSeries...)
}
if req.expectClassic {
// Always expect classic histogram series after exponential.
classicSeries := []parsedEntry{
{
m: metric + "_count",
v: 175,
lset: labels.FromStrings("__name__", metric+"_count"),
t: int64p(1234568),
ct: ct,
},
{
m: metric + "_sum",
v: 0.0008280461746287094,
lset: labels.FromStrings("__name__", metric+"_sum"),
t: int64p(1234568),
ct: ct,
},
{
m: metric + bucketForMetric("-0.0004899999999999998"),
v: 2,
lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0004899999999999998"),
t: int64p(1234568),
ct: ct,
},
{
m: metric + bucketForMetric("-0.0003899999999999998"),
v: 4,
lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0003899999999999998"),
t: int64p(1234568),
ct: ct,
},
{
m: metric + bucketForMetric("-0.0002899999999999998"),
v: 16,
lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0002899999999999998"),
t: int64p(1234568),
ct: ct,
},
{
m: metric + bucketForMetric("+Inf"),
v: 175,
lset: labels.FromStrings("__name__", metric+"_bucket", "le", "+Inf"),
t: int64p(1234568),
ct: ct,
},
}
tc.exp = append(tc.exp, classicSeries...)
}
if req.expectNHCB {
// Always expect NHCB series after classic.
nhcbSeries := []parsedEntry{
{
m: metric + "{}",
shs: &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 175,
Sum: 0.0008280461746287094,
PositiveSpans: []histogram.Span{{Length: 4}},
PositiveBuckets: []int64{2, 0, 10, 147},
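// Delta-encoded buckets: the absolute counts are 2, 2, 12, 159, summing to Count: 175.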
CustomValues: []float64{-0.0004899999999999998, -0.0003899999999999998, -0.0002899999999999998},
},
lset: labels.FromStrings("__name__", metric),
t: int64p(1234568),
ct: ct,
},
}
tc.exp = append(tc.exp, nhcbSeries...)
}
}
testCases = append(testCases, tc)
}
}
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
p := tc.parser(tc.classic)
if tc.nhcb {
p = NewNHCBParser(p, labels.NewSymbolTable(), tc.classic)
}
got := testParse(t, p)
requireEntries(t, tc.exp, got)
})
}
}
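// createTestProtoBufHistogram builds the scrape payload for the protobuf
// parser: each metric family, written in proto text format, is unmarshaled
// into a dto.MetricFamily and re-marshaled to binary, each message prefixed
// with its uvarint length, matching the Prometheus protobuf exposition framing.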
func createTestProtoBufHistogram(t *testing.T) *bytes.Buffer {
testMetricFamilies := []string{`name: "test_histogram1"
help: "Test histogram 1"
type: HISTOGRAM
metric: <
histogram: <
created_timestamp: <
seconds: 1
nanos: 1
>
sample_count: 175
sample_sum: 0.0008280461746287094
bucket: <
cumulative_count: 2
upper_bound: -0.0004899999999999998
>
bucket: <
cumulative_count: 4
upper_bound: -0.0003899999999999998
>
bucket: <
cumulative_count: 16
upper_bound: -0.0002899999999999998
>
>
timestamp_ms: 1234568
>`, `name: "test_histogram2"
help: "Test histogram 2"
type: HISTOGRAM
metric: <
histogram: <
created_timestamp: <
seconds: 1
nanos: 1
>
sample_count: 175
sample_sum: 0.0008280461746287094
bucket: <
cumulative_count: 2
upper_bound: -0.0004899999999999998
>
bucket: <
cumulative_count: 4
upper_bound: -0.0003899999999999998
>
bucket: <
cumulative_count: 16
upper_bound: -0.0002899999999999998
>
schema: 3
zero_threshold: 2.938735877055719e-39
zero_count: 2
negative_span: <
offset: -162
length: 1
>
negative_span: <
offset: 23
length: 4
>
negative_delta: 1
negative_delta: 3
negative_delta: -2
negative_delta: -1
negative_delta: 1
positive_span: <
offset: -161
length: 1
>
positive_span: <
offset: 8
length: 3
>
positive_delta: 1
positive_delta: 2
positive_delta: -1
positive_delta: -1
>
timestamp_ms: 1234568
>`, `name: "test_histogram3"
help: "Test histogram 3"
type: HISTOGRAM
metric: <
histogram: <
created_timestamp: <
seconds: 1
nanos: 1
>
sample_count: 175
sample_sum: 0.0008280461746287094
schema: 3
zero_threshold: 2.938735877055719e-39
zero_count: 2
negative_span: <
offset: -162
length: 1
>
negative_span: <
offset: 23
length: 4
>
negative_delta: 1
negative_delta: 3
negative_delta: -2
negative_delta: -1
negative_delta: 1
positive_span: <
offset: -161
length: 1
>
positive_span: <
offset: 8
length: 3
>
positive_delta: 1
positive_delta: 2
positive_delta: -1
positive_delta: -1
>
timestamp_ms: 1234568
>
`}
varintBuf := make([]byte, binary.MaxVarintLen32)
buf := &bytes.Buffer{}
for _, tmf := range testMetricFamilies {
pb := &dto.MetricFamily{}
// From text to proto message.
require.NoError(t, proto.UnmarshalText(tmf, pb))
// From proto message to binary protobuf.
protoBuf, err := proto.Marshal(pb)
require.NoError(t, err)
// Write first length, then binary protobuf.
varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf)))
buf.Write(varintBuf[:varintLength])
buf.Write(protoBuf)
}
return buf
}
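// createTestOpenMetricsHistogram returns a classic histogram in OpenMetrics
// text format; timestamps (1234.568) and the _created value are in seconds,
// per the OpenMetrics spec.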
func createTestOpenMetricsHistogram() string {
return `# HELP test_histogram1 Test histogram 1
# TYPE test_histogram1 histogram
test_histogram1_count 175 1234.568
test_histogram1_sum 0.0008280461746287094 1234.568
test_histogram1_bucket{le="-0.0004899999999999998"} 2 1234.568
test_histogram1_bucket{le="-0.0003899999999999998"} 4 1234.568
test_histogram1_bucket{le="-0.0002899999999999998"} 16 1234.568
test_histogram1_bucket{le="+Inf"} 175 1234.568
test_histogram1_created 1
# EOF`
}
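// createTestPromHistogram returns a similar classic histogram in Prometheus
// text format, where trailing timestamps are already in milliseconds and
// there is no _created line.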
func createTestPromHistogram() string {
return `# HELP test_histogram1 Test histogram 1
# TYPE test_histogram1 histogram
test_histogram1_count 175 1234568
test_histogram1_sum 0.0008280461746287094 1234768
test_histogram1_bucket{le="-0.0004899999999999998"} 2 1234568
test_histogram1_bucket{le="-0.0003899999999999998"} 4 1234568
test_histogram1_bucket{le="-0.0002899999999999998"} 16 1234568
test_histogram1_bucket{le="+Inf"} 175 1234568`
}
func TestNHCBParserErrorHandling(t *testing.T) {
input := `# HELP something Histogram with non-cumulative buckets
# TYPE something histogram
something_count 18
something_sum 324789.4
something_created 1520430001
something_bucket{le="0.0"} 18
something_bucket{le="+Inf"} 1
something_count{a="b"} 9
something_sum{a="b"} 42123
something_created{a="b"} 1520430002
something_bucket{a="b",le="0.0"} 1
something_bucket{a="b",le="+Inf"} 9
# EOF`
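// Only the {a="b"} series is converted below: the unlabeled series has bucket
// counts going 18 -> 1, which is not cumulative, so its NHCB is rejected.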
exp := []parsedEntry{
{
m: "something",
help: "Histogram with non cumulative buckets",
},
{
m: "something",
typ: model.MetricTypeHistogram,
},
// The parser should skip the series with non-cumulative buckets.
{
m: `something{a="b"}`,
shs: &histogram.Histogram{
Schema: histogram.CustomBucketsSchema,
Count: 9,
Sum: 42123.0,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
PositiveBuckets: []int64{1, 7},
CustomValues: []float64{0.0}, // We do not store the +Inf boundary.
},
lset: labels.FromStrings("__name__", "something", "a", "b"),
ct: int64p(1520430002000),
},
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
p = NewNHCBParser(p, labels.NewSymbolTable(), false)
got := testParse(t, p)
requireEntries(t, exp, got)
}
View file

@@ -22,6 +22,7 @@ import (
 	"fmt"
 	"io"
 	"math"
+	"strconv"
 	"strings"
 	"unicode/utf8"
@@ -101,6 +102,8 @@ type OpenMetricsParser struct {
 	// Created timestamp parsing state.
 	ct        int64
 	ctHashSet uint64
+	// ignoreExemplar instructs the parser to not overwrite exemplars (to keep them while peeking ahead).
+	ignoreExemplar bool
 	// visitedMFName is the metric family name of the last visited metric when peeking ahead
 	// for _created series during the execution of the CreatedTimestamp method.
 	visitedMFName []byte
@@ -210,7 +213,7 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string {
 		label := unreplace(s[a:b])
 		c := p.offsets[i+2] - p.start
 		d := p.offsets[i+3] - p.start
-		value := unreplace(s[c:d])
+		value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d]))
 		p.builder.Add(label, value)
 	}
@@ -295,6 +298,14 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
 	p.skipCTSeries = false
 
+	p.ignoreExemplar = true
+	savedStart := p.start
+	defer func() {
+		p.ignoreExemplar = false
+		p.start = savedStart
+		p.l = resetLexer
+	}()
+
 	for {
 		eType, err := p.Next()
 		if err != nil {
@@ -302,12 +313,12 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
 			// This might result in partial scrape with wrong/missing CT, but only
 			// spec improvement would help.
 			// TODO: Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
-			p.resetCTParseValues(resetLexer)
+			p.resetCTParseValues()
 			return nil
 		}
 		if eType != EntrySeries {
 			// Assume we hit different family, no CT line found.
-			p.resetCTParseValues(resetLexer)
+			p.resetCTParseValues()
 			return nil
 		}
@@ -321,14 +332,14 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
 		peekedHash := p.seriesHash(&buf, peekedName[:len(peekedName)-8])
 		if peekedHash != currHash {
 			// Found CT line for a different series, for our series no CT.
-			p.resetCTParseValues(resetLexer)
+			p.resetCTParseValues()
 			return nil
 		}
 
 		// All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds.
 		// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps
 		ct := int64(p.val * 1000.0)
-		p.setCTParseValues(ct, currHash, currName, true, resetLexer)
+		p.setCTParseValues(ct, currHash, currName, true)
 		return &ct
 	}
 }
@@ -370,17 +381,15 @@ func (p *OpenMetricsParser) seriesHash(offsetsArr *[]byte, metricFamilyName []by
 // setCTParseValues sets the parser to the state after CreatedTimestamp method was called and CT was found.
 // This is useful to prevent re-parsing the same series again and early return the CT value.
-func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, mfName []byte, skipCTSeries bool, resetLexer *openMetricsLexer) {
+func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, mfName []byte, skipCTSeries bool) {
 	p.ct = ct
-	p.l = resetLexer
 	p.ctHashSet = ctHashSet
 	p.visitedMFName = mfName
 	p.skipCTSeries = skipCTSeries // Do we need to set it?
 }
 
 // resetCTParseValues resets the parser to the state before CreatedTimestamp method was called.
-func (p *OpenMetricsParser) resetCTParseValues(resetLexer *openMetricsLexer) {
-	p.l = resetLexer
+func (p *OpenMetricsParser) resetCTParseValues() {
 	p.ctHashSet = 0
 	p.skipCTSeries = true
 }
@@ -416,10 +425,12 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
 	p.start = p.l.i
 	p.offsets = p.offsets[:0]
-	p.eOffsets = p.eOffsets[:0]
-	p.exemplar = p.exemplar[:0]
-	p.exemplarVal = 0
-	p.hasExemplarTs = false
+	if !p.ignoreExemplar {
+		p.eOffsets = p.eOffsets[:0]
+		p.exemplar = p.exemplar[:0]
+		p.exemplarVal = 0
+		p.hasExemplarTs = false
+	}
 
 	switch t := p.nextToken(); t {
 	case tEOFWord:
@@ -544,6 +555,16 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
 func (p *OpenMetricsParser) parseComment() error {
 	var err error
+
+	if p.ignoreExemplar {
+		for t := p.nextToken(); t != tLinebreak; t = p.nextToken() {
+			if t == tEOF {
+				return errors.New("data does not end with # EOF")
+			}
+		}
+		return nil
+	}
+
 	// Parse the labels.
 	p.eOffsets, err = p.parseLVals(p.eOffsets, true)
 	if err != nil {
@@ -723,3 +744,15 @@ func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) {
 	}
 	return val, nil
 }
+
+// normalizeFloatsInLabelValues ensures that values of the "le" labels of classic histograms and "quantile" labels
+// of summaries follow OpenMetrics formatting rules.
+func normalizeFloatsInLabelValues(t model.MetricType, l, v string) string {
+	if (t == model.MetricTypeSummary && l == model.QuantileLabel) || (t == model.MetricTypeHistogram && l == model.BucketLabel) {
+		f, err := strconv.ParseFloat(v, 64)
+		if err == nil {
+			return formatOpenMetricsFloat(f)
+		}
+	}
+	return v
+}
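Note on the helper above: normalizeFloatsInLabelValues only rewrites "le" values on histograms and "quantile" values on summaries; any other label value passes through unchanged. A minimal sketch of the expected mapping, mirroring the test expectations in this commit (illustrative calls, not part of the diff):

	normalizeFloatsInLabelValues(model.MetricTypeHistogram, model.BucketLabel, "1")  // "1.0"
	normalizeFloatsInLabelValues(model.MetricTypeSummary, model.QuantileLabel, "0")  // "0.0"
	normalizeFloatsInLabelValues(model.MetricTypeCounter, model.BucketLabel, "1")    // "1" (an "le" label on a counter is left alone)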
View file

@@ -74,6 +74,7 @@ foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5
 foo_created{a="b"} 1520872607.123
 foo_total{le="c"} 21.0
 foo_created{le="c"} 1520872621.123
+foo_total{le="1"} 10.0
 # HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines away
 # TYPE bar summary
 bar_count 17.0
@@ -97,6 +98,7 @@ something_count 18
 something_sum 324789.4
 something_created 1520430001
 something_bucket{le="0.0"} 1
+something_bucket{le="1"} 2
 something_bucket{le="+Inf"} 18
 # HELP yum Summary with _created between sum and quantiles
 # TYPE yum summary
@@ -130,7 +132,7 @@ foobar{quantile="0.99"} 150.1`
 		}, {
 			m:    `go_gc_duration_seconds{quantile="0"}`,
 			v:    4.9351e-05,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"),
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"),
 		}, {
 			m:    `go_gc_duration_seconds{quantile="0.25"}`,
 			v:    7.424100000000001e-05,
@@ -302,6 +304,10 @@ foobar{quantile="0.99"} 150.1`
 			v:    21.0,
 			lset: labels.FromStrings("__name__", "foo_total", "le", "c"),
 			ct:   int64p(1520872621123),
+		}, {
+			m:    `foo_total{le="1"}`,
+			v:    10.0,
+			lset: labels.FromStrings("__name__", "foo_total", "le", "1"),
 		}, {
 			m:    "bar",
 			help: "Summary with CT at the end, making sure we find CT even if it's multiple lines away",
@@ -385,6 +391,11 @@ foobar{quantile="0.99"} 150.1`
 			v:    1,
 			lset: labels.FromStrings("__name__", "something_bucket", "le", "0.0"),
 			ct:   int64p(1520430001000),
+		}, {
+			m:    `something_bucket{le="1"}`,
+			v:    2,
+			lset: labels.FromStrings("__name__", "something_bucket", "le", "1.0"),
+			ct:   int64p(1520430001000),
 		}, {
 			m:    `something_bucket{le="+Inf"}`,
 			v:    18,
@@ -492,7 +503,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
 		}, {
 			m:    `{"go.gc_duration_seconds",quantile="0"}`,
 			v:    4.9351e-05,
-			lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"),
+			lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.0"),
 			ct:   int64p(1520872607123),
 		}, {
 			m: `{"go.gc_duration_seconds",quantile="0.25"}`,
View file

@@ -239,7 +239,8 @@ func (p *PromParser) Metric(l *labels.Labels) string {
 		label := unreplace(s[a:b])
 		c := p.offsets[i+2] - p.start
 		d := p.offsets[i+3] - p.start
-		value := unreplace(s[c:d])
+		value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d]))
+
 		p.builder.Add(label, value)
 	}
@@ -508,7 +509,7 @@ func yoloString(b []byte) string {
 func parseFloat(s string) (float64, error) {
 	// Keep to pre-Go 1.13 float formats.
 	if strings.ContainsAny(s, "pP_") {
-		return 0, fmt.Errorf("unsupported character in float")
+		return 0, errors.New("unsupported character in float")
 	}
 	return strconv.ParseFloat(s, 64)
 }
View file

@@ -31,6 +31,13 @@ go_gc_duration_seconds{quantile="0.25",} 7.424100000000001e-05
 go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05
 go_gc_duration_seconds{quantile="0.8", a="b"} 8.3835e-05
 go_gc_duration_seconds{ quantile="0.9", a="b"} 8.3835e-05
+# HELP prometheus_http_request_duration_seconds Histogram of latencies for HTTP requests.
+# TYPE prometheus_http_request_duration_seconds histogram
+prometheus_http_request_duration_seconds_bucket{handler="/",le="1"} 423
+prometheus_http_request_duration_seconds_bucket{handler="/",le="2"} 1423
+prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"} 1423
+prometheus_http_request_duration_seconds_sum{handler="/"} 2000
+prometheus_http_request_duration_seconds_count{handler="/"} 1423
 # Hrandom comment starting with prefix of HELP
 #
 wind_speed{A="2",c="3"} 12345
@@ -50,7 +57,8 @@ some:aggregate:rate5m{a_b="c"} 1
 go_goroutines 33 123123
 _metric_starting_with_underscore 1
 testmetric{_label_starting_with_underscore="foo"} 1
-testmetric{label="\"bar\""} 1`
+testmetric{label="\"bar\""} 1
+testmetric{le="10"} 1`
 	input += "\n# HELP metric foo\x00bar"
 	input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
@@ -64,7 +72,7 @@ testmetric{label="\"bar\""} 1`
 		}, {
 			m:    `go_gc_duration_seconds{quantile="0"}`,
 			v:    4.9351e-05,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"),
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"),
 		}, {
 			m:    `go_gc_duration_seconds{quantile="0.25",}`,
 			v:    7.424100000000001e-05,
@@ -81,6 +89,32 @@ testmetric{label="\"bar\""} 1`
 			m:    `go_gc_duration_seconds{ quantile="0.9", a="b"}`,
 			v:    8.3835e-05,
 			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.9", "a", "b"),
+		}, {
+			m:    "prometheus_http_request_duration_seconds",
+			help: "Histogram of latencies for HTTP requests.",
+		}, {
+			m:   "prometheus_http_request_duration_seconds",
+			typ: model.MetricTypeHistogram,
+		}, {
+			m:    `prometheus_http_request_duration_seconds_bucket{handler="/",le="1"}`,
+			v:    423,
+			lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "1.0"),
+		}, {
+			m:    `prometheus_http_request_duration_seconds_bucket{handler="/",le="2"}`,
+			v:    1423,
+			lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "2.0"),
+		}, {
+			m:    `prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"}`,
+			v:    1423,
+			lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "+Inf"),
+		}, {
+			m:    `prometheus_http_request_duration_seconds_sum{handler="/"}`,
+			v:    2000,
+			lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_sum", "handler", "/"),
+		}, {
+			m:    `prometheus_http_request_duration_seconds_count{handler="/"}`,
+			v:    1423,
+			lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_count", "handler", "/"),
 		}, {
 			comment: "# Hrandom comment starting with prefix of HELP",
 		}, {
@@ -151,6 +185,10 @@ testmetric{label="\"bar\""} 1`
 			m:    "testmetric{label=\"\\\"bar\\\"\"}",
 			v:    1,
 			lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
+		}, {
+			m:    `testmetric{le="10"}`,
+			v:    1,
+			lset: labels.FromStrings("__name__", "testmetric", "le", "10"),
 		}, {
 			m:    "metric",
 			help: "foo\x00bar",
@@ -197,7 +235,7 @@ func TestUTF8PromParse(t *testing.T) {
 		}, {
 			m:    `{"go.gc_duration_seconds",quantile="0"}`,
 			v:    4.9351e-05,
-			lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"),
+			lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.0"),
 		}, {
 			m: `{"go.gc_duration_seconds",quantile="0.25",}`,
 			v: 7.424100000000001e-05,
View file

@@ -20,7 +20,9 @@ import (
 	"fmt"
 	"io"
 	"math"
+	"strconv"
 	"strings"
+	"sync"
 	"unicode/utf8"
 
 	"github.com/gogo/protobuf/proto"
@@ -34,6 +36,15 @@ import (
 	dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
 )
 
+// floatFormatBufPool is exclusively used in formatOpenMetricsFloat.
+var floatFormatBufPool = sync.Pool{
+	New: func() interface{} {
+		// To contain at most 17 digits and additional syntax for a float64.
+		b := make([]byte, 0, 24)
+		return &b
+	},
+}
+
 // ProtobufParser is a very inefficient way of unmarshaling the old Prometheus
 // protobuf format and then present it as if it were parsed by a
 // Prometheus-2-style text parser. This is only done so that we can easily plug
@@ -629,11 +640,15 @@ func formatOpenMetricsFloat(f float64) string {
 	case math.IsInf(f, -1):
 		return "-Inf"
 	}
-	s := fmt.Sprint(f)
-	if strings.ContainsAny(s, "e.") {
-		return s
+	bp := floatFormatBufPool.Get().(*[]byte)
+	defer floatFormatBufPool.Put(bp)
+
+	*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+	if bytes.ContainsAny(*bp, "e.") {
+		return string(*bp)
 	}
-	return s + ".0"
+	*bp = append(*bp, '.', '0')
+	return string(*bp)
 }
 
 // isNativeHistogram returns false iff the provided histogram has no spans at
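Note on the change above: formatOpenMetricsFloat now formats into a pooled buffer instead of allocating via fmt.Sprint; strconv.AppendFloat with 'g' and precision -1 produces the shortest string that round-trips the float64, and ".0" is appended so integral values remain valid OpenMetrics floats. Illustrative results (not part of the diff):

	formatOpenMetricsFloat(1)           // "1.0"
	formatOpenMetricsFloat(0.25)        // "0.25"
	formatOpenMetricsFloat(math.Inf(1)) // "+Inf"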
View file

@@ -409,6 +409,49 @@ metric: <
 >
 >
 `,
+	`name: "test_histogram3"
+help: "Similar histogram as before but now with integer buckets."
+type: HISTOGRAM
+metric: <
+  histogram: <
+    sample_count: 6
+    sample_sum: 50
+    bucket: <
+      cumulative_count: 2
+      upper_bound: -20
+    >
+    bucket: <
+      cumulative_count: 4
+      upper_bound: 20
+      exemplar: <
+        label: <
+          name: "dummyID"
+          value: "59727"
+        >
+        value: 15
+        timestamp: <
+          seconds: 1625851153
+          nanos: 146848499
+        >
+      >
+    >
+    bucket: <
+      cumulative_count: 6
+      upper_bound: 30
+      exemplar: <
+        label: <
+          name: "dummyID"
+          value: "5617"
+        >
+        value: 25
+      >
+    >
+    schema: 0
+    zero_threshold: 0
+  >
+>
+`,
 	`name: "test_histogram_family"
 help: "Test histogram metric family with two very simple histograms."
@@ -1050,6 +1093,66 @@ func TestProtobufParse(t *testing.T) {
 				"le", "+Inf",
 			),
 		},
+		{
+			m:    "test_histogram3",
+			help: "Similar histogram as before but now with integer buckets.",
+		},
+		{
+			m:   "test_histogram3",
+			typ: model.MetricTypeHistogram,
+		},
+		{
+			m: "test_histogram3_count",
+			v: 6,
+			lset: labels.FromStrings(
+				"__name__", "test_histogram3_count",
+			),
+		},
+		{
+			m: "test_histogram3_sum",
+			v: 50,
+			lset: labels.FromStrings(
+				"__name__", "test_histogram3_sum",
+			),
+		},
+		{
+			m: "test_histogram3_bucket\xffle\xff-20.0",
+			v: 2,
+			lset: labels.FromStrings(
+				"__name__", "test_histogram3_bucket",
+				"le", "-20.0",
+			),
+		},
+		{
+			m: "test_histogram3_bucket\xffle\xff20.0",
+			v: 4,
+			lset: labels.FromStrings(
+				"__name__", "test_histogram3_bucket",
+				"le", "20.0",
+			),
+			es: []exemplar.Exemplar{
+				{Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146},
+			},
+		},
+		{
+			m: "test_histogram3_bucket\xffle\xff30.0",
+			v: 6,
+			lset: labels.FromStrings(
+				"__name__", "test_histogram3_bucket",
+				"le", "30.0",
+			),
+			es: []exemplar.Exemplar{
+				{Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: false},
+			},
+		},
+		{
+			m: "test_histogram3_bucket\xffle\xff+Inf",
+			v: 6,
+			lset: labels.FromStrings(
+				"__name__", "test_histogram3_bucket",
+				"le", "+Inf",
+			),
+		},
 		{
 			m:    "test_histogram_family",
 			help: "Test histogram metric family with two very simple histograms.",
@@ -1857,6 +1960,66 @@ func TestProtobufParse(t *testing.T) {
 				"le", "+Inf",
 			),
 		},
+		{
+			m:    "test_histogram3",
+			help: "Similar histogram as before but now with integer buckets.",
+		},
+		{
+			m:   "test_histogram3",
+			typ: model.MetricTypeHistogram,
+		},
+		{
+			m: "test_histogram3_count",
+			v: 6,
+			lset: labels.FromStrings(
+				"__name__", "test_histogram3_count",
+			),
+		},
+		{
+			m: "test_histogram3_sum",
+			v: 50,
+			lset: labels.FromStrings(
+				"__name__", "test_histogram3_sum",
+			),
+		},
+		{
+			m: "test_histogram3_bucket\xffle\xff-20.0",
+			v: 2,
+			lset: labels.FromStrings(
+				"__name__", "test_histogram3_bucket",
+				"le", "-20.0",
+			),
+		},
+		{
+			m: "test_histogram3_bucket\xffle\xff20.0",
+			v: 4,
+			lset: labels.FromStrings(
+				"__name__", "test_histogram3_bucket",
+				"le", "20.0",
+			),
+			es: []exemplar.Exemplar{
+				{Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146},
+			},
+		},
+		{
+			m: "test_histogram3_bucket\xffle\xff30.0",
+			v: 6,
+			lset: labels.FromStrings(
+				"__name__", "test_histogram3_bucket",
+				"le", "30.0",
+			),
+			es: []exemplar.Exemplar{
+				{Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: false},
+			},
+		},
+		{
+			m: "test_histogram3_bucket\xffle\xff+Inf",
+			v: 6,
+			lset: labels.FromStrings(
+				"__name__", "test_histogram3_bucket",
+				"le", "+Inf",
+			),
+		},
 		{
 			m:    "test_histogram_family",
 			help: "Test histogram metric family with two very simple histograms.",
Some files were not shown because too many files have changed in this diff.