Commit ed2668bbda

Merge branch 'main' into jhesketh/clamp

Signed-off-by: Joshua Hesketh <josh@nitrotech.org>

.github/workflows/buf-lint.yml (vendored, 2 lines changed)

@@ -12,7 +12,7 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/buf.yml (vendored, 2 lines changed)

@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'prometheus'
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/ci.yml (vendored, 50 lines changed)

@@ -13,12 +13,12 @@ jobs:
       # should also be updated.
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/setup_environment
         with:
           enable_npm: true
-      - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 test-flags=""
+      - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
       - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
       - run: make -C documentation/examples/remote_storage
       - run: make -C documentation/examples

@@ -29,8 +29,8 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/setup_environment
       - run: go test --tags=dedupelabels ./...
       - run: GOARCH=386 go test ./cmd/prometheus

@@ -48,7 +48,7 @@ jobs:
       # The go version in this image should be N-1 wrt test_go.
       image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - run: make build
       # Don't run NPM build; don't run race-detector.
       - run: make test GO_ONLY=1 test-flags=""

@@ -62,8 +62,8 @@ jobs:
       image: quay.io/prometheus/golang-builder:1.23-base

     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/setup_environment
         with:
           enable_go: false

@@ -79,7 +79,7 @@ jobs:
     name: Go tests on Windows
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:
           go-version: 1.23.x

@@ -96,7 +96,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - run: go install ./cmd/promtool/.
       - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
       - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest

@@ -121,8 +121,8 @@ jobs:
       matrix:
         thread: [ 0, 1, 2 ]
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/build
         with:
           promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"

@@ -146,8 +146,8 @@ jobs:
       # Whenever the Go version is updated here, .promu.yml
       # should also be updated.
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/build
         with:
           parallelism: 12

@@ -169,7 +169,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Go
         uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:

@@ -182,7 +182,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Go
         uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:

@@ -191,11 +191,11 @@ jobs:
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
       - name: Lint
-        uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
+        uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
         with:
           args: --verbose
           # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v1.60.2
+          version: v1.61.0
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'

@@ -208,8 +208,8 @@ jobs:
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/publish_main
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}

@@ -225,8 +225,8 @@ jobs:
       ||
       (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/publish_release
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}

@@ -240,10 +240,10 @@ jobs:
     needs: [test_ui, codeql]
     steps:
       - name: Checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - name: Install nodejs
-        uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4
+        uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0
         with:
           node-version-file: "web/ui/.nvmrc"
           registry-url: "https://registry.npmjs.org"

.github/workflows/codeql-analysis.yml (vendored, 2 lines changed)

@@ -24,7 +24,7 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Initialize CodeQL
         uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10

.github/workflows/container_description.yml (vendored, 4 lines changed)

@@ -18,7 +18,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Set docker hub repo name
         run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
       - name: Push README to Dockerhub

@@ -40,7 +40,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Set quay.io org name
         run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
       - name: Set quay.io repo name

.github/workflows/fuzzing.yml (vendored, 2 lines changed)

@@ -21,7 +21,7 @@ jobs:
           fuzz-seconds: 600
           dry-run: false
       - name: Upload Crash
-        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
+        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
        if: failure() && steps.build.outcome == 'success'
        with:
          name: artifacts

.github/workflows/repo_sync.yml (vendored, 2 lines changed)

@@ -13,7 +13,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - run: ./scripts/sync_repo_files.sh
         env:
           GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}

.github/workflows/scorecards.yml (vendored, 4 lines changed)

@@ -21,7 +21,7 @@ jobs:

     steps:
       - name: "Checkout code"
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # tag=v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2
         with:
           persist-credentials: false

@@ -37,7 +37,7 @@ jobs:
       # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
       # format to the repository Actions tab.
      - name: "Upload artifact"
-        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # tag=v4.4.0
+        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # tag=v4.4.3
         with:
           name: SARIF file
           path: results.sarif

@@ -109,7 +109,7 @@ linters-settings:
     extra-rules: true
   perfsprint:
     # Optimizes `fmt.Errorf`.
-    errorf: false
+    errorf: true
   revive:
     # By default, revive will enable only the linting rules that are named in the configuration file.
     # So, it's needed to explicitly enable all required rules here.

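Context for the setting above: with `errorf: true`, the perfsprint linter flags `fmt.Errorf` calls that use no formatting verbs, which is exactly the rewrite applied in the Go hunks later in this diff. A minimal standalone sketch (not taken from the repository) of the before/after shape:

package main

import (
	"errors"
	"fmt"
)

func before() error {
	// perfsprint (errorf: true) reports this: no formatting verbs are used.
	return fmt.Errorf("not a native histogram")
}

func after() error {
	// Preferred form: errors.New avoids the formatting machinery entirely.
	return errors.New("not a native histogram")
}

func main() {
	fmt.Println(before(), after())
}
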
CHANGELOG.md (28 lines changed)

@@ -2,6 +2,18 @@

 ## unreleased

 * [CHANGE] Scraping: Remove implicit fallback to the Prometheus text format in case of invalid/missing Content-Type and fail the scrape instead. Add ability to specify a `fallback_scrape_protocol` in the scrape config. #15136
 * [CHANGE] Remote-write: default enable_http2 to false. #15219
 * [CHANGE] Scraping: normalize "le" and "quantile" label values upon ingestion. #15164
 * [CHANGE] Scraping: config `scrape_classic_histograms` was renamed to `always_scrape_classic_histograms`. #15178
 * [CHANGE] Config: remove expand-external-labels flag, expand external labels env vars by default. #14657
 * [CHANGE] Disallow configuring AM with the v1 api. #13883
 * [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710
 * [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing #15196
 * [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694
 * [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941
 * [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941
 * [BUGFIX] OTLP receiver: Preserve colons when generating metric names in suffix adding mode (this mode is always enabled, unless one uses Prometheus as a library). #15251
 * [BUGFIX] Clamp functions: Ignore any points with native histograms. #15169

 ## 3.0.0-beta.1 / 2024-10-09

@@ -18,7 +30,6 @@
 * [ENHANCEMENT] PromQL: Introduce exponential interpolation for native histograms. #14677
 * [ENHANCEMENT] TSDB: Add support for ingestion of out-of-order native histogram samples. #14850, #14546
 * [ENHANCEMENT] Alerts: remove metrics for removed Alertmanagers. #13909
-* [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694
 * [ENHANCEMENT] Kubernetes SD: Support sidecar containers in endpoint discovery. #14929
 * [ENHANCEMENT] Consul SD: Support catalog filters. #11224
 * [PERF] TSDB: Parallelize deletion of postings after head compaction. #14975

@@ -50,12 +61,17 @@ As is traditional with a beta release, we do **not** recommend users install 3.0
 * [CHANGE] Remove deprecated `remote-write-receiver`,`promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526
 * [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643
 * [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875
 * [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769
 * [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029

-## 2.55.0-rc.0 / 2024-09-20
+## 2.55.1 / 2024-01-04
+
+* [BUGFIX] `round()` function did not remove `__name__` label. #15250
+
+## 2.55.0 / 2024-10-22

 * [FEATURE] PromQL: Add experimental `info` function. #14495
 * [FEATURE] Support UTF-8 characters in label names - feature flag `utf8-names`. #14482, #14880, #14736, #14727
 * [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769
 * [FEATURE] Scraping: Add the ability to set custom `http_headers` in config. #14817
 * [FEATURE] Scraping: Support feature flag `created-timestamp-zero-ingestion` in OpenMetrics. #14356, #14815
 * [FEATURE] Scraping: `scrape_failure_log_file` option to log failures to a file. #14734

@@ -63,19 +79,21 @@ As is traditional with a beta release, we do **not** recommend users install 3.0
 * [FEATURE] Remote-Write: Support Google Cloud Monitoring authorization. #14346
 * [FEATURE] Promtool: `tsdb create-blocks` new option to add labels. #14403
 * [FEATURE] Promtool: `promtool test` adds `--junit` flag to format results. #14506
 * [FEATURE] TSDB: Add `delayed-compaction` feature flag, for people running many Prometheus to randomize timing. #12532
 * [ENHANCEMENT] OTLP receiver: Warn on exponential histograms with zero count and non-zero sum. #14706
 * [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612
 * [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379
 * [ENHANCEMENT] Remote-Write: Don't reshard if we haven't successfully sent a sample since last update. #14450
 * [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477
-* [ENHANCEMENT] PromQL: Experimental `sort_by_label` and `sort_by_label_desc` sort by all labels when label is equal. #14655
+* [ENHANCEMENT] PromQL: Experimental `sort_by_label` and `sort_by_label_desc` sort by all labels when label is equal. #14655, #14985
 * [ENHANCEMENT] PromQL: Clarify error message logged when Go runtime panic occurs during query evaluation. #14621
 * [ENHANCEMENT] PromQL: Use Kahan summation for better accuracy in `avg` and `avg_over_time`. #14413
 * [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816
 * [ENHANCEMENT] API: Support multiple listening addresses. #14665
 * [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934
-* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948
+* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948, #15120
 * [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729
 * [BUGFIX] PromQL: make sort_by_label stable. #14985
 * [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147
 * [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622
 * [BUGFIX] Scraping: exemplars could be dropped in protobuf scraping. #14810

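The `fallback_scrape_protocol` entry above corresponds to the new `ScrapeFallbackProtocol` field shown in the config/config.go hunks later in this diff. A minimal Go sketch of how a caller might set and validate it (the job name and the use of `config.DefaultGlobalConfig` are illustrative assumptions, not taken from this change):

package main

import (
	"log"

	"github.com/prometheus/prometheus/config"
)

func main() {
	sc := &config.ScrapeConfig{
		JobName: "example", // hypothetical job name
		// Fall back to the classic text format when a target sends a
		// missing or unparsable Content-Type header.
		ScrapeFallbackProtocol: config.PrometheusText0_0_4,
	}
	if err := sc.Validate(config.DefaultGlobalConfig); err != nil {
		log.Fatalf("invalid scrape config: %v", err)
	}
	log.Println("fallback protocol accepted:", sc.ScrapeFallbackProtocol)
}
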
@@ -2,6 +2,7 @@ ARG ARCH="amd64"
 ARG OS="linux"
 FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest
 LABEL maintainer="The Prometheus Authors <prometheus-developers@googlegroups.com>"
+LABEL org.opencontainers.image.source="https://github.com/prometheus/prometheus"

 ARG ARCH="amd64"
 ARG OS="linux"

@@ -190,20 +190,19 @@ type flagConfig struct {
     queryConcurrency            int
     queryMaxSamples             int
     RemoteFlushDeadline         model.Duration
     nameEscapingScheme          string
     maxNotificationsSubscribers int

     enableAutoReload   bool
     autoReloadInterval model.Duration

-    featureList []string
     memlimitEnable bool
     memlimitRatio  float64

+    featureList []string
     // These options are extracted from featureList
     // for ease of use.
     enableExpandExternalLabels bool
     enablePerStepStats         bool
     enableAutoGOMAXPROCS       bool
     enableAutoGOMEMLIMIT       bool
     enableConcurrentRuleEval   bool

     prometheusURL string

@@ -220,9 +219,6 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
     opts := strings.Split(f, ",")
     for _, o := range opts {
         switch o {
-        case "expand-external-labels":
-            c.enableExpandExternalLabels = true
-            logger.Info("Experimental expand-external-labels enabled")
         case "exemplar-storage":
             c.tsdb.EnableExemplarStorage = true
             logger.Info("Experimental in-memory exemplar storage enabled")

@@ -247,9 +243,6 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
                 c.autoReloadInterval, _ = model.ParseDuration("1s")
             }
             logger.Info("Enabled automatic configuration file reloading. Checking for configuration changes every", "interval", c.autoReloadInterval)
-        case "auto-gomemlimit":
-            c.enableAutoGOMEMLIMIT = true
-            logger.Info("Automatically set GOMEMLIMIT to match Linux container or system memory limit")
         case "concurrent-rule-eval":
             c.enableConcurrentRuleEval = true
             logger.Info("Experimental concurrent rule evaluation enabled.")

@@ -336,6 +329,8 @@ func main() {
     a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry. Can be repeated.").
         Default("0.0.0.0:9090").StringsVar(&cfg.web.ListenAddresses)

+    a.Flag("auto-gomemlimit", "Automatically set GOMEMLIMIT to match Linux container or system memory limit").
+        Default("true").BoolVar(&cfg.memlimitEnable)
     a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory").
         Default("0.9").FloatVar(&cfg.memlimitRatio)

@@ -437,6 +432,9 @@ func main() {
     serverOnlyFlag(a, "storage.tsdb.samples-per-chunk", "Target number of samples per chunk.").
         Default("120").Hidden().IntVar(&cfg.tsdb.SamplesPerChunk)

+    serverOnlyFlag(a, "storage.tsdb.delayed-compaction.max-percent", "Sets the upper limit for the random compaction delay, specified as a percentage of the head chunk range. 100 means the compaction can be delayed by up to the entire head chunk range. Only effective when the delayed-compaction feature flag is enabled.").
+        Default("10").Hidden().IntVar(&cfg.tsdb.CompactionDelayMaxPercent)
+
     agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage.").
         Default("data-agent/").StringVar(&cfg.agentStoragePath)

@@ -516,7 +514,7 @@ func main() {
     a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
         Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)

-    a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+    a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
         Default("").StringsVar(&cfg.featureList)

     a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)

@@ -552,15 +550,6 @@ func main() {
         os.Exit(1)
     }

-    if cfg.nameEscapingScheme != "" {
-        scheme, err := model.ToEscapingScheme(cfg.nameEscapingScheme)
-        if err != nil {
-            fmt.Fprintf(os.Stderr, `Invalid name escaping scheme: %q; Needs to be one of "values", "underscores", or "dots"`, cfg.nameEscapingScheme)
-            os.Exit(1)
-        }
-        model.NameEscapingScheme = scheme
-    }
-
     if agentMode && len(serverOnlyFlags) > 0 {
         fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags)
         os.Exit(3)

@@ -595,7 +584,7 @@ func main() {

     // Throw error for invalid config before starting other components.
     var cfgFile *config.Config
-    if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, promslog.NewNopLogger()); err != nil {
+    if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, promslog.NewNopLogger()); err != nil {
         absPath, pathErr := filepath.Abs(cfg.configFile)
         if pathErr != nil {
             absPath = cfg.configFile

@@ -667,6 +656,12 @@ func main() {

         cfg.tsdb.MaxBlockDuration = maxBlockDuration
     }
+
+    // Delayed compaction checks
+    if cfg.tsdb.EnableDelayedCompaction && (cfg.tsdb.CompactionDelayMaxPercent > 100 || cfg.tsdb.CompactionDelayMaxPercent <= 0) {
+        logger.Warn("The --storage.tsdb.delayed-compaction.max-percent should have a value between 1 and 100. Using default", "default", tsdb.DefaultCompactionDelayMaxPercent)
+        cfg.tsdb.CompactionDelayMaxPercent = tsdb.DefaultCompactionDelayMaxPercent
+    }
     }

     noStepSubqueryInterval := &safePromQLNoStepSubqueryInterval{}

@@ -770,7 +765,7 @@ func main() {
         }
     }

-    if cfg.enableAutoGOMEMLIMIT {
+    if cfg.memlimitEnable {
         if _, err := memlimit.SetGoMemLimitWithOpts(
             memlimit.WithRatio(cfg.memlimitRatio),
             memlimit.WithProvider(

@@ -1145,7 +1140,7 @@ func main() {
     for {
         select {
         case <-hup:
-            if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
+            if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
                 logger.Error("Error reloading config", "err", err)
             } else if cfg.enableAutoReload {
                 if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil {

@@ -1155,7 +1150,7 @@ func main() {
             }
         }
         case rc := <-webHandler.Reload():
-            if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
+            if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
                 logger.Error("Error reloading config", "err", err)
                 rc <- err
             } else {

@@ -1180,7 +1175,7 @@ func main() {
             }
             logger.Info("Configuration file change detected, reloading the configuration.")

-            if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
+            if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
                 logger.Error("Error reloading config", "err", err)
             } else {
                 checksum = currentChecksum

@@ -1210,7 +1205,7 @@ func main() {
             return nil
         }

-        if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, func(bool) {}, reloaders...); err != nil {
+        if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, func(bool) {}, reloaders...); err != nil {
             return fmt.Errorf("error loading config from %q: %w", cfg.configFile, err)
         }

@@ -1437,7 +1432,7 @@ type reloader struct {
     reloader func(*config.Config) error
 }

-func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger *slog.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) {
+func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) {
     start := time.Now()
     timingsLogger := logger
     logger.Info("Loading configuration file", "filename", filename)

@@ -1453,7 +1448,7 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
         }
     }()

-    conf, err := config.LoadFile(filename, agentMode, expandExternalLabels, logger)
+    conf, err := config.LoadFile(filename, agentMode, logger)
     if err != nil {
         return fmt.Errorf("couldn't load configuration (--config.file=%q): %w", filename, err)
     }

@@ -1643,6 +1638,9 @@ func (s *readyStorage) Appender(ctx context.Context) storage.Appender {

 type notReadyAppender struct{}

+// SetOptions does nothing in this appender implementation.
+func (n notReadyAppender) SetOptions(opts *storage.AppendOptions) {}
+
 func (n notReadyAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
     return 0, tsdb.ErrNotReady
 }

@@ -1797,6 +1795,7 @@ type tsdbOptions struct {
     EnableMemorySnapshotOnShutdown bool
     EnableNativeHistograms         bool
     EnableDelayedCompaction        bool
+    CompactionDelayMaxPercent      int
     EnableOverlappingCompaction    bool
     EnableOOONativeHistograms      bool
 }

@@ -1821,6 +1820,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
     EnableOOONativeHistograms:   opts.EnableOOONativeHistograms,
     OutOfOrderTimeWindow:        opts.OutOfOrderTimeWindow,
     EnableDelayedCompaction:     opts.EnableDelayedCompaction,
+    CompactionDelayMaxPercent:   opts.CompactionDelayMaxPercent,
     EnableOverlappingCompaction: opts.EnableOverlappingCompaction,
     }
 }

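The delayed-compaction hunk above clamps --storage.tsdb.delayed-compaction.max-percent back to its default when the value falls outside 1-100. The same check in isolation, as a runnable sketch (defaultMaxPercent stands in for tsdb.DefaultCompactionDelayMaxPercent and is an assumption here):

package main

import "fmt"

const defaultMaxPercent = 10 // stand-in for tsdb.DefaultCompactionDelayMaxPercent

// normalizeCompactionDelayMaxPercent mirrors the startup check in main():
// anything outside the interval (0, 100] falls back to the default.
func normalizeCompactionDelayMaxPercent(v int) int {
	if v > 100 || v <= 0 {
		return defaultMaxPercent
	}
	return v
}

func main() {
	for _, v := range []int{-5, 0, 10, 100, 250} {
		fmt.Printf("%d -> %d\n", v, normalizeCompactionDelayMaxPercent(v))
	}
}
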
@@ -125,6 +125,7 @@ func TestFailedStartupExitCode(t *testing.T) {
     if testing.Short() {
         t.Skip("skipping test in short mode.")
     }
+    t.Parallel()

     fakeInputFile := "fake-input-file"
     expectedExitStatus := 2

@@ -211,9 +212,36 @@ func TestWALSegmentSizeBounds(t *testing.T) {
     if testing.Short() {
         t.Skip("skipping test in short mode.")
     }
+    t.Parallel()

-    for size, expectedExitStatus := range map[string]int{"9MB": 1, "257MB": 1, "10": 2, "1GB": 1, "12MB": 0} {
-        prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))
+    for _, tc := range []struct {
+        size     string
+        exitCode int
+    }{
+        {
+            size:     "9MB",
+            exitCode: 1,
+        },
+        {
+            size:     "257MB",
+            exitCode: 1,
+        },
+        {
+            size:     "10",
+            exitCode: 2,
+        },
+        {
+            size:     "1GB",
+            exitCode: 1,
+        },
+        {
+            size:     "12MB",
+            exitCode: 0,
+        },
+    } {
+        t.Run(tc.size, func(t *testing.T) {
+            t.Parallel()
+            prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+tc.size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))

             // Log stderr in case of failure.
             stderr, err := prom.StderrPipe()

@@ -226,7 +254,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
             err = prom.Start()
             require.NoError(t, err)

-            if expectedExitStatus == 0 {
+            if tc.exitCode == 0 {
                 done := make(chan error, 1)
                 go func() { done <- prom.Wait() }()
                 select {

@@ -236,7 +264,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
                     prom.Process.Kill()
                     <-done
                 }
-                continue
+                return
             }

             err = prom.Wait()

@@ -244,19 +272,33 @@ func TestWALSegmentSizeBounds(t *testing.T) {
             var exitError *exec.ExitError
             require.ErrorAs(t, err, &exitError)
             status := exitError.Sys().(syscall.WaitStatus)
-            require.Equal(t, expectedExitStatus, status.ExitStatus())
+            require.Equal(t, tc.exitCode, status.ExitStatus())
+        })
     }
 }

 func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
-    t.Parallel()
-
     if testing.Short() {
         t.Skip("skipping test in short mode.")
     }
+    t.Parallel()

-    for size, expectedExitStatus := range map[string]int{"512KB": 1, "1MB": 0} {
-        prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))
+    for _, tc := range []struct {
+        size     string
+        exitCode int
+    }{
+        {
+            size:     "512KB",
+            exitCode: 1,
+        },
+        {
+            size:     "1MB",
+            exitCode: 0,
+        },
+    } {
+        t.Run(tc.size, func(t *testing.T) {
+            t.Parallel()
+            prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+tc.size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))

             // Log stderr in case of failure.
             stderr, err := prom.StderrPipe()

@@ -269,7 +311,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
             err = prom.Start()
             require.NoError(t, err)

-            if expectedExitStatus == 0 {
+            if tc.exitCode == 0 {
                 done := make(chan error, 1)
                 go func() { done <- prom.Wait() }()
                 select {

@@ -279,7 +321,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
                     prom.Process.Kill()
                     <-done
                 }
-                continue
+                return
             }

             err = prom.Wait()

@@ -287,7 +329,8 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
             var exitError *exec.ExitError
             require.ErrorAs(t, err, &exitError)
             status := exitError.Sys().(syscall.WaitStatus)
-            require.Equal(t, expectedExitStatus, status.ExitStatus())
+            require.Equal(t, tc.exitCode, status.ExitStatus())
+        })
     }
 }

@@ -353,6 +396,8 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
 }

 func TestAgentSuccessfulStartup(t *testing.T) {
+    t.Parallel()
+
     prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig)
     require.NoError(t, prom.Start())

@@ -371,6 +416,8 @@ func TestAgentSuccessfulStartup(t *testing.T) {
 }

 func TestAgentFailedStartupWithServerFlag(t *testing.T) {
+    t.Parallel()
+
     prom := exec.Command(promPath, "-test.main", "--agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)

     output := bytes.Buffer{}

@@ -398,6 +445,8 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) {
 }

 func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
+    t.Parallel()
+
     prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
     require.NoError(t, prom.Start())

@@ -419,6 +468,7 @@ func TestModeSpecificFlags(t *testing.T) {
     if testing.Short() {
         t.Skip("skipping test in short mode.")
     }
+    t.Parallel()

     testcases := []struct {
         mode string

@@ -433,6 +483,7 @@ func TestModeSpecificFlags(t *testing.T) {

     for _, tc := range testcases {
         t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) {
+            t.Parallel()
             args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"}

             if tc.mode == "agent" {

@@ -484,6 +535,8 @@ func TestDocumentation(t *testing.T) {
     if runtime.GOOS == "windows" {
         t.SkipNow()
     }
+    t.Parallel()

     ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
     defer cancel()

@@ -508,6 +561,8 @@ func TestDocumentation(t *testing.T) {
 }

 func TestRwProtoMsgFlagParser(t *testing.T) {
+    t.Parallel()
+
     defaultOpts := config.RemoteWriteProtoMsgs{
         config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2,
     }

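One detail worth noting in the test refactor above: the map-driven loops become named, parallel sub-tests, and `continue` becomes `return` because the body now runs inside a `t.Run` closure, where returning ends only that sub-test. A stripped-down sketch of the pattern (illustrative names, not repository code):

package example

import "testing"

func TestExitCodes(t *testing.T) {
	t.Parallel()
	for _, tc := range []struct {
		size     string
		exitCode int
	}{
		{size: "9MB", exitCode: 1},
		{size: "12MB", exitCode: 0},
	} {
		t.Run(tc.size, func(t *testing.T) {
			t.Parallel()
			if tc.exitCode == 0 {
				// In the old loop this was `continue`; inside the closure we
				// return to finish this sub-test and let the others run.
				return
			}
			// ... assertions for the non-zero exit code case ...
		})
	}
}
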
@@ -34,6 +34,7 @@ func TestStartupInterrupt(t *testing.T) {
     if testing.Short() {
         t.Skip("skipping test in short mode.")
     }
+    t.Parallel()

     port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t))

@@ -456,6 +456,7 @@ func TestQueryLog(t *testing.T) {
     if testing.Short() {
         t.Skip("skipping test in short mode.")
     }
+    t.Parallel()

     cwd, err := os.Getwd()
     require.NoError(t, err)

@@ -474,6 +475,7 @@ func TestQueryLog(t *testing.T) {
         }

         t.Run(p.String(), func(t *testing.T) {
+            t.Parallel()
             p.run(t)
         })
     }

@@ -34,8 +34,8 @@ import (
 )

 var (
-    errNotNativeHistogram = fmt.Errorf("not a native histogram")
-    errNotEnoughData      = fmt.Errorf("not enough data")
+    errNotNativeHistogram = errors.New("not a native histogram")
+    errNotEnoughData      = errors.New("not enough data")

     outputHeader = `Bucket stats for each histogram series over time
 ------------------------------------------------

@@ -169,7 +169,7 @@ func querySamples(ctx context.Context, api v1.API, query string, end time.Time)

     matrix, ok := values.(model.Matrix)
     if !ok {
-        return nil, fmt.Errorf("query of buckets resulted in non-Matrix")
+        return nil, errors.New("query of buckets resulted in non-Matrix")
     }

     return matrix, nil

@@ -259,7 +259,7 @@ func getBucketCountsAtTime(matrix model.Matrix, numBuckets, timeIdx int) ([]int,
         prev := matrix[i].Values[timeIdx]
         // Assume the results are nicely aligned.
         if curr.Timestamp != prev.Timestamp {
-            return counts, fmt.Errorf("matrix result is not time aligned")
+            return counts, errors.New("matrix result is not time aligned")
         }
         counts[i+1] = int(curr.Value - prev.Value)
     }

@@ -49,7 +49,7 @@ func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) {

     _, ts, _ := p.Series()
     if ts == nil {
-        return 0, 0, fmt.Errorf("expected timestamp for series got none")
+        return 0, 0, errors.New("expected timestamp for series got none")
     }

     if *ts > maxt {

@@ -58,6 +58,7 @@ import (
     _ "github.com/prometheus/prometheus/plugins" // Register plugins.
     "github.com/prometheus/prometheus/promql/parser"
     "github.com/prometheus/prometheus/promql/promqltest"
+    "github.com/prometheus/prometheus/rules"
     "github.com/prometheus/prometheus/scrape"
     "github.com/prometheus/prometheus/util/documentcli"
 )

@@ -216,6 +217,7 @@ func main() {
         "test-rule-file",
         "The unit test file.",
     ).Required().ExistingFiles()
+    testRulesDebug := testRulesCmd.Flag("debug", "Enable unit test debugging.").Default("false").Bool()
     testRulesDiff := testRulesCmd.Flag("diff", "[Experimental] Print colored differential output between expected & received output.").Default("false").Bool()

     defaultDBPath := "data/"

@@ -391,6 +393,7 @@ func main() {
             },
             *testRulesRun,
             *testRulesDiff,
+            *testRulesDebug,
             *testRulesFiles...),
         )

@@ -441,7 +444,7 @@ func checkExperimental(f bool) {
     }
 }

-var errLint = fmt.Errorf("lint error")
+var errLint = errors.New("lint error")

 type lintConfig struct {
     all bool

@@ -575,7 +578,7 @@ func checkFileExists(fn string) error {
 func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, error) {
     fmt.Println("Checking", filename)

-    cfg, err := config.LoadFile(filename, agentMode, false, promslog.NewNopLogger())
+    cfg, err := config.LoadFile(filename, agentMode, promslog.NewNopLogger())
     if err != nil {
         return nil, err
     }

@@ -895,30 +898,30 @@ func compare(a, b compareRuleType) int {

 func checkDuplicates(groups []rulefmt.RuleGroup) []compareRuleType {
     var duplicates []compareRuleType
-    var rules compareRuleTypes
+    var cRules compareRuleTypes

     for _, group := range groups {
         for _, rule := range group.Rules {
-            rules = append(rules, compareRuleType{
+            cRules = append(cRules, compareRuleType{
                 metric: ruleMetric(rule),
-                label:  labels.FromMap(rule.Labels),
+                label:  rules.FromMaps(group.Labels, rule.Labels),
             })
         }
     }
-    if len(rules) < 2 {
+    if len(cRules) < 2 {
         return duplicates
     }
-    sort.Sort(rules)
+    sort.Sort(cRules)

-    last := rules[0]
-    for i := 1; i < len(rules); i++ {
-        if compare(last, rules[i]) == 0 {
+    last := cRules[0]
+    for i := 1; i < len(cRules); i++ {
+        if compare(last, cRules[i]) == 0 {
             // Don't add a duplicated rule multiple times.
             if len(duplicates) == 0 || compare(last, duplicates[len(duplicates)-1]) != 0 {
-                duplicates = append(duplicates, rules[i])
+                duplicates = append(duplicates, cRules[i])
             }
         }
-        last = rules[i]
+        last = cRules[i]
     }

     return duplicates

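In the checkDuplicates hunk above, the comparison labels are now built with rules.FromMaps(group.Labels, rule.Labels) instead of labels.FromMap(rule.Labels), so rules that differ only in group-level labels are no longer reported as duplicates. A rough, standalone sketch of the merge semantics this relies on (the precedence of rule-level labels over group-level ones is an assumption here, not confirmed by this diff):

package main

import (
	"fmt"
	"sort"
)

// mergeLabelMaps approximates building labels from both maps:
// later maps override earlier ones, and the result is order-independent.
func mergeLabelMaps(maps ...map[string]string) []string {
	merged := map[string]string{}
	for _, m := range maps {
		for k, v := range m {
			merged[k] = v
		}
	}
	out := make([]string, 0, len(merged))
	for k, v := range merged {
		out = append(out, k+"="+v)
	}
	sort.Strings(out)
	return out
}

func main() {
	groupLabels := map[string]string{"team": "db"}
	ruleLabels := map[string]string{"severity": "page"}
	fmt.Println(mergeLabelMaps(groupLabels, ruleLabels)) // [severity=page team=db]
}
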
@@ -41,7 +41,7 @@ type sdCheckResult struct {
 func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int {
     logger := promslog.New(&promslog.Config{})

-    cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
+    cfg, err := config.LoadFile(sdConfigFiles, false, logger)
     if err != nil {
         fmt.Fprintln(os.Stderr, "Cannot load config", err)
         return failureExitCode

@@ -6,7 +6,7 @@ scrape_configs:
 alerting:
   alertmanagers:
     - scheme: http
-      api_version: v1
+      api_version: v2
       file_sd_configs:
         - files:
             - nonexistent_file.yml

@@ -405,7 +405,7 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
         }
     }

-    b, err := db.Block(blockID)
+    b, err := db.Block(blockID, tsdb.DefaultPostingsDecoderFactory)
     if err != nil {
         return nil, nil, err
     }

@@ -662,7 +662,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
             histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
             fhchk, ok := chk.(*chunkenc.FloatHistogramChunk)
             if !ok {
-                return fmt.Errorf("chunk is not FloatHistogramChunk")
+                return errors.New("chunk is not FloatHistogramChunk")
             }
             it := fhchk.Iterator(nil)
             bucketCount := 0

@@ -677,7 +677,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
             histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
             hchk, ok := chk.(*chunkenc.HistogramChunk)
             if !ok {
-                return fmt.Errorf("chunk is not HistogramChunk")
+                return errors.New("chunk is not HistogramChunk")
             }
             it := hchk.Iterator(nil)
             bucketCount := 0

@@ -46,11 +46,11 @@ import (

 // RulesUnitTest does unit testing of rules based on the unit testing files provided.
 // More info about the file format can be found in the docs.
-func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
-    return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, files...)
+func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug bool, files ...string) int {
+    return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, debug, files...)
 }

-func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
+func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug bool, files ...string) int {
     failed := false
     junit := &junitxml.JUnitXML{}

@@ -60,7 +60,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts,
     }

     for _, f := range files {
-        if errs := ruleUnitTest(f, queryOpts, run, diffFlag, junit.Suite(f)); errs != nil {
+        if errs := ruleUnitTest(f, queryOpts, run, diffFlag, debug, junit.Suite(f)); errs != nil {
             fmt.Fprintln(os.Stderr, "  FAILED:")
             for _, e := range errs {
                 fmt.Fprintln(os.Stderr, e.Error())

@@ -82,7 +82,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts,
     return successExitCode
 }

-func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool, ts *junitxml.TestSuite) []error {
+func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag, debug bool, ts *junitxml.TestSuite) []error {
     b, err := os.ReadFile(filename)
     if err != nil {
         ts.Abort(err)

@@ -131,7 +131,7 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg
         if t.Interval == 0 {
             t.Interval = unitTestInp.EvaluationInterval
         }
-        ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...)
+        ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, unitTestInp.RuleFiles...)
         if ers != nil {
             for _, e := range ers {
                 tc.Fail(e.Error())

@@ -198,7 +198,14 @@ type testGroup struct {
 }

 // test performs the unit tests.
-func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) {
+func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug bool, ruleFiles ...string) (outErr []error) {
+    if debug {
+        testStart := time.Now()
+        fmt.Printf("DEBUG: Starting test %s\n", testname)
+        defer func() {
+            fmt.Printf("DEBUG: Test %s finished, took %v\n", testname, time.Since(testStart))
+        }()
+    }
     // Setup testing suite.
     suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
     if err != nil {

@@ -482,6 +489,32 @@ Outer:
         }
     }

+    if debug {
+        ts := tg.maxEvalTime()
+        // Potentially a test can be specified at a time with fractional seconds,
+        // which PromQL cannot represent, so round up to the next whole second.
+        ts = (ts + time.Second).Truncate(time.Second)
+        expr := fmt.Sprintf(`{__name__=~".+"}[%v]`, ts)
+        q, err := suite.QueryEngine().NewInstantQuery(context.Background(), suite.Queryable(), nil, expr, mint.Add(ts))
+        if err != nil {
+            fmt.Printf("DEBUG: Failed querying, expr: %q, err: %v\n", expr, err)
+            return errs
+        }
+        res := q.Exec(suite.Context())
+        if res.Err != nil {
+            fmt.Printf("DEBUG: Failed query exec, expr: %q, err: %v\n", expr, res.Err)
+            return errs
+        }
+        switch v := res.Value.(type) {
+        case promql.Matrix:
+            fmt.Printf("DEBUG: Dump of all data (input_series and rules) at %v:\n", ts)
+            fmt.Println(v.String())
+        default:
+            fmt.Printf("DEBUG: Got unexpected type %T\n", v)
+            return errs
+        }
+    }
+
     if len(errs) > 0 {
         return errs
     }

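With the signature changes above, callers of RulesUnitTest pass an extra debug flag; the promtool command wires it to the new --debug option. Inside the promtool package a call now looks roughly like this (the test-file path is illustrative, and this fragment lives in the same package as RulesUnitTest rather than being a standalone program):

// Within cmd/promtool (same package as RulesUnitTest).
func runRuleTestsWithDebug() int {
	return RulesUnitTest(
		promqltest.LazyLoaderOpts{
			EnableAtModifier:     true,
			EnableNegativeOffset: true,
		},
		nil,   // runStrings: run every test in the files
		false, // diffFlag: no colored diff output
		true,  // debug: print DEBUG timing and a dump of all series at the end
		"testdata/rules_test.yml", // illustrative path
	)
}
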
@@ -141,14 +141,14 @@ func TestRulesUnitTest(t *testing.T) {
             reuseCount[tt.want] += len(tt.args.files)
         }
         t.Run(tt.name, func(t *testing.T) {
-            if got := RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want {
+            if got := RulesUnitTest(tt.queryOpts, nil, false, false, tt.args.files...); got != tt.want {
                 t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
             }
         })
     }
     t.Run("Junit xml output ", func(t *testing.T) {
         var buf bytes.Buffer
-        if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, reuseFiles...); got != 1 {
+        if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, false, reuseFiles...); got != 1 {
             t.Errorf("RulesUnitTestResults() = %v, want 1", got)
         }
         var test junitxml.JUnitXML

@@ -230,7 +230,7 @@ func TestRulesUnitTestRun(t *testing.T) {
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            got := RulesUnitTest(tt.queryOpts, tt.args.run, false, tt.args.files...)
+            got := RulesUnitTest(tt.queryOpts, tt.args.run, false, false, tt.args.files...)
             require.Equal(t, tt.want, got)
         })
     }

@ -17,6 +17,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"mime"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
@ -72,7 +73,7 @@ const (
|
|||
)
|
||||
|
||||
// Load parses the YAML input s into a Config.
|
||||
func Load(s string, expandExternalLabels bool, logger *slog.Logger) (*Config, error) {
|
||||
func Load(s string, logger *slog.Logger) (*Config, error) {
|
||||
cfg := &Config{}
|
||||
// If the entire config body is empty the UnmarshalYAML method is
|
||||
// never called. We thus have to set the DefaultConfig at the entry
|
||||
|
@ -84,10 +85,6 @@ func Load(s string, expandExternalLabels bool, logger *slog.Logger) (*Config, er
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if !expandExternalLabels {
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
b := labels.NewScratchBuilder(0)
|
||||
cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) {
|
||||
newV := os.Expand(v.Value, func(s string) string {
|
||||
|
@ -106,17 +103,31 @@ func Load(s string, expandExternalLabels bool, logger *slog.Logger) (*Config, er
|
|||
// Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024
|
||||
b.Add(v.Name, newV)
|
||||
})
|
||||
if !b.Labels().IsEmpty() {
|
||||
cfg.GlobalConfig.ExternalLabels = b.Labels()
|
||||
}
|
||||
|
||||
switch cfg.OTLPConfig.TranslationStrategy {
|
||||
case UnderscoreEscapingWithSuffixes:
|
||||
case "":
|
||||
case NoUTF8EscapingWithSuffixes:
|
||||
if cfg.GlobalConfig.MetricNameValidationScheme == LegacyValidationConfig {
|
||||
return nil, errors.New("OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled")
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported OTLP translation strategy %q", cfg.OTLPConfig.TranslationStrategy)
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// LoadFile parses the given YAML file into a Config.
|
||||
func LoadFile(filename string, agentMode, expandExternalLabels bool, logger *slog.Logger) (*Config, error) {
|
||||
func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, error) {
|
||||
content, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cfg, err := Load(string(content), expandExternalLabels, logger)
|
||||
cfg, err := Load(string(content), logger)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing YAML file %s: %w", filename, err)
|
||||
}
|
||||
|
@ -165,7 +176,7 @@ var (
|
|||
// DefaultScrapeConfig is the default scrape configuration.
|
||||
DefaultScrapeConfig = ScrapeConfig{
|
||||
// ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals.
|
||||
ScrapeClassicHistograms: false,
|
||||
AlwaysScrapeClassicHistograms: false,
|
||||
MetricsPath: "/metrics",
|
||||
Scheme: "http",
|
||||
HonorLabels: false,
|
||||
|
@ -182,13 +193,18 @@ var (
|
|||
HTTPClientConfig: config.DefaultHTTPClientConfig,
|
||||
}
|
||||
|
||||
DefaultRemoteWriteHTTPClientConfig = config.HTTPClientConfig{
|
||||
FollowRedirects: true,
|
||||
EnableHTTP2: false,
|
||||
}
|
||||
|
||||
// DefaultRemoteWriteConfig is the default remote write configuration.
|
||||
DefaultRemoteWriteConfig = RemoteWriteConfig{
|
||||
RemoteTimeout: model.Duration(30 * time.Second),
|
||||
ProtobufMessage: RemoteWriteProtoMsgV1,
|
||||
QueueConfig: DefaultQueueConfig,
|
||||
MetadataConfig: DefaultMetadataConfig,
|
||||
HTTPClientConfig: config.DefaultHTTPClientConfig,
|
||||
HTTPClientConfig: DefaultRemoteWriteHTTPClientConfig,
|
||||
}
|
||||
|
||||
// DefaultQueueConfig is the default remote queue configuration.
|
||||
|
@ -235,7 +251,9 @@ var (
|
|||
}
|
||||
|
||||
// DefaultOTLPConfig is the default OTLP configuration.
|
||||
DefaultOTLPConfig = OTLPConfig{}
|
||||
DefaultOTLPConfig = OTLPConfig{
|
||||
TranslationStrategy: UnderscoreEscapingWithSuffixes,
|
||||
}
|
||||
)
|
||||
|
||||
// Config is the top-level configuration for Prometheus's config files.
|
||||
|
@ -475,9 +493,22 @@ func (s ScrapeProtocol) Validate() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// HeaderMediaType returns the MIME mediaType for a particular ScrapeProtocol.
|
||||
func (s ScrapeProtocol) HeaderMediaType() string {
|
||||
if _, ok := ScrapeProtocolsHeaders[s]; !ok {
|
||||
return ""
|
||||
}
|
||||
mediaType, _, err := mime.ParseMediaType(ScrapeProtocolsHeaders[s])
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return mediaType
|
||||
}
|
||||
|
||||
var (
|
||||
PrometheusProto ScrapeProtocol = "PrometheusProto"
|
||||
PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4"
|
||||
PrometheusText1_0_0 ScrapeProtocol = "PrometheusText1.0.0"
|
||||
OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1"
|
||||
OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0"
|
||||
UTF8NamesHeader string = model.EscapingKey + "=" + model.AllowUTF8
|
||||
|
@ -485,6 +516,7 @@ var (
|
|||
ScrapeProtocolsHeaders = map[ScrapeProtocol]string{
|
||||
PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
|
||||
PrometheusText0_0_4: "text/plain;version=0.0.4",
|
||||
PrometheusText1_0_0: "text/plain;version=1.0.0;escaping=allow-utf-8",
|
||||
OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1",
|
||||
OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0",
|
||||
}
|
||||
|
@ -494,6 +526,7 @@ var (
|
|||
DefaultScrapeProtocols = []ScrapeProtocol{
|
||||
OpenMetricsText1_0_0,
|
||||
OpenMetricsText0_0_1,
|
||||
PrometheusText1_0_0,
|
||||
PrometheusText0_0_4,
|
||||
}
|
||||
|
||||
|
@ -505,6 +538,7 @@ var (
|
|||
PrometheusProto,
|
||||
OpenMetricsText1_0_0,
|
||||
OpenMetricsText0_0_1,
|
||||
PrometheusText1_0_0,
|
||||
PrometheusText0_0_4,
|
||||
}
|
||||
)
@@ -631,10 +665,17 @@ type ScrapeConfig struct {
    // The protocols to negotiate during a scrape. It tells clients what
    // protocol are accepted by Prometheus and with what preference (most wanted is first).
    // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
-   // OpenMetricsText1.0.0, PrometheusText0.0.4.
+   // OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
    ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
-   // Whether to scrape a classic histogram that is also exposed as a native histogram.
-   ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"`
+   // The fallback protocol to use if the Content-Type provided by the target
+   // is not provided, blank, or not one of the expected values.
+   // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
+   // OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
+   ScrapeFallbackProtocol ScrapeProtocol `yaml:"fallback_scrape_protocol,omitempty"`
+   // Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
+   AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"`
+   // Whether to convert all scraped classic histograms into a native histogram with custom buckets.
+   ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
    // File to which scrape failures are logged.
    ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"`
    // The HTTP resource path on which to fetch metrics from targets.
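For illustration, a hedged sketch of how a fallback protocol value is checked; `ScrapeProtocol.Validate` and the supported names come from this diff, the `Example` wrapper and its package are only illustrative:

```go
package example

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func Example() {
	// A known protocol name is accepted and can be used as fallback_scrape_protocol.
	fmt.Println(config.ScrapeProtocol("PrometheusText1.0.0").Validate()) // <nil>
	// A lowercase or unknown name is rejected, as exercised by the new
	// scrape_config_files_fallback_scrape_protocol1.bad.yml test case below.
	fmt.Println(config.ScrapeProtocol("prometheusproto").Validate() != nil) // true
}
```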
@@ -782,6 +823,12 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
        return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName)
    }

+   if c.ScrapeFallbackProtocol != "" {
+       if err := c.ScrapeFallbackProtocol.Validate(); err != nil {
+           return fmt.Errorf("invalid fallback_scrape_protocol for scrape config with job name %q: %w", c.JobName, err)
+       }
+   }
+
    switch globalConfig.MetricNameValidationScheme {
    case LegacyValidationConfig:
    case "", UTF8ValidationConfig:
@@ -957,6 +1004,7 @@ func (a AlertmanagerConfigs) ToMap() map[string]*AlertmanagerConfig {

// AlertmanagerAPIVersion represents a version of the
// github.com/prometheus/alertmanager/api, e.g. 'v1' or 'v2'.
+// 'v1' is no longer supported.
type AlertmanagerAPIVersion string

// UnmarshalYAML implements the yaml.Unmarshaler interface.

@@ -986,7 +1034,7 @@ const (
)

var SupportedAlertmanagerAPIVersions = []AlertmanagerAPIVersion{
-   AlertmanagerAPIVersionV1, AlertmanagerAPIVersionV2,
+   AlertmanagerAPIVersionV2,
}

// AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with.

@@ -1038,7 +1086,7 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
        c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil

    if httpClientConfigAuthEnabled && c.SigV4Config != nil {
-       return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
+       return errors.New("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
    }

    // Check for users putting URLs in target groups.
@@ -1368,9 +1416,20 @@ func getGoGCEnv() int {
    return DefaultRuntimeConfig.GoGC
}

+type translationStrategyOption string
+
+var (
+   // NoUTF8EscapingWithSuffixes will keep UTF-8 characters as they are, units and type suffixes will still be added.
+   NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes"
+   // UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus.
+   // This option will translate all UTF-8 characters to underscores, while adding units and type suffixes.
+   UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes"
+)
+
// OTLPConfig is the configuration for writing to the OTLP endpoint.
type OTLPConfig struct {
    PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"`
+   TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"`
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.

@@ -1386,7 +1445,7 @@ func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
    for i, attr := range c.PromoteResourceAttributes {
        attr = strings.TrimSpace(attr)
        if attr == "" {
-           err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute"))
+           err = errors.Join(err, errors.New("empty promoted OTel resource attribute"))
            continue
        }
        if _, exists := seen[attr]; exists {
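A hedged sketch of how the new translation_strategy option surfaces to users; `Load`, `OTLPConfig` and the strategy names appear in this diff, everything else in the snippet is illustrative:

```go
package example

import (
	"fmt"

	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/config"
)

const cfg = `
otlp:
  translation_strategy: NoUTF8EscapingWithSuffixes
`

func Example() {
	c, err := config.Load(cfg, promslog.NewNopLogger())
	if err != nil {
		panic(err)
	}
	// Under the default (UTF-8) name validation scheme this parses; combining
	// it with metric_name_validation_scheme: legacy is rejected, as the new
	// otlp_allow_utf8.incompatible.yml test data below shows.
	fmt.Println(c.OTLPConfig.TranslationStrategy) // NoUTF8EscapingWithSuffixes
}
```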
@@ -142,7 +142,7 @@ var expectedConf = &Config{
            },
        },
        FollowRedirects: true,
-       EnableHTTP2:     true,
+       EnableHTTP2:     false,
    },
},
{

@@ -158,7 +158,7 @@ var expectedConf = &Config{
            KeyFile: filepath.FromSlash("testdata/valid_key_file"),
        },
        FollowRedirects: true,
-       EnableHTTP2:     true,
+       EnableHTTP2:     false,
    },
    Headers: map[string]string{"name": "value"},
},

@@ -168,6 +168,7 @@ var expectedConf = &Config{
    PromoteResourceAttributes: []string{
        "k8s.cluster.name", "k8s.job.name", "k8s.namespace.name",
    },
+   TranslationStrategy: UnderscoreEscapingWithSuffixes,
},

RemoteReadConfigs: []*RemoteReadConfig{

@@ -218,6 +219,7 @@ var expectedConf = &Config{
    LabelNameLengthLimit:  globLabelNameLengthLimit,
    LabelValueLengthLimit: globLabelValueLengthLimit,
    ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+   ScrapeFallbackProtocol: PrometheusText0_0_4,
    ScrapeFailureLogFile:  "testdata/fail_prom.log",

    MetricsPath: DefaultScrapeConfig.MetricsPath,
@@ -1500,8 +1502,13 @@ var expectedConf = &Config{
    },
}

+func TestYAMLNotLongerSupportedAMApi(t *testing.T) {
+   _, err := LoadFile("testdata/config_with_no_longer_supported_am_api_config.yml", false, promslog.NewNopLogger())
+   require.Error(t, err)
+}
+
func TestYAMLRoundtrip(t *testing.T) {
-   want, err := LoadFile("testdata/roundtrip.good.yml", false, false, promslog.NewNopLogger())
+   want, err := LoadFile("testdata/roundtrip.good.yml", false, promslog.NewNopLogger())
    require.NoError(t, err)

    out, err := yaml.Marshal(want)

@@ -1514,7 +1521,7 @@ func TestYAMLRoundtrip(t *testing.T) {
}

func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
-   want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, promslog.NewNopLogger())
+   want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, promslog.NewNopLogger())
    require.NoError(t, err)

    out, err := yaml.Marshal(want)

@@ -1529,7 +1536,7 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) {

func TestOTLPSanitizeResourceAttributes(t *testing.T) {
    t.Run("good config", func(t *testing.T) {
-       want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, promslog.NewNopLogger())
+       want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, promslog.NewNopLogger())
        require.NoError(t, err)

        out, err := yaml.Marshal(want)

@@ -1541,25 +1548,87 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) {
    })

    t.Run("bad config", func(t *testing.T) {
-       _, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, promslog.NewNopLogger())
+       _, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, promslog.NewNopLogger())
        require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`)
        require.ErrorContains(t, err, `empty promoted OTel resource attribute`)
    })
}
func TestOTLPAllowUTF8(t *testing.T) {
    t.Run("good config", func(t *testing.T) {
        fpath := filepath.Join("testdata", "otlp_allow_utf8.good.yml")
        verify := func(t *testing.T, conf *Config, err error) {
            t.Helper()
            require.NoError(t, err)
            require.Equal(t, NoUTF8EscapingWithSuffixes, conf.OTLPConfig.TranslationStrategy)
        }

        t.Run("LoadFile", func(t *testing.T) {
            conf, err := LoadFile(fpath, false, promslog.NewNopLogger())
            verify(t, conf, err)
        })
        t.Run("Load", func(t *testing.T) {
            content, err := os.ReadFile(fpath)
            require.NoError(t, err)
            conf, err := Load(string(content), promslog.NewNopLogger())
            verify(t, conf, err)
        })
    })

    t.Run("incompatible config", func(t *testing.T) {
        fpath := filepath.Join("testdata", "otlp_allow_utf8.incompatible.yml")
        verify := func(t *testing.T, err error) {
            t.Helper()
            require.ErrorContains(t, err, `OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled`)
        }

        t.Run("LoadFile", func(t *testing.T) {
            _, err := LoadFile(fpath, false, promslog.NewNopLogger())
            verify(t, err)
        })
        t.Run("Load", func(t *testing.T) {
            content, err := os.ReadFile(fpath)
            require.NoError(t, err)
            _, err = Load(string(content), promslog.NewNopLogger())
            t.Log("err", err)
            verify(t, err)
        })
    })

    t.Run("bad config", func(t *testing.T) {
        fpath := filepath.Join("testdata", "otlp_allow_utf8.bad.yml")
        verify := func(t *testing.T, err error) {
            t.Helper()
            require.ErrorContains(t, err, `unsupported OTLP translation strategy "Invalid"`)
        }

        t.Run("LoadFile", func(t *testing.T) {
            _, err := LoadFile(fpath, false, promslog.NewNopLogger())
            verify(t, err)
        })
        t.Run("Load", func(t *testing.T) {
            content, err := os.ReadFile(fpath)
            require.NoError(t, err)
            _, err = Load(string(content), promslog.NewNopLogger())
            verify(t, err)
        })
    })
}
|
||||
|
||||
func TestLoadConfig(t *testing.T) {
|
||||
// Parse a valid file that sets a global scrape timeout. This tests whether parsing
|
||||
// an overwritten default field in the global config permanently changes the default.
|
||||
_, err := LoadFile("testdata/global_timeout.good.yml", false, false, promslog.NewNopLogger())
|
||||
_, err := LoadFile("testdata/global_timeout.good.yml", false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
|
||||
c, err := LoadFile("testdata/conf.good.yml", false, false, promslog.NewNopLogger())
|
||||
c, err := LoadFile("testdata/conf.good.yml", false, promslog.NewNopLogger())
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedConf, c)
|
||||
}
|
||||
|
||||
func TestScrapeIntervalLarger(t *testing.T) {
|
||||
c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, promslog.NewNopLogger())
|
||||
c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
require.Len(t, c.ScrapeConfigs, 1)
|
||||
for _, sc := range c.ScrapeConfigs {
|
||||
|
@ -1569,7 +1638,7 @@ func TestScrapeIntervalLarger(t *testing.T) {
|
|||
|
||||
// YAML marshaling must not reveal authentication credentials.
|
||||
func TestElideSecrets(t *testing.T) {
|
||||
c, err := LoadFile("testdata/conf.good.yml", false, false, promslog.NewNopLogger())
|
||||
c, err := LoadFile("testdata/conf.good.yml", false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
|
||||
secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`)
|
||||
|
@ -1586,31 +1655,31 @@ func TestElideSecrets(t *testing.T) {
|
|||
|
||||
func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) {
|
||||
// Parse a valid file that sets a rule files with an absolute path
|
||||
c, err := LoadFile(ruleFilesConfigFile, false, false, promslog.NewNopLogger())
|
||||
c, err := LoadFile(ruleFilesConfigFile, false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, ruleFilesExpectedConf, c)
|
||||
}
|
||||
|
||||
func TestKubernetesEmptyAPIServer(t *testing.T) {
|
||||
_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, promslog.NewNopLogger())
|
||||
_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestKubernetesWithKubeConfig(t *testing.T) {
|
||||
_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, promslog.NewNopLogger())
|
||||
_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestKubernetesSelectors(t *testing.T) {
|
||||
_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, promslog.NewNopLogger())
|
||||
_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, promslog.NewNopLogger())
|
||||
_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, promslog.NewNopLogger())
|
||||
_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, promslog.NewNopLogger())
|
||||
_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, promslog.NewNopLogger())
|
||||
_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
|
@@ -2080,12 +2149,20 @@ var expectedErrors = []struct {
    },
    {
        filename: "scrape_config_files_scrape_protocols.bad.yml",
-       errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4] for scrape config with job name "node"`,
+       errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4 PrometheusText1.0.0] for scrape config with job name "node"`,
    },
    {
        filename: "scrape_config_files_scrape_protocols2.bad.yml",
        errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols2.bad.yml: duplicated protocol in scrape_protocols, got [OpenMetricsText1.0.0 PrometheusProto OpenMetricsText1.0.0] for scrape config with job name "node"`,
    },
+   {
+       filename: "scrape_config_files_fallback_scrape_protocol1.bad.yml",
+       errMsg:   `parsing YAML file testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml: invalid fallback_scrape_protocol for scrape config with job name "node": unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4 PrometheusText1.0.0]`,
+   },
+   {
+       filename: "scrape_config_files_fallback_scrape_protocol2.bad.yml",
+       errMsg:   `unmarshal errors`,
+   },
}
|
||||
|
||||
func TestBadConfigs(t *testing.T) {
|
||||
|
@ -2094,7 +2171,7 @@ func TestBadConfigs(t *testing.T) {
|
|||
model.NameValidationScheme = model.UTF8Validation
|
||||
}()
|
||||
for _, ee := range expectedErrors {
|
||||
_, err := LoadFile("testdata/"+ee.filename, false, false, promslog.NewNopLogger())
|
||||
_, err := LoadFile("testdata/"+ee.filename, false, promslog.NewNopLogger())
|
||||
require.ErrorContains(t, err, ee.errMsg,
|
||||
"Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
|
||||
}
|
||||
|
@ -2125,7 +2202,7 @@ func TestBadStaticConfigsYML(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestEmptyConfig(t *testing.T) {
|
||||
c, err := Load("", false, promslog.NewNopLogger())
|
||||
c, err := Load("", promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
exp := DefaultConfig
|
||||
require.Equal(t, exp, *c)
|
||||
|
@ -2135,38 +2212,34 @@ func TestExpandExternalLabels(t *testing.T) {
|
|||
// Cleanup ant TEST env variable that could exist on the system.
|
||||
os.Setenv("TEST", "")
|
||||
|
||||
c, err := LoadFile("testdata/external_labels.good.yml", false, false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels)
|
||||
|
||||
c, err = LoadFile("testdata/external_labels.good.yml", false, true, promslog.NewNopLogger())
|
||||
c, err := LoadFile("testdata/external_labels.good.yml", false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
|
||||
|
||||
os.Setenv("TEST", "TestValue")
|
||||
c, err = LoadFile("testdata/external_labels.good.yml", false, true, promslog.NewNopLogger())
|
||||
c, err = LoadFile("testdata/external_labels.good.yml", false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
|
||||
}
|
||||
|
||||
func TestAgentMode(t *testing.T) {
|
||||
_, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, promslog.NewNopLogger())
|
||||
_, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, promslog.NewNopLogger())
|
||||
require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
|
||||
|
||||
_, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, promslog.NewNopLogger())
|
||||
_, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, promslog.NewNopLogger())
|
||||
require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
|
||||
|
||||
_, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, promslog.NewNopLogger())
|
||||
_, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, promslog.NewNopLogger())
|
||||
require.ErrorContains(t, err, "field rule_files is not allowed in agent mode")
|
||||
|
||||
_, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, promslog.NewNopLogger())
|
||||
_, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, promslog.NewNopLogger())
|
||||
require.ErrorContains(t, err, "field remote_read is not allowed in agent mode")
|
||||
|
||||
c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, promslog.NewNopLogger())
|
||||
c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, c.RemoteWriteConfigs)
|
||||
|
||||
c, err = LoadFile("testdata/agent_mode.good.yml", true, false, promslog.NewNopLogger())
|
||||
c, err = LoadFile("testdata/agent_mode.good.yml", true, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
require.Len(t, c.RemoteWriteConfigs, 1)
|
||||
require.Equal(
|
||||
|
@ -2177,7 +2250,7 @@ func TestAgentMode(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestEmptyGlobalBlock(t *testing.T) {
|
||||
c, err := Load("global:\n", false, promslog.NewNopLogger())
|
||||
c, err := Load("global:\n", promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
exp := DefaultConfig
|
||||
exp.Runtime = DefaultRuntimeConfig
|
||||
|
@ -2332,7 +2405,7 @@ func TestGetScrapeConfigs(t *testing.T) {
|
|||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
c, err := LoadFile(tc.configFile, false, false, promslog.NewNopLogger())
|
||||
c, err := LoadFile(tc.configFile, false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
|
||||
scfgs, err := c.GetScrapeConfigs()
|
||||
|
@ -2350,7 +2423,7 @@ func kubernetesSDHostURL() config.URL {
|
|||
}
|
||||
|
||||
func TestScrapeConfigDisableCompression(t *testing.T) {
|
||||
want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, promslog.NewNopLogger())
|
||||
want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
|
||||
out, err := yaml.Marshal(want)
|
||||
|
@ -2397,7 +2470,7 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
|
|||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, promslog.NewNopLogger())
|
||||
want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, promslog.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
|
||||
out, err := yaml.Marshal(want)
|
||||
|
@ -2410,3 +2483,54 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestScrapeProtocolHeader(t *testing.T) {
    tests := []struct {
        name          string
        proto         ScrapeProtocol
        expectedValue string
    }{
        {
            name:          "blank",
            proto:         ScrapeProtocol(""),
            expectedValue: "",
        },
        {
            name:          "invalid",
            proto:         ScrapeProtocol("invalid"),
            expectedValue: "",
        },
        {
            name:          "prometheus protobuf",
            proto:         PrometheusProto,
            expectedValue: "application/vnd.google.protobuf",
        },
        {
            name:          "prometheus text 0.0.4",
            proto:         PrometheusText0_0_4,
            expectedValue: "text/plain",
        },
        {
            name:          "prometheus text 1.0.0",
            proto:         PrometheusText1_0_0,
            expectedValue: "text/plain",
        },
        {
            name:          "openmetrics 0.0.1",
            proto:         OpenMetricsText0_0_1,
            expectedValue: "application/openmetrics-text",
        },
        {
            name:          "openmetrics 1.0.0",
            proto:         OpenMetricsText1_0_0,
            expectedValue: "application/openmetrics-text",
        },
    }
    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            mediaType := tc.proto.HeaderMediaType()

            require.Equal(t, tc.expectedValue, mediaType)
        })
    }
}
config/testdata/conf.good.yml | 2

@@ -74,6 +74,8 @@ scrape_configs:
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

+   fallback_scrape_protocol: PrometheusText0.0.4
+
    scrape_failure_log_file: fail_prom.log
    file_sd_configs:
      - files:

config/testdata/config_with_deprecated_am_api_config.yml | 7 (new file)

@@ -0,0 +1,7 @@
alerting:
  alertmanagers:
    - scheme: http
      api_version: v1
      file_sd_configs:
        - files:
            - nonexistent_file.yml

config/testdata/otlp_allow_utf8.bad.yml | 4 (new file)

@@ -0,0 +1,4 @@
global:
  metric_name_validation_scheme: legacy
otlp:
  translation_strategy: Invalid

config/testdata/otlp_allow_utf8.good.yml | 2 (new file)

@@ -0,0 +1,2 @@
otlp:
  translation_strategy: NoUTF8EscapingWithSuffixes

config/testdata/otlp_allow_utf8.incompatible.yml | 4 (new file)

@@ -0,0 +1,4 @@
global:
  metric_name_validation_scheme: legacy
otlp:
  translation_strategy: NoUTF8EscapingWithSuffixes

config/testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml | 5 (new file)

@@ -0,0 +1,5 @@
scrape_configs:
  - job_name: node
    fallback_scrape_protocol: "prometheusproto"
    static_configs:
      - targets: ['localhost:8080']

config/testdata/scrape_config_files_fallback_scrape_protocol2.bad.yml | 5 (new file)

@@ -0,0 +1,5 @@
scrape_configs:
  - job_name: node
    fallback_scrape_protocol: ["OpenMetricsText1.0.0", "PrometheusText0.0.4"]
    static_configs:
      - targets: ['localhost:8080']
@@ -30,6 +30,7 @@ import (
    "github.com/aws/aws-sdk-go/aws/ec2metadata"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ec2"
+   "github.com/aws/aws-sdk-go/service/ec2/ec2iface"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/common/config"
    "github.com/prometheus/common/model"

@@ -148,7 +149,7 @@ type EC2Discovery struct {
    *refresh.Discovery
    logger *slog.Logger
    cfg    *EC2SDConfig
-   ec2    *ec2.EC2
+   ec2    ec2iface.EC2API

    // azToAZID maps this account's availability zones to their underlying AZ
    // ID, e.g. eu-west-2a -> euw2-az2. Refreshes are performed sequentially, so

@@ -160,7 +161,7 @@ type EC2Discovery struct {
func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
    m, ok := metrics.(*ec2Metrics)
    if !ok {
-       return nil, fmt.Errorf("invalid discovery metrics type")
+       return nil, errors.New("invalid discovery metrics type")
    }

    if logger == nil {

@@ -182,7 +183,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.D
    return d, nil
}

-func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) {
+func (d *EC2Discovery) ec2Client(context.Context) (ec2iface.EC2API, error) {
    if d.ec2 != nil {
        return d.ec2, nil
    }
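Switching the stored client from a concrete *ec2.EC2 to the ec2iface.EC2API interface is what makes the discovery code unit-testable against a fake; a minimal sketch of the idea (the complete version is the mockEC2Client in the new ec2_test.go below, the fakeEC2 name here is illustrative):

```go
package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)

// fakeEC2 embeds the interface, so a test only implements the calls it needs;
// any other method would panic if it were reached.
type fakeEC2 struct {
	ec2iface.EC2API
}

func (fakeEC2) DescribeAvailabilityZonesWithContext(_ aws.Context, _ *ec2.DescribeAvailabilityZonesInput, _ ...request.Option) (*ec2.DescribeAvailabilityZonesOutput, error) {
	return &ec2.DescribeAvailabilityZonesOutput{}, nil
}
```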
discovery/aws/ec2_test.go | 434 (new file)

@@ -0,0 +1,434 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/goleak"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
)
|
||||
|
||||
// Helper function to get pointers on literals.
|
||||
// NOTE: this is common between a few tests. In the future it might worth to move this out into a separate package.
|
||||
func strptr(str string) *string {
|
||||
return &str
|
||||
}
|
||||
|
||||
func boolptr(b bool) *bool {
|
||||
return &b
|
||||
}
|
||||
|
||||
func int64ptr(i int64) *int64 {
|
||||
return &i
|
||||
}
|
||||
|
||||
// Struct for test data.
|
||||
type ec2DataStore struct {
|
||||
region string
|
||||
|
||||
azToAZID map[string]string
|
||||
|
||||
ownerID string
|
||||
|
||||
instances []*ec2.Instance
|
||||
}
|
||||
|
||||
// The tests itself.
|
||||
func TestMain(m *testing.M) {
|
||||
goleak.VerifyTestMain(m)
|
||||
}
|
||||
|
||||
func TestEC2DiscoveryRefreshAZIDs(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// iterate through the test cases
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
shouldFail bool
|
||||
ec2Data *ec2DataStore
|
||||
}{
|
||||
{
|
||||
name: "Normal",
|
||||
shouldFail: false,
|
||||
ec2Data: &ec2DataStore{
|
||||
azToAZID: map[string]string{
|
||||
"azname-a": "azid-1",
|
||||
"azname-b": "azid-2",
|
||||
"azname-c": "azid-3",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "HandleError",
|
||||
shouldFail: true,
|
||||
ec2Data: &ec2DataStore{},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
client := newMockEC2Client(tt.ec2Data)
|
||||
|
||||
d := &EC2Discovery{
|
||||
ec2: client,
|
||||
}
|
||||
|
||||
err := d.refreshAZIDs(ctx)
|
||||
if tt.shouldFail {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, client.ec2Data.azToAZID, d.azToAZID)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEC2DiscoveryRefresh(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// iterate through the test cases
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
ec2Data *ec2DataStore
|
||||
expected []*targetgroup.Group
|
||||
}{
|
||||
{
|
||||
name: "NoPrivateIp",
|
||||
ec2Data: &ec2DataStore{
|
||||
region: "region-noprivateip",
|
||||
azToAZID: map[string]string{
|
||||
"azname-a": "azid-1",
|
||||
"azname-b": "azid-2",
|
||||
"azname-c": "azid-3",
|
||||
},
|
||||
instances: []*ec2.Instance{
|
||||
{
|
||||
InstanceId: strptr("instance-id-noprivateip"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []*targetgroup.Group{
|
||||
{
|
||||
Source: "region-noprivateip",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NoVpc",
|
||||
ec2Data: &ec2DataStore{
|
||||
region: "region-novpc",
|
||||
azToAZID: map[string]string{
|
||||
"azname-a": "azid-1",
|
||||
"azname-b": "azid-2",
|
||||
"azname-c": "azid-3",
|
||||
},
|
||||
ownerID: "owner-id-novpc",
|
||||
instances: []*ec2.Instance{
|
||||
{
|
||||
// set every possible options and test them here
|
||||
Architecture: strptr("architecture-novpc"),
|
||||
ImageId: strptr("ami-novpc"),
|
||||
InstanceId: strptr("instance-id-novpc"),
|
||||
InstanceLifecycle: strptr("instance-lifecycle-novpc"),
|
||||
InstanceType: strptr("instance-type-novpc"),
|
||||
Placement: &ec2.Placement{AvailabilityZone: strptr("azname-b")},
|
||||
Platform: strptr("platform-novpc"),
|
||||
PrivateDnsName: strptr("private-dns-novpc"),
|
||||
PrivateIpAddress: strptr("1.2.3.4"),
|
||||
PublicDnsName: strptr("public-dns-novpc"),
|
||||
PublicIpAddress: strptr("42.42.42.2"),
|
||||
State: &ec2.InstanceState{Name: strptr("running")},
|
||||
// test tags once and for all
|
||||
Tags: []*ec2.Tag{
|
||||
{Key: strptr("tag-1-key"), Value: strptr("tag-1-value")},
|
||||
{Key: strptr("tag-2-key"), Value: strptr("tag-2-value")},
|
||||
nil,
|
||||
{Value: strptr("tag-4-value")},
|
||||
{Key: strptr("tag-5-key")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []*targetgroup.Group{
|
||||
{
|
||||
Source: "region-novpc",
|
||||
Targets: []model.LabelSet{
|
||||
{
|
||||
"__address__": model.LabelValue("1.2.3.4:4242"),
|
||||
"__meta_ec2_ami": model.LabelValue("ami-novpc"),
|
||||
"__meta_ec2_architecture": model.LabelValue("architecture-novpc"),
|
||||
"__meta_ec2_availability_zone": model.LabelValue("azname-b"),
|
||||
"__meta_ec2_availability_zone_id": model.LabelValue("azid-2"),
|
||||
"__meta_ec2_instance_id": model.LabelValue("instance-id-novpc"),
|
||||
"__meta_ec2_instance_lifecycle": model.LabelValue("instance-lifecycle-novpc"),
|
||||
"__meta_ec2_instance_type": model.LabelValue("instance-type-novpc"),
|
||||
"__meta_ec2_instance_state": model.LabelValue("running"),
|
||||
"__meta_ec2_owner_id": model.LabelValue("owner-id-novpc"),
|
||||
"__meta_ec2_platform": model.LabelValue("platform-novpc"),
|
||||
"__meta_ec2_private_dns_name": model.LabelValue("private-dns-novpc"),
|
||||
"__meta_ec2_private_ip": model.LabelValue("1.2.3.4"),
|
||||
"__meta_ec2_public_dns_name": model.LabelValue("public-dns-novpc"),
|
||||
"__meta_ec2_public_ip": model.LabelValue("42.42.42.2"),
|
||||
"__meta_ec2_region": model.LabelValue("region-novpc"),
|
||||
"__meta_ec2_tag_tag_1_key": model.LabelValue("tag-1-value"),
|
||||
"__meta_ec2_tag_tag_2_key": model.LabelValue("tag-2-value"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Ipv4",
|
||||
ec2Data: &ec2DataStore{
|
||||
region: "region-ipv4",
|
||||
azToAZID: map[string]string{
|
||||
"azname-a": "azid-1",
|
||||
"azname-b": "azid-2",
|
||||
"azname-c": "azid-3",
|
||||
},
|
||||
instances: []*ec2.Instance{
|
||||
{
|
||||
// just the minimum needed for the refresh work
|
||||
ImageId: strptr("ami-ipv4"),
|
||||
InstanceId: strptr("instance-id-ipv4"),
|
||||
InstanceType: strptr("instance-type-ipv4"),
|
||||
Placement: &ec2.Placement{AvailabilityZone: strptr("azname-c")},
|
||||
PrivateIpAddress: strptr("5.6.7.8"),
|
||||
State: &ec2.InstanceState{Name: strptr("running")},
|
||||
SubnetId: strptr("azid-3"),
|
||||
VpcId: strptr("vpc-ipv4"),
|
||||
// network intefaces
|
||||
NetworkInterfaces: []*ec2.InstanceNetworkInterface{
|
||||
// interface without subnet -> should be ignored
|
||||
{
|
||||
Ipv6Addresses: []*ec2.InstanceIpv6Address{
|
||||
{
|
||||
Ipv6Address: strptr("2001:db8:1::1"),
|
||||
IsPrimaryIpv6: boolptr(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
// interface with subnet, no IPv6
|
||||
{
|
||||
Ipv6Addresses: []*ec2.InstanceIpv6Address{},
|
||||
SubnetId: strptr("azid-3"),
|
||||
},
|
||||
// interface with another subnet, no IPv6
|
||||
{
|
||||
Ipv6Addresses: []*ec2.InstanceIpv6Address{},
|
||||
SubnetId: strptr("azid-1"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []*targetgroup.Group{
|
||||
{
|
||||
Source: "region-ipv4",
|
||||
Targets: []model.LabelSet{
|
||||
{
|
||||
"__address__": model.LabelValue("5.6.7.8:4242"),
|
||||
"__meta_ec2_ami": model.LabelValue("ami-ipv4"),
|
||||
"__meta_ec2_availability_zone": model.LabelValue("azname-c"),
|
||||
"__meta_ec2_availability_zone_id": model.LabelValue("azid-3"),
|
||||
"__meta_ec2_instance_id": model.LabelValue("instance-id-ipv4"),
|
||||
"__meta_ec2_instance_state": model.LabelValue("running"),
|
||||
"__meta_ec2_instance_type": model.LabelValue("instance-type-ipv4"),
|
||||
"__meta_ec2_owner_id": model.LabelValue(""),
|
||||
"__meta_ec2_primary_subnet_id": model.LabelValue("azid-3"),
|
||||
"__meta_ec2_private_ip": model.LabelValue("5.6.7.8"),
|
||||
"__meta_ec2_region": model.LabelValue("region-ipv4"),
|
||||
"__meta_ec2_subnet_id": model.LabelValue(",azid-3,azid-1,"),
|
||||
"__meta_ec2_vpc_id": model.LabelValue("vpc-ipv4"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Ipv6",
|
||||
ec2Data: &ec2DataStore{
|
||||
region: "region-ipv6",
|
||||
azToAZID: map[string]string{
|
||||
"azname-a": "azid-1",
|
||||
"azname-b": "azid-2",
|
||||
"azname-c": "azid-3",
|
||||
},
|
||||
instances: []*ec2.Instance{
|
||||
{
|
||||
// just the minimum needed for the refresh work
|
||||
ImageId: strptr("ami-ipv6"),
|
||||
InstanceId: strptr("instance-id-ipv6"),
|
||||
InstanceType: strptr("instance-type-ipv6"),
|
||||
Placement: &ec2.Placement{AvailabilityZone: strptr("azname-b")},
|
||||
PrivateIpAddress: strptr("9.10.11.12"),
|
||||
State: &ec2.InstanceState{Name: strptr("running")},
|
||||
SubnetId: strptr("azid-2"),
|
||||
VpcId: strptr("vpc-ipv6"),
|
||||
// network intefaces
|
||||
NetworkInterfaces: []*ec2.InstanceNetworkInterface{
|
||||
// interface without primary IPv6, index 2
|
||||
{
|
||||
Attachment: &ec2.InstanceNetworkInterfaceAttachment{
|
||||
DeviceIndex: int64ptr(3),
|
||||
},
|
||||
Ipv6Addresses: []*ec2.InstanceIpv6Address{
|
||||
{
|
||||
Ipv6Address: strptr("2001:db8:2::1:1"),
|
||||
IsPrimaryIpv6: boolptr(false),
|
||||
},
|
||||
},
|
||||
SubnetId: strptr("azid-2"),
|
||||
},
|
||||
// interface with primary IPv6, index 1
|
||||
{
|
||||
Attachment: &ec2.InstanceNetworkInterfaceAttachment{
|
||||
DeviceIndex: int64ptr(1),
|
||||
},
|
||||
Ipv6Addresses: []*ec2.InstanceIpv6Address{
|
||||
{
|
||||
Ipv6Address: strptr("2001:db8:2::2:1"),
|
||||
IsPrimaryIpv6: boolptr(false),
|
||||
},
|
||||
{
|
||||
Ipv6Address: strptr("2001:db8:2::2:2"),
|
||||
IsPrimaryIpv6: boolptr(true),
|
||||
},
|
||||
},
|
||||
SubnetId: strptr("azid-2"),
|
||||
},
|
||||
// interface with primary IPv6, index 3
|
||||
{
|
||||
Attachment: &ec2.InstanceNetworkInterfaceAttachment{
|
||||
DeviceIndex: int64ptr(3),
|
||||
},
|
||||
Ipv6Addresses: []*ec2.InstanceIpv6Address{
|
||||
{
|
||||
Ipv6Address: strptr("2001:db8:2::3:1"),
|
||||
IsPrimaryIpv6: boolptr(true),
|
||||
},
|
||||
},
|
||||
SubnetId: strptr("azid-1"),
|
||||
},
|
||||
// interface without primary IPv6, index 0
|
||||
{
|
||||
Attachment: &ec2.InstanceNetworkInterfaceAttachment{
|
||||
DeviceIndex: int64ptr(0),
|
||||
},
|
||||
Ipv6Addresses: []*ec2.InstanceIpv6Address{},
|
||||
SubnetId: strptr("azid-3"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []*targetgroup.Group{
|
||||
{
|
||||
Source: "region-ipv6",
|
||||
Targets: []model.LabelSet{
|
||||
{
|
||||
"__address__": model.LabelValue("9.10.11.12:4242"),
|
||||
"__meta_ec2_ami": model.LabelValue("ami-ipv6"),
|
||||
"__meta_ec2_availability_zone": model.LabelValue("azname-b"),
|
||||
"__meta_ec2_availability_zone_id": model.LabelValue("azid-2"),
|
||||
"__meta_ec2_instance_id": model.LabelValue("instance-id-ipv6"),
|
||||
"__meta_ec2_instance_state": model.LabelValue("running"),
|
||||
"__meta_ec2_instance_type": model.LabelValue("instance-type-ipv6"),
|
||||
"__meta_ec2_ipv6_addresses": model.LabelValue(",2001:db8:2::1:1,2001:db8:2::2:1,2001:db8:2::2:2,2001:db8:2::3:1,"),
|
||||
"__meta_ec2_owner_id": model.LabelValue(""),
|
||||
"__meta_ec2_primary_ipv6_addresses": model.LabelValue(",,2001:db8:2::2:2,,2001:db8:2::3:1,"),
|
||||
"__meta_ec2_primary_subnet_id": model.LabelValue("azid-2"),
|
||||
"__meta_ec2_private_ip": model.LabelValue("9.10.11.12"),
|
||||
"__meta_ec2_region": model.LabelValue("region-ipv6"),
|
||||
"__meta_ec2_subnet_id": model.LabelValue(",azid-2,azid-1,azid-3,"),
|
||||
"__meta_ec2_vpc_id": model.LabelValue("vpc-ipv6"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
client := newMockEC2Client(tt.ec2Data)
|
||||
|
||||
d := &EC2Discovery{
|
||||
ec2: client,
|
||||
cfg: &EC2SDConfig{
|
||||
Port: 4242,
|
||||
Region: client.ec2Data.region,
|
||||
},
|
||||
}
|
||||
|
||||
g, err := d.refresh(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expected, g)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// EC2 client mock.
|
||||
type mockEC2Client struct {
|
||||
ec2iface.EC2API
|
||||
ec2Data ec2DataStore
|
||||
}
|
||||
|
||||
func newMockEC2Client(ec2Data *ec2DataStore) *mockEC2Client {
|
||||
client := mockEC2Client{
|
||||
ec2Data: *ec2Data,
|
||||
}
|
||||
return &client
|
||||
}
|
||||
|
||||
func (m *mockEC2Client) DescribeAvailabilityZonesWithContext(ctx aws.Context, input *ec2.DescribeAvailabilityZonesInput, opts ...request.Option) (*ec2.DescribeAvailabilityZonesOutput, error) {
|
||||
if len(m.ec2Data.azToAZID) == 0 {
|
||||
return nil, errors.New("No AZs found")
|
||||
}
|
||||
|
||||
azs := make([]*ec2.AvailabilityZone, len(m.ec2Data.azToAZID))
|
||||
|
||||
i := 0
|
||||
for k, v := range m.ec2Data.azToAZID {
|
||||
azs[i] = &ec2.AvailabilityZone{
|
||||
ZoneName: strptr(k),
|
||||
ZoneId: strptr(v),
|
||||
}
|
||||
i++
|
||||
}
|
||||
|
||||
return &ec2.DescribeAvailabilityZonesOutput{
|
||||
AvailabilityZones: azs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *mockEC2Client) DescribeInstancesPagesWithContext(ctx aws.Context, input *ec2.DescribeInstancesInput, fn func(*ec2.DescribeInstancesOutput, bool) bool, opts ...request.Option) error {
|
||||
r := ec2.Reservation{}
|
||||
r.SetInstances(m.ec2Data.instances)
|
||||
r.SetOwnerId(m.ec2Data.ownerID)
|
||||
|
||||
o := ec2.DescribeInstancesOutput{}
|
||||
o.SetReservations([]*ec2.Reservation{&r})
|
||||
|
||||
_ = fn(&o, true)
|
||||
|
||||
return nil
|
||||
}
|
|
@ -134,7 +134,7 @@ type LightsailDiscovery struct {
|
|||
func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
|
||||
m, ok := metrics.(*lightsailMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
|
|
|
@ -186,7 +186,7 @@ type Discovery struct {
|
|||
func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*azureMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
|
|
|
@ -189,7 +189,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*consulMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
|
|
|
@ -15,6 +15,7 @@ package digitalocean
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
|
@ -114,7 +115,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*digitaloceanMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
d := &Discovery{
|
||||
|
|
|
@ -121,7 +121,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*dnsMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
|
|
|
@ -15,7 +15,7 @@ package dns
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"errors"
|
||||
"log/slog"
|
||||
"net"
|
||||
"testing"
|
||||
|
@ -53,7 +53,7 @@ func TestDNS(t *testing.T) {
|
|||
Type: "A",
|
||||
},
|
||||
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
|
||||
return nil, fmt.Errorf("some error")
|
||||
return nil, errors.New("some error")
|
||||
},
|
||||
expected: []*targetgroup.Group{},
|
||||
},
|
||||
|
|
|
@ -16,7 +16,6 @@ package eureka
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
|
@ -129,7 +128,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*eurekaMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd")
|
||||
|
|
|
@ -184,7 +184,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
fm, ok := metrics.(*fileMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
|
|
|
@ -132,7 +132,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*gceMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
d := &Discovery{
|
||||
|
|
|
@ -138,7 +138,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
|
||||
m, ok := metrics.(*hetznerMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
r, err := newRefresher(conf, logger)
|
||||
|
|
|
@@ -41,8 +41,8 @@ import (
var (
    // DefaultSDConfig is the default HTTP SD configuration.
    DefaultSDConfig = SDConfig{
-       RefreshInterval:  model.Duration(60 * time.Second),
        HTTPClientConfig: config.DefaultHTTPClientConfig,
+       RefreshInterval:  model.Duration(60 * time.Second),
    }
    userAgent        = fmt.Sprintf("Prometheus/%s", version.Version)
    matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`)

@@ -86,17 +86,17 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
        return err
    }
    if c.URL == "" {
-       return fmt.Errorf("URL is missing")
+       return errors.New("URL is missing")
    }
    parsedURL, err := url.Parse(c.URL)
    if err != nil {
        return err
    }
    if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
-       return fmt.Errorf("URL scheme must be 'http' or 'https'")
+       return errors.New("URL scheme must be 'http' or 'https'")
    }
    if parsedURL.Host == "" {
-       return fmt.Errorf("host is missing in URL")
+       return errors.New("host is missing in URL")
    }
    return c.HTTPClientConfig.Validate()
}
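The URL checks above amount to a plain url.Parse plus scheme and host validation; a self-contained sketch of the same logic (the function name and package are illustrative, not part of this change):

```go
package example

import (
	"errors"
	"net/url"
)

// validateSDURL mirrors the checks done in SDConfig.UnmarshalYAML above.
func validateSDURL(raw string) error {
	if raw == "" {
		return errors.New("URL is missing")
	}
	u, err := url.Parse(raw)
	if err != nil {
		return err
	}
	if u.Scheme != "http" && u.Scheme != "https" {
		return errors.New("URL scheme must be 'http' or 'https'")
	}
	if u.Host == "" {
		return errors.New("host is missing in URL")
	}
	return nil
}
```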
@ -118,7 +118,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*httpMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
|
|
|
@ -15,7 +15,6 @@ package ionos
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
|
@ -46,7 +45,7 @@ type Discovery struct{}
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
|
||||
m, ok := metrics.(*ionosMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
if conf.ionosEndpoint == "" {
|
||||
|
|
|
@ -102,10 +102,7 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node
|
|||
return
|
||||
}
|
||||
|
||||
ep := &apiv1.Endpoints{}
|
||||
ep.Namespace = svc.Namespace
|
||||
ep.Name = svc.Name
|
||||
obj, exists, err := e.endpointsStore.Get(ep)
|
||||
obj, exists, err := e.endpointsStore.GetByKey(namespacedName(svc.Namespace, svc.Name))
|
||||
if exists && err == nil {
|
||||
e.enqueue(obj.(*apiv1.Endpoints))
|
||||
}
|
||||
|
@ -167,8 +164,11 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node
|
|||
e.enqueueNode(node.Name)
|
||||
},
|
||||
DeleteFunc: func(o interface{}) {
|
||||
node := o.(*apiv1.Node)
|
||||
e.enqueueNode(node.Name)
|
||||
nodeName, err := nodeName(o)
|
||||
if err != nil {
|
||||
l.Error("Error getting Node name", "err", err)
|
||||
}
|
||||
e.enqueueNode(nodeName)
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -454,11 +454,8 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
|
|||
if ref == nil || ref.Kind != "Pod" {
|
||||
return nil
|
||||
}
|
||||
p := &apiv1.Pod{}
|
||||
p.Namespace = ref.Namespace
|
||||
p.Name = ref.Name
|
||||
|
||||
obj, exists, err := e.podStore.Get(p)
|
||||
obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name))
|
||||
if err != nil {
|
||||
e.logger.Error("resolving pod ref failed", "err", err)
|
||||
return nil
|
||||
|
@ -470,11 +467,7 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
|
|||
}
|
||||
|
||||
func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
|
||||
svc := &apiv1.Service{}
|
||||
svc.Namespace = ns
|
||||
svc.Name = name
|
||||
|
||||
obj, exists, err := e.serviceStore.Get(svc)
|
||||
obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name))
|
||||
if err != nil {
|
||||
e.logger.Error("retrieving service failed", "err", err)
|
||||
return
|
||||
|
@ -482,7 +475,7 @@ func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
|
|||
if !exists {
|
||||
return
|
||||
}
|
||||
svc = obj.(*apiv1.Service)
|
||||
svc := obj.(*apiv1.Service)
|
||||
|
||||
tg.Labels = tg.Labels.Merge(serviceLabels(svc))
|
||||
}
|
||||
|
|
|
@ -18,10 +18,12 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
)
|
||||
|
@ -1257,3 +1259,22 @@ func TestEndpointsDiscoverySidecarContainer(t *testing.T) {
|
|||
},
|
||||
}.Run(t)
|
||||
}
|
||||
|
||||
func BenchmarkResolvePodRef(b *testing.B) {
|
||||
indexer := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, nil)
|
||||
e := &Endpoints{
|
||||
podStore: indexer,
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
p := e.resolvePodRef(&v1.ObjectReference{
|
||||
Kind: "Pod",
|
||||
Name: "testpod",
|
||||
Namespace: "foo",
|
||||
})
|
||||
require.Nil(b, p)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -145,8 +145,11 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
|
|||
e.enqueueNode(node.Name)
|
||||
},
|
||||
DeleteFunc: func(o interface{}) {
|
||||
node := o.(*apiv1.Node)
|
||||
e.enqueueNode(node.Name)
|
||||
nodeName, err := nodeName(o)
|
||||
if err != nil {
|
||||
l.Error("Error getting Node name", "err", err)
|
||||
}
|
||||
e.enqueueNode(nodeName)
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -464,11 +467,8 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
|
|||
if ref == nil || ref.Kind != "Pod" {
|
||||
return nil
|
||||
}
|
||||
p := &apiv1.Pod{}
|
||||
p.Namespace = ref.Namespace
|
||||
p.Name = ref.Name
|
||||
|
||||
obj, exists, err := e.podStore.Get(p)
|
||||
obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name))
|
||||
if err != nil {
|
||||
e.logger.Error("resolving pod ref failed", "err", err)
|
||||
return nil
|
||||
|
@ -481,19 +481,19 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
|
|||
|
||||
func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgroup.Group) {
|
||||
var (
|
||||
svc = &apiv1.Service{}
|
||||
found bool
|
||||
name string
|
||||
)
|
||||
svc.Namespace = esa.namespace()
|
||||
ns := esa.namespace()
|
||||
|
||||
// Every EndpointSlice object has the Service they belong to in the
|
||||
// kubernetes.io/service-name label.
|
||||
svc.Name, found = esa.labels()[esa.labelServiceName()]
|
||||
name, found = esa.labels()[esa.labelServiceName()]
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
|
||||
obj, exists, err := e.serviceStore.Get(svc)
|
||||
obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name))
|
||||
if err != nil {
|
||||
e.logger.Error("retrieving service failed", "err", err)
|
||||
return
|
||||
|
@ -501,7 +501,7 @@ func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgro
|
|||
if !exists {
|
||||
return
|
||||
}
|
||||
svc = obj.(*apiv1.Service)
|
||||
svc := obj.(*apiv1.Service)
|
||||
|
||||
tg.Labels = tg.Labels.Merge(serviceLabels(svc))
|
||||
}
|
||||
|
|
|
@ -173,7 +173,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||
return err
|
||||
}
|
||||
if c.Role == "" {
|
||||
return fmt.Errorf("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
|
||||
return errors.New("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
|
||||
}
|
||||
err = c.HTTPClientConfig.Validate()
|
||||
if err != nil {
|
||||
|
@ -181,20 +181,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||
}
|
||||
if c.APIServer.URL != nil && c.KubeConfig != "" {
|
||||
// Api-server and kubeconfig_file are mutually exclusive
|
||||
return fmt.Errorf("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
|
||||
return errors.New("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
|
||||
}
|
||||
if c.KubeConfig != "" && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
|
||||
// Kubeconfig_file and custom http config are mutually exclusive
|
||||
return fmt.Errorf("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
|
||||
return errors.New("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
|
||||
}
|
||||
if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
|
||||
return fmt.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
|
||||
return errors.New("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
|
||||
}
|
||||
if c.APIServer.URL != nil && c.NamespaceDiscovery.IncludeOwnNamespace {
|
||||
return fmt.Errorf("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
|
||||
return errors.New("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
|
||||
}
|
||||
if c.KubeConfig != "" && c.NamespaceDiscovery.IncludeOwnNamespace {
|
||||
return fmt.Errorf("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
|
||||
return errors.New("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
|
||||
}
|
||||
|
||||
foundSelectorRoles := make(map[Role]struct{})
|
||||
|
@ -288,7 +288,7 @@ func (d *Discovery) getNamespaces() []string {
|
|||
func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
|
||||
m, ok := metrics.(*kubernetesMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
if l == nil {
|
||||
|
@ -672,7 +672,7 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde
|
|||
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
|
||||
pod, ok := obj.(*apiv1.Pod)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("object is not a pod")
|
||||
return nil, errors.New("object is not a pod")
|
||||
}
|
||||
return []string{pod.Spec.NodeName}, nil
|
||||
}
|
||||
|
@ -686,7 +686,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
|
|||
indexers[podIndex] = func(obj interface{}) ([]string, error) {
|
||||
e, ok := obj.(*apiv1.Endpoints)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("object is not endpoints")
|
||||
return nil, errors.New("object is not endpoints")
|
||||
}
|
||||
var pods []string
|
||||
for _, target := range e.Subsets {
|
||||
|
@ -705,7 +705,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
|
|||
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
|
||||
e, ok := obj.(*apiv1.Endpoints)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("object is not endpoints")
|
||||
return nil, errors.New("object is not endpoints")
|
||||
}
|
||||
var nodes []string
|
||||
for _, target := range e.Subsets {
|
||||
|
@ -751,7 +751,7 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
|
|||
}
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("object is not an endpointslice")
|
||||
return nil, errors.New("object is not an endpointslice")
|
||||
}
|
||||
|
||||
return nodes, nil
|
||||
|
@ -804,3 +804,13 @@ func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta,
|
|||
func namespacedName(namespace, name string) string {
|
||||
return namespace + "/" + name
|
||||
}
|
||||
|
||||
// nodeName knows how to handle the cache.DeletedFinalStateUnknown tombstone.
|
||||
// It assumes the MetaNamespaceKeyFunc keyFunc is used, which uses the node name as the tombstone key.
|
||||
func nodeName(o interface{}) (string, error) {
|
||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(o)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return key, nil
|
||||
}
|
||||
|
|
|
@ -23,7 +23,9 @@ import (
|
|||
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/version"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
|
@ -320,3 +322,18 @@ func TestFailuresCountMetric(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeName(t *testing.T) {
|
||||
node := &apiv1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
},
|
||||
}
|
||||
name, err := nodeName(node)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "foo", name)
|
||||
|
||||
name, err = nodeName(cache.DeletedFinalStateUnknown{Key: "bar"})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "bar", name)
|
||||
}
|
||||
|
|
|
@ -82,7 +82,7 @@ func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.Co
|
|||
}
|
||||
|
||||
func (n *Node) enqueue(obj interface{}) {
|
||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
||||
key, err := nodeName(obj)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
|
|
@ -95,8 +95,11 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes cache.SharedIn
|
|||
p.enqueuePodsForNode(node.Name)
|
||||
},
|
||||
DeleteFunc: func(o interface{}) {
|
||||
node := o.(*apiv1.Node)
|
||||
p.enqueuePodsForNode(node.Name)
|
||||
nodeName, err := nodeName(o)
|
||||
if err != nil {
|
||||
l.Error("Error getting Node name", "err", err)
|
||||
}
|
||||
p.enqueuePodsForNode(nodeName)
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
|
|
|
@ -141,7 +141,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*linodeMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
d := &Discovery{
|
||||
|
|
|
@ -15,6 +15,7 @@ package discovery
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
@ -1209,9 +1210,9 @@ func TestGaugeFailedConfigs(t *testing.T) {
|
|||
|
||||
c := map[string]Configs{
|
||||
"prometheus": {
|
||||
errorConfig{fmt.Errorf("tests error 0")},
|
||||
errorConfig{fmt.Errorf("tests error 1")},
|
||||
errorConfig{fmt.Errorf("tests error 2")},
|
||||
errorConfig{errors.New("tests error 0")},
|
||||
errorConfig{errors.New("tests error 1")},
|
||||
errorConfig{errors.New("tests error 2")},
|
||||
},
|
||||
}
|
||||
discoveryManager.ApplyConfig(c)
|
||||
|
|
|
@ -143,7 +143,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*marathonMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd")
|
||||
|
|
|
@ -15,6 +15,7 @@ package moby
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
|
@ -110,7 +111,7 @@ func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error
|
|||
return err
|
||||
}
|
||||
if c.Host == "" {
|
||||
return fmt.Errorf("host missing")
|
||||
return errors.New("host missing")
|
||||
}
|
||||
if _, err = url.Parse(c.Host); err != nil {
|
||||
return err
|
||||
|
@ -131,7 +132,7 @@ type DockerDiscovery struct {
|
|||
func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
|
||||
m, ok := metrics.(*dockerMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
d := &DockerDiscovery{
|
||||
|
|
|
@ -15,6 +15,7 @@ package moby
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
|
@ -99,7 +100,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e
|
|||
return err
|
||||
}
|
||||
if c.Host == "" {
|
||||
return fmt.Errorf("host missing")
|
||||
return errors.New("host missing")
|
||||
}
|
||||
if _, err = url.Parse(c.Host); err != nil {
|
||||
return err
|
||||
|
@ -107,7 +108,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e
|
|||
switch c.Role {
|
||||
case "services", "nodes", "tasks":
|
||||
case "":
|
||||
return fmt.Errorf("role missing (one of: tasks, services, nodes)")
|
||||
return errors.New("role missing (one of: tasks, services, nodes)")
|
||||
default:
|
||||
return fmt.Errorf("invalid role %s, expected tasks, services, or nodes", c.Role)
|
||||
}
|
||||
|
@ -128,7 +129,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*dockerswarmMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
d := &Discovery{
|
||||
|
|
|
@ -124,7 +124,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*nomadMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
d := &Discovery{
|
||||
|
|
|
@ -145,7 +145,7 @@ type refresher interface {
|
|||
func NewDiscovery(conf *SDConfig, l *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
|
||||
m, ok := metrics.(*openstackMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
r, err := newRefresher(conf, l)
|
||||
|
|
|
@ -151,7 +151,7 @@ func newRefresher(conf *SDConfig, logger *slog.Logger) (refresher, error) {
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
|
||||
m, ok := metrics.(*ovhcloudMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
r, err := newRefresher(conf, logger)
|
||||
|
|
|
@ -17,6 +17,7 @@ import (
|
|||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
|
@ -109,20 +110,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||
return err
|
||||
}
|
||||
if c.URL == "" {
|
||||
return fmt.Errorf("URL is missing")
|
||||
return errors.New("URL is missing")
|
||||
}
|
||||
parsedURL, err := url.Parse(c.URL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
|
||||
return fmt.Errorf("URL scheme must be 'http' or 'https'")
|
||||
return errors.New("URL scheme must be 'http' or 'https'")
|
||||
}
|
||||
if parsedURL.Host == "" {
|
||||
return fmt.Errorf("host is missing in URL")
|
||||
return errors.New("host is missing in URL")
|
||||
}
|
||||
if c.Query == "" {
|
||||
return fmt.Errorf("query missing")
|
||||
return errors.New("query missing")
|
||||
}
|
||||
return c.HTTPClientConfig.Validate()
|
||||
}
|
||||
|
@ -142,7 +143,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*puppetdbMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
|
|
|
@ -15,7 +15,7 @@ package refresh
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -64,7 +64,7 @@ func TestRefresh(t *testing.T) {
|
|||
case 2:
|
||||
return tg2, nil
|
||||
}
|
||||
return nil, fmt.Errorf("some error")
|
||||
return nil, errors.New("some error")
|
||||
}
|
||||
interval := time.Millisecond
|
||||
|
||||
|
|
|
@ -267,7 +267,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
|
|||
func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManager) (map[string]DiscovererMetrics, error) {
|
||||
err := rmm.Register()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create service discovery refresh metrics")
|
||||
return nil, errors.New("failed to create service discovery refresh metrics")
|
||||
}
|
||||
|
||||
metrics := make(map[string]DiscovererMetrics)
|
||||
|
@ -275,7 +275,7 @@ func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManag
|
|||
currentSdMetrics := conf.NewDiscovererMetrics(registerer, rmm)
|
||||
err = currentSdMetrics.Register()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create service discovery metrics")
|
||||
return nil, errors.New("failed to create service discovery metrics")
|
||||
}
|
||||
metrics[conf.Name()] = currentSdMetrics
|
||||
}
|
||||
|
|
|
@ -188,7 +188,7 @@ type Discovery struct{}
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
|
||||
m, ok := metrics.(*scalewayMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
r, err := newRefresher(conf)
|
||||
|
|
|
@ -149,7 +149,7 @@ type Discovery struct {
|
|||
func New(logger *slog.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*tritonMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
tls, err := config.NewTLSConfig(&conf.TLSConfig)
|
||||
|
|
|
@ -215,7 +215,7 @@ func getEndpointInfoForSystems(
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*uyuniMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
apiURL, err := url.Parse(conf.Server)
|
||||
|
|
|
@ -15,6 +15,7 @@ package vultr
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
|
@ -117,7 +118,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*vultrMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
d := &Discovery{
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
package xds
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/url"
|
||||
|
@ -161,7 +162,7 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L
|
|||
func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) {
|
||||
m, ok := metrics.(*xdsMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
// Default to "prometheus" if hostname is unavailable.
|
||||
|
|
|
@ -17,6 +17,7 @@ The Prometheus monitoring server
|
|||
| <code class="text-nowrap">--config.file</code> | Prometheus configuration file path. | `prometheus.yml` |
|
||||
| <code class="text-nowrap">--config.auto-reload-interval</code> | Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes. | `30s` |
|
||||
| <code class="text-nowrap">--web.listen-address</code> <code class="text-nowrap">...<code class="text-nowrap"> | Address to listen on for UI, API, and telemetry. Can be repeated. | `0.0.0.0:9090` |
|
||||
| <code class="text-nowrap">--auto-gomemlimit</code> | Automatically set GOMEMLIMIT to match Linux container or system memory limit | `true` |
|
||||
| <code class="text-nowrap">--auto-gomemlimit.ratio</code> | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` |
|
||||
| <code class="text-nowrap">--web.config.file</code> | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | |
|
||||
| <code class="text-nowrap">--web.read-timeout</code> | Maximum duration before timing out read of the request, and closing idle connections. | `5m` |
|
||||
|
@ -58,7 +59,7 @@ The Prometheus monitoring server
|
|||
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
|
||||
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
|
||||
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
|
||||
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
|
||||
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
|
||||
| <code class="text-nowrap">--agent</code> | Run Prometheus in 'Agent mode'. | |
|
||||
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
|
||||
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |
|
||||
|
|
|
@ -462,6 +462,7 @@ Unit tests for rules.
|
|||
| Flag | Description | Default |
|
||||
| --- | --- | --- |
|
||||
| <code class="text-nowrap">--run</code> <code class="text-nowrap">...<code class="text-nowrap"> | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | |
|
||||
| <code class="text-nowrap">--debug</code> | Enable unit test debugging. | `false` |
|
||||
| <code class="text-nowrap">--diff</code> | [Experimental] Print colored differential output between expected & received output. | `false` |
|
||||
|
||||
|
||||
|
|
|
@ -21,10 +21,13 @@ An example rules file with an alert would be:
|
|||
```yaml
|
||||
groups:
|
||||
- name: example
|
||||
labels:
|
||||
team: myteam
|
||||
rules:
|
||||
- alert: HighRequestLatency
|
||||
expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5
|
||||
for: 10m
|
||||
keep_firing_for: 5m
|
||||
labels:
|
||||
severity: page
|
||||
annotations:
|
||||
|
@ -38,6 +41,13 @@ the alert continues to be active during each evaluation for 10 minutes before
|
|||
firing the alert. Elements that are active, but not firing yet, are in the pending state.
|
||||
Alerting rules without the `for` clause will become active on the first evaluation.
|
||||
|
||||
There is also an optional `keep_firing_for` clause that tells Prometheus to keep
|
||||
this alert firing for the specified duration after the firing condition was last met.
|
||||
This can be used to prevent situations such as flapping alerts or false resolutions
due to a temporary lack of data, etc. Alerting rules without the `keep_firing_for` clause
will deactivate on the first evaluation where the condition is not met (assuming
any optional `for` duration described above has been satisfied).
|
||||
|
||||
The `labels` clause allows specifying a set of additional labels to be attached
|
||||
to the alert. Any existing conflicting labels will be overwritten. The label
|
||||
values can be templated.
|
||||
|
|
|
@ -80,6 +80,10 @@ global:
|
|||
|
||||
# The labels to add to any time series or alerts when communicating with
|
||||
# external systems (federation, remote storage, Alertmanager).
|
||||
# Environment variable references `${var}` or `$var` are replaced according
|
||||
# to the values of the current environment variables.
|
||||
# References to undefined variables are replaced by the empty string.
|
||||
# The `$` character can be escaped by using `$$`.
|
||||
external_labels:
|
||||
[ <labelname>: <labelvalue> ... ]
|
||||
|
||||
|
@ -208,12 +212,18 @@ job_name: <job_name>
|
|||
|
||||
# The protocols to negotiate during a scrape with the client.
|
||||
# Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
|
||||
# OpenMetricsText1.0.0, PrometheusText0.0.4.
|
||||
# OpenMetricsText1.0.0, PrometheusText0.0.4, PrometheusText1.0.0.
|
||||
[ scrape_protocols: [<string>, ...] | default = <global_config.scrape_protocols> ]
|
||||
|
||||
# Whether to scrape a classic histogram that is also exposed as a native
|
||||
# Fallback protocol to use if a scrape returns blank, unparseable, or otherwise
|
||||
# invalid Content-Type.
|
||||
# Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
|
||||
# OpenMetricsText1.0.0, PrometheusText0.0.4, PrometheusText1.0.0.
|
||||
[ fallback_scrape_protocol: <string> ]
|
||||
|
||||
# Whether to scrape a classic histogram, even if it is also exposed as a native
|
||||
# histogram (has no effect without --enable-feature=native-histograms).
|
||||
[ scrape_classic_histograms: <boolean> | default = false ]
|
||||
[ always_scrape_classic_histograms: <boolean> | default = false ]
|
||||
|
||||
# The HTTP resource path on which to fetch metrics from targets.
|
||||
[ metrics_path: <path> | default = /metrics ]
|
||||
|
@ -2879,6 +2889,7 @@ metadata_config:
|
|||
|
||||
# HTTP client settings, including authentication methods (such as basic auth and
|
||||
# authorization), proxy configurations, TLS options, custom HTTP headers, etc.
|
||||
# enable_http2 defaults to false for remote-write.
|
||||
[ <http_config> ]
|
||||
```
|
||||
|
||||
|
@ -2930,8 +2941,6 @@ with this feature.
|
|||
|
||||
`tsdb` lets you configure the runtime-reloadable configuration settings of the TSDB.
|
||||
|
||||
NOTE: Out-of-order ingestion is an experimental feature, but you do not need any additional flag to enable it. Setting `out_of_order_time_window` to a positive duration enables it.
|
||||
|
||||
```yaml
|
||||
# Configures how old an out-of-order/out-of-bounds sample can be w.r.t. the TSDB max time.
|
||||
# An out-of-order/out-of-bounds sample is ingested into the TSDB as long as the timestamp
|
||||
|
|
|
@ -89,6 +89,11 @@ name: <string>
|
|||
# Offset the rule evaluation timestamp of this particular group by the specified duration into the past.
|
||||
[ query_offset: <duration> | default = global.rule_query_offset ]
|
||||
|
||||
# Labels to add or overwrite before storing the result for its rules.
|
||||
# Labels defined in <rule> will override the key if it has a collision.
|
||||
labels:
|
||||
[ <labelname>: <labelvalue> ]
|
||||
|
||||
rules:
|
||||
[ - <rule> ... ]
|
||||
```
|
||||
|
|
|
@ -11,15 +11,6 @@ Their behaviour can change in future releases which will be communicated via the
|
|||
You can enable them using the `--enable-feature` flag with a comma separated list of features.
|
||||
They may be enabled by default in future versions.
|
||||
|
||||
## Expand environment variables in external labels
|
||||
|
||||
`--enable-feature=expand-external-labels`
|
||||
|
||||
Replace `${var}` or `$var` in the [`external_labels`](configuration/configuration.md#configuration-file)
|
||||
values according to the values of the current environment variables. References
|
||||
to undefined variables are replaced by the empty string.
|
||||
The `$` character can be escaped by using `$$`.
|
||||
|
||||
## Exemplars storage
|
||||
|
||||
`--enable-feature=exemplar-storage`
|
||||
|
@ -32,9 +23,8 @@ Exemplar storage is implemented as a fixed size circular buffer that stores exem
|
|||
|
||||
`--enable-feature=memory-snapshot-on-shutdown`
|
||||
|
||||
This takes the snapshot of the chunks that are in memory along with the series information when shutting down and stores
|
||||
it on disk. This will reduce the startup time since the memory state can be restored with this snapshot and m-mapped
|
||||
chunks without the need of WAL replay.
|
||||
This takes a snapshot of the chunks that are in memory along with the series information when shutting down and stores it on disk. This will reduce the startup time since the memory state can now be restored with this snapshot
|
||||
and m-mapped chunks, while a WAL replay from disk is only needed for the parts of the WAL that are not part of the snapshot.
|
||||
|
||||
## Extra scrape metrics
|
||||
|
||||
|
@ -63,14 +53,6 @@ computed at all.
|
|||
|
||||
When enabled, GOMAXPROCS variable is automatically set to match Linux container CPU quota.
|
||||
|
||||
## Auto GOMEMLIMIT
|
||||
|
||||
`--enable-feature=auto-gomemlimit`
|
||||
|
||||
When enabled, the GOMEMLIMIT variable is automatically set to match the Linux container memory limit. If there is no container limit, or the process is running outside of containers, the system memory total is used.
|
||||
|
||||
There is also an additional tuning flag, `--auto-gomemlimit.ratio`, which allows controlling how much of the memory is used for Prometheus. The remainder is reserved for memory outside the process. For example, kernel page cache. Page cache is important for Prometheus TSDB query performance. The default is `0.9`, which means 90% of the memory limit will be used for Prometheus.
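A minimal sketch of the arithmetic behind the ratio, using a hypothetical container memory limit (this is not Prometheus's actual implementation, just an illustration of how the flag is applied):

```go
package main

import "fmt"

func main() {
	// Hypothetical cgroup memory limit of 8 GiB and the default ratio of 0.9.
	containerLimit := uint64(8 << 30)
	ratio := 0.9

	// GOMEMLIMIT is set to ratio * limit; the remaining 10% is left for memory
	// outside the Go runtime, such as the kernel page cache.
	gomemlimit := uint64(float64(containerLimit) * ratio)
	fmt.Printf("GOMEMLIMIT=%d bytes (~%.1f GiB)\n", gomemlimit, float64(gomemlimit)/(1<<30))
}
```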
|
||||
|
||||
## Native Histograms
|
||||
|
||||
`--enable-feature=native-histograms`
|
||||
|
@ -93,59 +75,7 @@ those classic histograms that do not come with a corresponding native
|
|||
histogram. However, if a native histogram is present, Prometheus will ignore
|
||||
the corresponding classic histogram, with the notable exception of exemplars,
|
||||
which are always ingested. To keep the classic histograms as well, enable
|
||||
`scrape_classic_histograms` in the scrape job.
|
||||
|
||||
_Note about the format of `le` and `quantile` label values:_
|
||||
|
||||
In certain situations, the protobuf parsing changes the number formatting of
|
||||
the `le` labels of classic histograms and the `quantile` labels of
|
||||
summaries. Typically, this happens if the scraped target is instrumented with
|
||||
[client_golang](https://github.com/prometheus/client_golang) provided that
|
||||
[promhttp.HandlerOpts.EnableOpenMetrics](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus/promhttp#HandlerOpts)
|
||||
is set to `false`. In such a case, integer label values are represented in the
|
||||
text format as such, e.g. `quantile="1"` or `le="2"`. However, the protobuf parsing
|
||||
changes the representation to float-like (following the OpenMetrics
|
||||
specification), so the examples above become `quantile="1.0"` and `le="2.0"` after
|
||||
ingestion into Prometheus, which changes the identity of the metric compared to
|
||||
what was ingested before via the text format.
|
||||
|
||||
The effect of this change is that alerts, recording rules and dashboards that
|
||||
directly reference label values as whole numbers such as `le="1"` will stop
|
||||
working.
|
||||
|
||||
Aggregation by the `le` and `quantile` labels for vectors that contain the old and
|
||||
new formatting will lead to unexpected results, and range vectors that span the
|
||||
transition between the different formatting will contain additional series.
|
||||
The most common use case for both is the quantile calculation via
|
||||
`histogram_quantile`, e.g.
|
||||
`histogram_quantile(0.95, sum by (le) (rate(histogram_bucket[10m])))`.
|
||||
The `histogram_quantile` function already tries to mitigate the effects to some
|
||||
extent, but there will be inaccuracies, in particular for shorter ranges that
|
||||
cover only a few samples.
|
||||
|
||||
Ways to deal with this change either globally or on a per metric basis:
|
||||
|
||||
- Fix references to integer `le`, `quantile` label values, but otherwise do
|
||||
nothing and accept that some queries that span the transition time will produce
|
||||
inaccurate or unexpected results.
|
||||
_This is the recommended solution, to get consistently normalized label values._
|
||||
Also Prometheus 3.0 is expected to enforce normalization of these label values.
|
||||
- Use `metric_relabel_config` to retain the old labels when scraping targets.
|
||||
This should **only** be applied to metrics that currently produce such labels.
|
||||
|
||||
<!-- The following config snippet is unit tested in scrape/scrape_test.go. -->
|
||||
```yaml
|
||||
metric_relabel_configs:
|
||||
- source_labels:
|
||||
- quantile
|
||||
target_label: quantile
|
||||
regex: (\d+)\.0+
|
||||
- source_labels:
|
||||
- le
|
||||
- __name__
|
||||
target_label: le
|
||||
regex: (\d+)\.0+;.*_bucket
|
||||
```
|
||||
`always_scrape_classic_histograms` in the scrape job.
|
||||
|
||||
## Experimental PromQL functions
|
||||
|
||||
|
|
|
@ -3,198 +3,198 @@ title: Migration
|
|||
sort_rank: 10
|
||||
---
|
||||
|
||||
# Prometheus 2.0 migration guide
|
||||
# Prometheus 3.0 migration guide
|
||||
|
||||
In line with our [stability promise](https://prometheus.io/blog/2016/07/18/prometheus-1-0-released/#fine-print),
|
||||
the Prometheus 2.0 release contains a number of backwards incompatible changes.
|
||||
This document offers guidance on migrating from Prometheus 1.8 to Prometheus 2.0 and newer versions.
|
||||
In line with our [stability promise](https://prometheus.io/docs/prometheus/latest/stability/),
|
||||
the Prometheus 3.0 release contains a number of backwards incompatible changes.
|
||||
This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0 and newer versions.
|
||||
|
||||
## Flags
|
||||
|
||||
The format of Prometheus command line flags has changed. Instead of a
|
||||
single dash, all flags now use a double dash. Common flags (`--config.file`,
|
||||
`--web.listen-address` and `--web.external-url`) remain but
|
||||
almost all storage-related flags have been removed.
|
||||
- The following feature flags have been removed and they have been added to the
|
||||
default behavior of Prometheus v3:
|
||||
- `promql-at-modifier`
|
||||
- `promql-negative-offset`
|
||||
- `remote-write-receiver`
|
||||
- `new-service-discovery-manager`
|
||||
- `expand-external-labels`
|
||||
Environment variable references `${var}` or `$var` in external label values
|
||||
are replaced according to the values of the current environment variables.
|
||||
References to undefined variables are replaced by the empty string.
|
||||
The `$` character can be escaped by using `$$`.
|
||||
- `no-default-scrape-port`
|
||||
Prometheus v3 will no longer add ports to scrape targets according to the
specified scheme. Targets will now appear in labels as configured.
If you rely on scrape targets like
`https://example.com/metrics` or `http://example.com/metrics` to be
represented as `https://example.com/metrics:443` and
`http://example.com/metrics:80` respectively, add the ports to your target URLs explicitly.
|
||||
- `agent`
|
||||
Instead use the dedicated `--agent` cli flag.
|
||||
|
||||
Some notable flags which have been removed:
|
||||
Prometheus v3 will log a warning if you continue to pass these to
|
||||
`--enable-feature`.
|
||||
|
||||
- `-alertmanager.url` In Prometheus 2.0, the command line flags for configuring
|
||||
a static Alertmanager URL have been removed. Alertmanager must now be
|
||||
discovered via service discovery, see [Alertmanager service discovery](#alertmanager-service-discovery).
|
||||
## Configuration
|
||||
|
||||
- `-log.format` In Prometheus 2.0 logs can only be streamed to standard error.
|
||||
|
||||
- `-query.staleness-delta` has been renamed to `--query.lookback-delta`; Prometheus
|
||||
2.0 introduces a new mechanism for handling staleness, see [staleness](querying/basics.md#staleness).
|
||||
|
||||
- `-storage.local.*` Prometheus 2.0 introduces a new storage engine; as such all
|
||||
flags relating to the old engine have been removed. For information on the
|
||||
new engine, see [Storage](#storage).
|
||||
|
||||
- `-storage.remote.*` Prometheus 2.0 has removed the deprecated remote
|
||||
storage flags, and will fail to start if they are supplied. To write to
|
||||
InfluxDB, Graphite, or OpenTSDB use the relevant storage adapter.
|
||||
|
||||
## Alertmanager service discovery
|
||||
|
||||
Alertmanager service discovery was introduced in Prometheus 1.4, allowing Prometheus
|
||||
to dynamically discover Alertmanager replicas using the same mechanism as scrape
|
||||
targets. In Prometheus 2.0, the command line flags for static Alertmanager config
|
||||
have been removed, so the following command line flag:
|
||||
|
||||
```
|
||||
./prometheus -alertmanager.url=http://alertmanager:9093/
|
||||
```
|
||||
|
||||
Would be replaced with the following in the `prometheus.yml` config file:
|
||||
|
||||
```yaml
|
||||
alerting:
|
||||
alertmanagers:
|
||||
- static_configs:
|
||||
- targets:
|
||||
- alertmanager:9093
|
||||
```
|
||||
|
||||
You can also use all the usual Prometheus service discovery integrations and
|
||||
relabeling in your Alertmanager configuration. This snippet instructs
|
||||
Prometheus to search for Kubernetes pods, in the `default` namespace, with the
|
||||
label `name: alertmanager` and with a non-empty port.
|
||||
|
||||
```yaml
|
||||
alerting:
|
||||
alertmanagers:
|
||||
- kubernetes_sd_configs:
|
||||
- role: pod
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_pod_label_name]
|
||||
regex: alertmanager
|
||||
action: keep
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
regex: default
|
||||
action: keep
|
||||
- source_labels: [__meta_kubernetes_pod_container_port_number]
|
||||
regex:
|
||||
action: drop
|
||||
```
|
||||
|
||||
## Recording rules and alerts
|
||||
|
||||
The format for configuring alerting and recording rules has been changed to YAML.
|
||||
An example of a recording rule and alert in the old format:
|
||||
|
||||
```
|
||||
job:request_duration_seconds:histogram_quantile99 =
|
||||
histogram_quantile(0.99, sum by (le, job) (rate(request_duration_seconds_bucket[1m])))
|
||||
|
||||
ALERT FrontendRequestLatency
|
||||
IF job:request_duration_seconds:histogram_quantile99{job="frontend"} > 0.1
|
||||
FOR 5m
|
||||
ANNOTATIONS {
|
||||
summary = "High frontend request latency",
|
||||
}
|
||||
```
|
||||
|
||||
Would look like this:
|
||||
|
||||
```yaml
|
||||
groups:
|
||||
- name: example.rules
|
||||
rules:
|
||||
- record: job:request_duration_seconds:histogram_quantile99
|
||||
expr: histogram_quantile(0.99, sum by (le, job) (rate(request_duration_seconds_bucket[1m])))
|
||||
- alert: FrontendRequestLatency
|
||||
expr: job:request_duration_seconds:histogram_quantile99{job="frontend"} > 0.1
|
||||
for: 5m
|
||||
annotations:
|
||||
summary: High frontend request latency
|
||||
```
|
||||
|
||||
To help with the change, the `promtool` tool has a mode to automate the rules conversion. Given a `.rules` file, it will output a `.rules.yml` file in the
|
||||
new format. For example:
|
||||
|
||||
```
|
||||
$ promtool update rules example.rules
|
||||
```
|
||||
|
||||
You will need to use `promtool` from [Prometheus 2.5](https://github.com/prometheus/prometheus/releases/tag/v2.5.0) as later versions no longer contain the above subcommand.
|
||||
|
||||
## Storage
|
||||
|
||||
The data format in Prometheus 2.0 has completely changed and is not backwards
|
||||
compatible with 1.8 and older versions. To retain access to your historic monitoring data we
|
||||
recommend you run a non-scraping Prometheus instance running at least version
|
||||
1.8.1 in parallel with your Prometheus 2.0 instance, and have the new server
|
||||
read existing data from the old one via the remote read protocol.
|
||||
|
||||
Your Prometheus 1.8 instance should be started with the following flags and a
config file containing only the `external_labels` setting (if any):
|
||||
|
||||
```
|
||||
$ ./prometheus-1.8.1.linux-amd64/prometheus -web.listen-address ":9094" -config.file old.yml
|
||||
```
|
||||
|
||||
Prometheus 2.0 can then be started (on the same machine) with the following flags:
|
||||
|
||||
```
|
||||
$ ./prometheus-2.0.0.linux-amd64/prometheus --config.file prometheus.yml
|
||||
```
|
||||
|
||||
Where `prometheus.yml` contains in addition to your full existing configuration, the stanza:
|
||||
|
||||
```yaml
|
||||
remote_read:
|
||||
- url: "http://localhost:9094/api/v1/read"
|
||||
```
|
||||
- The scrape job level configuration option `scrape_classic_histograms` has been
|
||||
renamed to `always_scrape_classic_histograms`. If you use the
|
||||
`--enable-feature=native-histograms` feature flag to ingest native histograms
|
||||
and you also want to ingest classic histograms that an endpoint might expose
|
||||
along with native histograms, be sure to add this configuration or change your
|
||||
configuration from the old name.
|
||||
- The default of `http_config.enable_http2` in `remote_write` items has been
changed to `false`. In Prometheus v2 the remote write HTTP client would
default to using HTTP/2. In order to parallelize multiple remote write queues
across multiple sockets it is preferable not to default to HTTP/2.
If you prefer to use HTTP/2 for remote write, you must now set
`http_config.enable_http2: true` in your `remote_write` configuration section.
|
||||
|
||||
## PromQL
|
||||
|
||||
The following features have been removed from PromQL:
|
||||
- The `.` pattern in regular expressions in PromQL matches newline characters.
|
||||
With this change, regular expressions like `.*` match strings that include
`\n`. This applies to matchers in queries and relabel configs. For example, the
following regular expressions now match the accompanying strings, whereas in
Prometheus v2 these combinations didn't match (a short Go sketch after this
list illustrates the difference).
|
||||
|
||||
- `drop_common_labels` function - the `without` aggregation modifier should be used
|
||||
instead.
|
||||
- `keep_common` aggregation modifier - the `by` modifier should be used instead.
|
||||
- `count_scalar` function - use cases are better handled by `absent()` or correct
|
||||
propagation of labels in operations.
|
||||
| Regex | Additional matches |
|
||||
| ----- | ------ |
|
||||
| ".*" | "foo\n", "Foo\nBar" |
|
||||
| "foo.?bar" | "foo\nbar" |
|
||||
| "foo.+bar" | "foo\nbar" |
|
||||
|
||||
See [issue #3060](https://github.com/prometheus/prometheus/issues/3060) for more
|
||||
details.
|
||||
If you want Prometheus v3 to behave like v2 did, you will have to change your
|
||||
regular expressions by replacing all `.` patterns with `[^\n]`, e.g.
|
||||
`foo[^\n]*`.
|
||||
- Lookback and range selectors are left open and right closed (previously left
|
||||
closed and right closed). This change affects queries when the evaluation time
|
||||
perfectly aligns with the sample timestamps. For example, assume querying a
timeseries with evenly spaced samples exactly 1 minute apart. Before Prometheus
3.x, a range query with `5m` would mostly return 5 samples. But if the query
evaluation aligned perfectly with a scrape, it would return 6 samples. In
Prometheus 3.x such queries will always return 5 samples.
This change likely has few effects on everyday use, except for some subquery
use cases.
Query front-ends that align queries usually align subqueries to multiples of
the step size. These subqueries will likely be affected.
Tests are more likely to be affected. To fix those, either adjust the expected
number of samples or extend the range by less than one sample interval.
|
||||
- The `holt_winters` function has been renamed to `double_exponential_smoothing`
|
||||
and is now guarded by the `promql-experimental-functions` feature flag.
|
||||
If you want to keep using holt_winters, you have to do both of these things:
|
||||
- Rename holt_winters to double_exponential_smoothing in your queries.
|
||||
- Pass `--enable-feature=promql-experimental-functions` in your Prometheus
CLI invocation.
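A short Go sketch of the regular-expression change described in the list above. Prometheus label matchers use RE2 syntax, as does Go's `regexp` package, so the `(?s)` flag approximates the v3 behaviour while `[^\n]` restores the v2 behaviour; the patterns are illustrative only:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	input := "foo\nbar"

	// Prometheus v2-like behaviour: `.` does not match a newline.
	v2 := regexp.MustCompile(`^foo.+bar$`)
	// Prometheus v3-like behaviour: with the `s` flag, `.` also matches `\n`.
	v3 := regexp.MustCompile(`(?s)^foo.+bar$`)
	// The documented workaround to keep the old behaviour: replace `.` with `[^\n]`.
	workaround := regexp.MustCompile(`^foo[^\n]+bar$`)

	fmt.Println(v2.MatchString(input))         // false
	fmt.Println(v3.MatchString(input))         // true
	fmt.Println(workaround.MatchString(input)) // false
}
```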
|
||||
|
||||
## Scrape protocols
|
||||
Prometheus v3 is more strict concerning the Content-Type header received when
|
||||
scraping. Prometheus v2 would default to the standard Prometheus text protocol
|
||||
if the target being scraped did not specify a Content-Type header or if the
|
||||
header was unparsable or unrecognised. This could lead to incorrect data being
|
||||
parsed in the scrape. Prometheus v3 will now fail the scrape in such cases.
|
||||
|
||||
If a scrape target is not providing the correct Content-Type header, the
fallback protocol can be specified using the `fallback_scrape_protocol`
|
||||
parameter. See [Prometheus scrape_config documentation.](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config)
|
||||
|
||||
This is a breaking change as scrapes that may have succeeded with Prometheus v2
|
||||
may now fail if this fallback protocol is not specified.
|
||||
|
||||
## Miscellaneous
|
||||
|
||||
### Prometheus non-root user
|
||||
### TSDB format and downgrade
|
||||
The TSDB format has been changed in Prometheus v2.55 in preparation for changes
|
||||
to the index format. Consequently, a Prometheus v3 TSDB can only be read by
Prometheus v2.55 or newer.
|
||||
Before upgrading to Prometheus v3 please upgrade to v2.55 first and confirm
|
||||
Prometheus works as expected. Only then continue with the upgrade to v3.
|
||||
|
||||
The Prometheus Docker image is now built to [run Prometheus
|
||||
as a non-root user](https://github.com/prometheus/prometheus/pull/2859). If you
|
||||
want the Prometheus UI/API to listen on a low port number (say, port 80), you'll
|
||||
need to override it. For Kubernetes, you would use the following YAML:
|
||||
### TSDB Storage contract
|
||||
TSDB compatible storage is now expected to return results matching the specified
|
||||
selectors. This might impact some third party implementations, most likely
|
||||
implementing `remote_read`.
|
||||
This contract is not explicitly enforced, but violating it can cause undefined behavior.
|
||||
|
||||
### UTF-8 names
|
||||
Prometheus v3 supports UTF-8 in metric and label names. This means metric and
|
||||
label names can change after upgrading according to what is exposed by
|
||||
endpoints. Furthermore, metric and label names that would have previously been
|
||||
flagged as invalid no longer will be.
|
||||
|
||||
Users wishing to preserve the original validation behavior can update their
|
||||
prometheus yaml configuration to specify the legacy validation scheme:
|
||||
|
||||
```
|
||||
global:
|
||||
metric_name_validation_scheme: legacy
|
||||
```
|
||||
|
||||
Or on a per-scrape basis:
|
||||
|
||||
```
|
||||
scrape_configs:
|
||||
- job_name: job1
|
||||
metric_name_validation_scheme: utf8
|
||||
- job_name: job2
|
||||
metric_name_validation_scheme: legacy
|
||||
```
|
||||
|
||||
### Log message format
|
||||
Prometheus v3 has adopted `log/slog` over the previous `go-kit/log`. This
|
||||
results in a change of log message format. An example of the old log format is:
|
||||
```
|
||||
ts=2024-10-23T22:01:06.074Z caller=main.go:627 level=info msg="No time or size retention was set so using the default time retention" duration=15d
|
||||
ts=2024-10-23T22:01:06.074Z caller=main.go:671 level=info msg="Starting Prometheus Server" mode=server version="(version=, branch=, revision=91d80252c3e528728b0f88d254dd720f6be07cb8-modified)"
|
||||
ts=2024-10-23T22:01:06.074Z caller=main.go:676 level=info build_context="(go=go1.23.0, platform=linux/amd64, user=, date=, tags=unknown)"
|
||||
ts=2024-10-23T22:01:06.074Z caller=main.go:677 level=info host_details="(Linux 5.15.0-124-generic #134-Ubuntu SMP Fri Sep 27 20:20:17 UTC 2024 x86_64 gigafips (none))"
|
||||
```
|
||||
|
||||
a similar sequence in the new log format looks like this:
|
||||
```
|
||||
time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:640 msg="No time or size retention was set so using the default time retention" duration=15d
|
||||
time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:681 msg="Starting Prometheus Server" mode=server version="(version=, branch=, revision=7c7116fea8343795cae6da42960cacd0207a2af8)"
|
||||
time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:686 msg="operational information" build_context="(go=go1.23.0, platform=linux/amd64, user=, date=, tags=unknown)" host_details="(Linux 5.15.0-124-generic #134-Ubuntu SMP Fri Sep 27 20:20:17 UTC 2024 x86_64 gigafips (none))" fd_limits="(soft=1048576, hard=1048576)" vm_limits="(soft=unlimited, hard=unlimited)"
|
||||
```
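A minimal Go sketch producing `log/slog` text output of this general shape. This uses only the standard library's `TextHandler` with source information enabled and is not Prometheus's actual logger configuration:

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	// A text handler with AddSource emits time=... level=INFO source=... msg=...
	// lines, similar in shape to the Prometheus v3 output shown above.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		AddSource: true,
	}))
	logger.Info("Starting Prometheus Server", "mode", "server", "version", "(version=, branch=, revision=...)")
}
```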
|
||||
|
||||
### `le` and `quantile` label values
|
||||
In Prometheus v3, the values of the `le` label of classic histograms and the
|
||||
`quantile` label of summaries are normalized upon ingestion. In Prometheus v2
|
||||
the value of these labels depended on the scrape protocol (protobuf vs text
|
||||
format) in some situations. This led to label values changing based on the
|
||||
scrape protocol. E.g. a metric exposed as `my_classic_hist{le="1"}` would be
|
||||
ingested as `my_classic_hist{le="1"}` via the text format, but as
|
||||
`my_classic_hist{le="1.0"}` via protobuf. This changed the identity of the
|
||||
metric and caused problems when querying the metric.
|
||||
In Prometheus v3 these label values will always be normalized to a float-like
representation. I.e. the above example will always result in
`my_classic_hist{le="1.0"}` being ingested into Prometheus, regardless of the
protocol. The effect of this change is that alerts, recording rules and
|
||||
dashboards that directly reference label values as whole numbers such as
|
||||
`le="1"` will stop working.
|
||||
|
||||
Ways to deal with this change either globally or on a per metric basis:
|
||||
|
||||
- Fix references to integer `le`, `quantile` label values, but otherwise do
|
||||
nothing and accept that some queries that span the transition time will produce
|
||||
inaccurate or unexpected results.
|
||||
_This is the recommended solution._
|
||||
- Use `metric_relabel_config` to retain the old labels when scraping targets.
|
||||
This should **only** be applied to metrics that currently produce such labels.
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: security-context-demo-2
|
||||
spec:
|
||||
securityContext:
|
||||
runAsUser: 0
|
||||
...
|
||||
metric_relabel_configs:
|
||||
- source_labels:
|
||||
- quantile
|
||||
target_label: quantile
|
||||
regex: (\d+)\.0+
|
||||
- source_labels:
|
||||
- le
|
||||
- __name__
|
||||
target_label: le
|
||||
regex: (\d+)\.0+;.*_bucket
|
||||
```
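A small Go sketch of what the `quantile` relabel rule above does, assuming (as in Prometheus relabelling) that the regex is fully anchored and the default replacement `$1` is applied on a match, while values that do not match are left untouched:

```go
package main

import (
	"fmt"
	"regexp"
)

// renormalize mimics the effect of the relabel rule: a value like "1.0" is
// rewritten to "1", while anything else (e.g. "0.95") is left as-is.
func renormalize(quantile string) string {
	re := regexp.MustCompile(`^(?:(\d+)\.0+)$`) // relabel regexes are fully anchored
	if m := re.FindStringSubmatch(quantile); m != nil {
		return m[1] // default replacement "$1"
	}
	return quantile
}

func main() {
	fmt.Println(renormalize("1.0"))  // "1"
	fmt.Println(renormalize("0.95")) // "0.95"
}
```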
|
||||
|
||||
See [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
|
||||
for more details.
|
||||
# Prometheus 2.0 migration guide
|
||||
|
||||
If you're using Docker, then the following snippet would be used:
|
||||
|
||||
```
|
||||
docker run -p 9090:9090 prom/prometheus:latest
|
||||
```
|
||||
|
||||
### Prometheus lifecycle
|
||||
|
||||
If you use the Prometheus `/-/reload` HTTP endpoint to [automatically reload your
|
||||
Prometheus config when it changes](configuration/configuration.md),
|
||||
these endpoints are disabled by default for security reasons in Prometheus 2.0.
|
||||
To enable them, set the `--web.enable-lifecycle` flag.
|
||||
For the Prometheus 1.8 to 2.0 please refer to the [Prometheus v2.55 documentation](https://prometheus.io/docs/prometheus/2.55/migration/).
|
||||
|
|
|
@ -568,7 +568,7 @@ Instant vectors are returned as result type `vector`. The corresponding
|
|||
Each series could have the `"value"` key, or the `"histogram"` key, but not both.
|
||||
|
||||
Series are not guaranteed to be returned in any particular order unless a function
|
||||
such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label)`
|
||||
such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label)
|
||||
is used.
|
||||
|
||||
### Scalars
|
||||
|
@ -764,6 +764,8 @@ URL query parameters:
|
|||
- `file[]=<string>`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done.
|
||||
- `exclude_alerts=<bool>`: only return rules, do not return active alerts.
|
||||
- `match[]=<label_selector>`: only return rules that have configured labels that satisfy the label selectors. If the parameter is repeated, rules that match any of the sets of label selectors are returned. Note that matching is on the labels in the definition of each rule, not on the values after template expansion (for alerting rules). Optional.
|
||||
- `group_limit=<number>`: The `group_limit` parameter allows you to specify a limit for the number of rule groups that is returned in a single response. If the total number of rule groups exceeds the specified `group_limit` value, the response will include a `groupNextToken` property. You can use the value of this `groupNextToken` property in subsequent requests in the `group_next_token` parameter to paginate over the remaining rule groups. The `groupNextToken` property will not be present in the final response, indicating that you have retrieved all the available rule groups. Please note that there are no guarantees regarding the consistency of the response if the rule groups are being modified during the pagination process.
|
||||
- `group_next_token`: the pagination token that was returned in previous request when the `group_limit` property is set. The pagination token is used to iteratively paginate over a large number of rule groups. To use the `group_next_token` parameter, the `group_limit` parameter also need to be present. If a rule group that coincides with the next token is removed while you are paginating over the rule groups, a response with status code 400 will be returned.
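A sketch of paginating over rule groups with these parameters, assuming the pagination token is returned as `groupNextToken` inside the `data` object; field names beyond the documented query parameters, and the local Prometheus address, are assumptions here:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// rulesPage models only the field needed for pagination; the full response
// also contains the rule groups themselves, as documented above.
type rulesPage struct {
	Data struct {
		GroupNextToken string `json:"groupNextToken"`
	} `json:"data"`
}

func main() {
	base := "http://localhost:9090/api/v1/rules" // assumed local Prometheus
	token := ""
	for page := 1; ; page++ {
		q := url.Values{"group_limit": {"10"}}
		if token != "" {
			q.Set("group_next_token", token)
		}
		resp, err := http.Get(base + "?" + q.Encode())
		if err != nil {
			panic(err)
		}
		var p rulesPage
		if err := json.NewDecoder(resp.Body).Decode(&p); err != nil {
			panic(err)
		}
		resp.Body.Close()
		fmt.Println("fetched page", page)
		if p.Data.GroupNextToken == "" {
			break // no groupNextToken means the last page was reached
		}
		token = p.Data.GroupNextToken
	}
}
```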
|
||||
|
||||
```json
|
||||
$ curl http://localhost:9090/api/v1/rules
|
||||
|
@ -903,7 +905,7 @@ curl -G http://localhost:9091/api/v1/targets/metadata \
|
|||
```
|
||||
|
||||
The following example returns metadata for all metrics for all targets with
|
||||
label `instance="127.0.0.1:9090`.
|
||||
label `instance="127.0.0.1:9090"`.
|
||||
|
||||
```json
|
||||
curl -G http://localhost:9091/api/v1/targets/metadata \
|
||||
|
@ -1188,9 +1190,11 @@ The following endpoint returns various cardinality statistics about the Promethe
|
|||
GET /api/v1/status/tsdb
|
||||
```
|
||||
URL query parameters:
|
||||
|
||||
- `limit=<number>`: Limit the number of returned items to a given number for each set of statistics. By default, 10 items are returned.
|
||||
|
||||
The `data` section of the query result consists of
|
||||
The `data` section of the query result consists of:
|
||||
|
||||
- **headStats**: This provides the following data about the head block of the TSDB:
|
||||
- **numSeries**: The number of series.
|
||||
- **chunkCount**: The number of chunks.
|
||||
|
@ -1266,13 +1270,13 @@ The following endpoint returns information about the WAL replay:
|
|||
GET /api/v1/status/walreplay
|
||||
```
|
||||
|
||||
**read**: The number of segments replayed so far.
|
||||
**total**: The total number segments needed to be replayed.
|
||||
**progress**: The progress of the replay (0 - 100%).
|
||||
**state**: The state of the replay. Possible states:
|
||||
- **waiting**: Waiting for the replay to start.
|
||||
- **in progress**: The replay is in progress.
|
||||
- **done**: The replay has finished.
|
||||
- **read**: The number of segments replayed so far.
|
||||
- **total**: The total number of segments that need to be replayed.
|
||||
- **progress**: The progress of the replay (0 - 100%).
|
||||
- **state**: The state of the replay. Possible states:
|
||||
- **waiting**: Waiting for the replay to start.
|
||||
- **in progress**: The replay is in progress.
|
||||
- **done**: The replay has finished.
|
||||
|
||||
```json
|
||||
$ curl http://localhost:9090/api/v1/status/walreplay
|
||||
|
|
|
@ -464,6 +464,97 @@ by the number of seconds under the specified time range window, and should be
|
|||
used primarily for human readability. Use `rate` in recording rules so that
|
||||
increases are tracked consistently on a per-second basis.
|
||||
|
||||
## `info()` (experimental)
|
||||
|
||||
_The `info` function is an experiment to improve UX
|
||||
around including labels from [info metrics](https://grafana.com/blog/2021/08/04/how-to-use-promql-joins-for-more-effective-queries-of-prometheus-metrics-at-scale/#info-metrics).
|
||||
The behavior of this function may change in future versions of Prometheus,
|
||||
including its removal from PromQL. `info` has to be enabled via the
|
||||
[feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`._
|
||||
|
||||
`info(v instant-vector, [data-label-selector instant-vector])` finds, for each time
|
||||
series in `v`, all info series with matching _identifying_ labels (more on
|
||||
this later), and adds the union of their _data_ (i.e., non-identifying) labels
|
||||
to the time series. The second argument `data-label-selector` is optional.
|
||||
It is not a real instant vector, but uses a subset of its syntax.
|
||||
It must start and end with curly braces (`{ ... }`) and may only contain label matchers.
|
||||
The label matchers are used to constrain which info series to consider
|
||||
and which data labels to add to `v`.
|
||||
|
||||
Identifying labels of an info series are the subset of labels that uniquely
|
||||
identify the info series. The remaining labels are considered
|
||||
_data labels_ (also called non-identifying). (Note that Prometheus's concept
|
||||
of time series identity always includes _all_ the labels. For the sake of the `info`
|
||||
function, we “logically” define info series identity in a different way than
|
||||
in the conventional Prometheus view.) The identifying labels of an info series
|
||||
are used to join it to regular (non-info) series, i.e. those series that have
|
||||
the same labels as the identifying labels of the info series. The data labels, which are
|
||||
the ones added to the regular series by the `info` function, effectively encode
|
||||
metadata key value pairs. (This implies that a change in the data labels
|
||||
in the conventional Prometheus view constitutes the end of one info series and
|
||||
the beginning of a new info series, while the “logical” view of the `info` function is
|
||||
that the same info series continues to exist, just with different “data”.)
|
||||
|
||||
The conventional approach of adding data labels is sometimes called a “join query”,
|
||||
as illustrated by the following example:
|
||||
|
||||
```
|
||||
rate(http_server_request_duration_seconds_count[2m])
|
||||
* on (job, instance) group_left (k8s_cluster_name)
|
||||
target_info
|
||||
```
|
||||
|
||||
The core of the query is the expression `rate(http_server_request_duration_seconds_count[2m])`.
|
||||
But to add data labels from an info metric, the user has to use elaborate
|
||||
(and not very obvious) syntax to specify which info metric to use (`target_info`), what the
|
||||
identifying labels are (`on (job, instance)`), and which data labels to add
|
||||
(`group_left (k8s_cluster_name)`).
|
||||
|
||||
This query is not only verbose and hard to write, it might also run into an “identity crisis”:
|
||||
If any of the data labels of `target_info` changes, Prometheus sees that as a change of series
|
||||
(as alluded to above, Prometheus just has no native concept of non-identifying labels).
|
||||
If the old `target_info` series is not properly marked as stale (which can happen with certain ingestion paths),
|
||||
the query above will fail for up to 5m (the lookback delta) because it will find a conflicting
|
||||
match with both the old and the new version of `target_info`.
|
||||
|
||||
The `info` function not only resolves this conflict in favor of the newer series, it also simplifies the syntax
|
||||
because it knows about the available info series and what their identifying labels are. The example query
|
||||
looks like this with the `info` function:
|
||||
|
||||
```
|
||||
info(
|
||||
rate(http_server_request_duration_seconds_count[2m]),
|
||||
{k8s_cluster_name=~".+"}
|
||||
)
|
||||
```
|
||||
|
||||
The common case of adding _all_ data labels can be achieved by
|
||||
omitting the 2nd argument of the `info` function entirely, simplifying
|
||||
the example even more:
|
||||
|
||||
```
|
||||
info(rate(http_server_request_duration_seconds_count[2m]))
|
||||
```
|
||||
|
||||
While `info` normally automatically finds all matching info series, it's possible to
|
||||
restrict them by providing a `__name__` label matcher, e.g.
|
||||
`{__name__="target_info"}`.
|
||||
|
||||
### Limitations
|
||||
|
||||
In its current iteration, `info` defaults to considering only info series with
|
||||
the name `target_info`. It also assumes that the identifying info series labels are
|
||||
`instance` and `job`. `info` does support other info series names however, through
|
||||
`__name__` label matchers. E.g., one can explicitly say to consider both
|
||||
`target_info` and `build_info` as follows:
|
||||
`{__name__=~"(target|build)_info"}`. However, the identifying labels always
|
||||
have to be `instance` and `job`.
|
||||
|
||||
These limitations partially defeat the purpose of the `info` function.
At the current stage, this is an experiment to find out how useful the approach
turns out to be in practice. A final version of the `info` function will indeed
consider all matching info series, each with its appropriate identifying labels.
|
||||
|
||||
## `irate()`

`irate(v range-vector)` calculates the per-second instant rate of increase of

@ -9,7 +9,7 @@ Prometheus promises API stability within a major version, and strives to avoid
breaking changes for key features. Some features, which are cosmetic, still
under development, or depend on 3rd party services, are not covered by this.

Things considered stable for 2.x:
Things considered stable for 3.x:

* The query language and data model
* Alerting and recording rules
@ -18,21 +18,25 @@ Things considered stable for 2.x:
* Configuration file format (minus the service discovery remote read/write, see below)
* Rule/alert file format
* Console template syntax and semantics
* Remote write sending, per the [1.0 specification](https://prometheus.io/docs/concepts/remote_write_spec/).
* Remote write sending, per the [1.0 specification](https://prometheus.io/docs/concepts/remote_write_spec/) and receiving
* Agent mode
* OTLP receiver endpoint

Things considered unstable for 2.x:
Things considered unstable for 3.x:

* Any feature listed as experimental or subject to change, including:
  * The [`holt_winters` PromQL function](https://github.com/prometheus/prometheus/issues/2458)
  * Remote write receiving, remote read and the remote read endpoint
  * The [`double_exponential_smoothing` PromQL function](https://github.com/prometheus/prometheus/issues/2458)
  * Remote read and the remote read endpoint
* Server-side HTTPS and basic authentication
* Service discovery integrations, with the exception of `static_configs` and `file_sd_configs`
* Service discovery integrations, with the exception of `static_configs`, `file_sd_configs` and `http_sd_config`
* Go APIs of packages that are part of the server
* HTML generated by the web UI
* The metrics in the /metrics endpoint of Prometheus itself
* Exact on-disk format. Potential changes however, will be forward compatible and transparently handled by Prometheus
* The format of the logs

Prometheus 2.x stability guarantees can be found [in the 2.x documentation](https://prometheus.io/docs/prometheus/2.55/stability/).

As long as you are not using any features marked as experimental/unstable, an
upgrade within a major version can usually be performed without any operational
adjustments and very little risk that anything will break. Any breaking changes

31
documentation/examples/prometheus-otlp.yml
Normal file

@ -0,0 +1,31 @@
# my global config
global:
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.

otlp:
  # Recommended attributes to be promoted to labels.
  promote_resource_attributes:
    - service.instance.id
    - service.name
    - service.namespace
    - cloud.availability_zone
    - cloud.region
    - container.name
    - deployment.environment.name
    - k8s.cluster.name
    - k8s.container.name
    - k8s.cronjob.name
    - k8s.daemonset.name
    - k8s.deployment.name
    - k8s.job.name
    - k8s.namespace.name
    - k8s.pod.name
    - k8s.replicaset.name
    - k8s.statefulset.name
  # Ingest OTLP data keeping UTF-8 characters in metric/label names.
  translation_strategy: NoUTF8EscapingWithSuffixes

storage:
  # OTLP is a push-based protocol, Out of order samples is a common scenario.
  tsdb:
    out_of_order_time_window: 30m
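
The OTLP receiver itself has to be enabled on the server (in Prometheus 3.x via the `--web.enable-otlp-receiver` flag; 2.x used `--enable-feature=otlp-write-receiver`) and is served under `/api/v1/otlp/v1/metrics`. As a rough, non-authoritative sketch, an application using the OpenTelemetry Go SDK could push metrics to a local Prometheus running with the example config above; the endpoint and interval below are illustrative assumptions:

```go
package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()

	// Export over OTLP/HTTP to the Prometheus OTLP receiver endpoint
	// (assumed to be a local Prometheus on localhost:9090).
	exp, err := otlpmetrichttp.New(ctx,
		otlpmetrichttp.WithEndpoint("localhost:9090"),
		otlpmetrichttp.WithURLPath("/api/v1/otlp/v1/metrics"),
		otlpmetrichttp.WithInsecure(),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Push every 15s; well within the 30m out-of-order window configured above.
	provider := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp, sdkmetric.WithInterval(15*time.Second))),
	)
	defer func() { _ = provider.Shutdown(ctx) }()

	counter, err := provider.Meter("example").Int64Counter("demo_requests_total")
	if err != nil {
		log.Fatal(err)
	}
	counter.Add(ctx, 1)
}
```
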
@ -6,9 +6,9 @@ require (
	github.com/alecthomas/kingpin/v2 v2.4.0
	github.com/gogo/protobuf v1.3.2
	github.com/golang/snappy v0.0.4
	github.com/influxdata/influxdb v1.11.6
	github.com/prometheus/client_golang v1.20.4
	github.com/prometheus/common v0.60.0
	github.com/influxdata/influxdb v1.11.7
	github.com/prometheus/client_golang v1.20.5
	github.com/prometheus/common v0.60.1
	github.com/prometheus/prometheus v0.53.1
	github.com/stretchr/testify v1.9.0
)

@ -166,8 +166,8 @@ github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MI
|
|||
github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
|
||||
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||
github.com/influxdata/influxdb v1.11.6 h1:zS5MRY+RQ5/XFTer5R8xQRnY17JYSbacvO6OaP164wU=
|
||||
github.com/influxdata/influxdb v1.11.6/go.mod h1:F10NoQb9qa04lME3pTPWQrYt4JZ/ke1Eei+1ttgHHrg=
|
||||
github.com/influxdata/influxdb v1.11.7 h1:C31A+S9YfjTCOuAv9Qs0ZdQufslOZZBtejjxiV8QNQw=
|
||||
github.com/influxdata/influxdb v1.11.7/go.mod h1:zRTAuk/Ie/V1LGxJUv8jfDmfv+ypz22lxfhc1MxC3rI=
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
|
@ -253,8 +253,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
|
|||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
|
||||
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
|
||||
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
|
@ -264,8 +264,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
|
|||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
|
||||
github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
|
||||
github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
|
||||
github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
|
||||
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
|
||||
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
|
|
59
go.mod
59
go.mod
|
@ -5,8 +5,8 @@ go 1.22.0
|
|||
toolchain go1.23.0
|
||||
|
||||
require (
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
|
||||
github.com/Code-Hex/go-generics-cache v1.5.1
|
||||
|
@ -17,10 +17,10 @@ require (
|
|||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
|
||||
github.com/cespare/xxhash/v2 v2.3.0
|
||||
github.com/dennwc/varint v1.0.0
|
||||
github.com/digitalocean/godo v1.126.0
|
||||
github.com/digitalocean/godo v1.128.0
|
||||
github.com/docker/docker v27.3.1+incompatible
|
||||
github.com/edsrzf/mmap-go v1.1.0
|
||||
github.com/envoyproxy/go-control-plane v0.13.0
|
||||
github.com/edsrzf/mmap-go v1.2.0
|
||||
github.com/envoyproxy/go-control-plane v0.13.1
|
||||
github.com/envoyproxy/protoc-gen-validate v1.1.0
|
||||
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
|
||||
github.com/fsnotify/fsnotify v1.7.0
|
||||
|
@ -34,14 +34,14 @@ require (
|
|||
github.com/gophercloud/gophercloud v1.14.1
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0
|
||||
github.com/hashicorp/consul/api v1.29.4
|
||||
github.com/hashicorp/consul/api v1.30.0
|
||||
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.13.1
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.15.0
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.2.1
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/klauspost/compress v1.17.10
|
||||
github.com/klauspost/compress v1.17.11
|
||||
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
|
||||
github.com/linode/linodego v1.41.0
|
||||
github.com/linode/linodego v1.42.0
|
||||
github.com/miekg/dns v1.1.62
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
|
||||
|
@ -50,25 +50,26 @@ require (
|
|||
github.com/oklog/ulid v1.3.1
|
||||
github.com/ovh/go-ovh v1.6.0
|
||||
github.com/prometheus/alertmanager v0.27.0
|
||||
github.com/prometheus/client_golang v1.20.4
|
||||
github.com/prometheus/client_golang v1.20.5
|
||||
github.com/prometheus/client_model v0.6.1
|
||||
github.com/prometheus/common v0.60.0
|
||||
github.com/prometheus/common v0.60.1
|
||||
github.com/prometheus/common/assets v0.2.0
|
||||
github.com/prometheus/common/sigv4 v0.1.0
|
||||
github.com/prometheus/exporter-toolkit v0.13.0
|
||||
github.com/prometheus/exporter-toolkit v0.13.1
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30
|
||||
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/vultr/govultr/v2 v2.17.2
|
||||
go.opentelemetry.io/collector/pdata v1.16.0
|
||||
go.opentelemetry.io/collector/semconv v0.110.0
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0
|
||||
go.opentelemetry.io/otel v1.30.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0
|
||||
go.opentelemetry.io/otel/sdk v1.30.0
|
||||
go.opentelemetry.io/otel/trace v1.30.0
|
||||
go.opentelemetry.io/collector/pdata v1.18.0
|
||||
go.opentelemetry.io/collector/semconv v0.112.0
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0
|
||||
go.opentelemetry.io/otel v1.31.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0
|
||||
go.opentelemetry.io/otel/sdk v1.31.0
|
||||
go.opentelemetry.io/otel/trace v1.31.0
|
||||
go.uber.org/atomic v1.11.0
|
||||
go.uber.org/automaxprocs v1.6.0
|
||||
go.uber.org/goleak v1.3.0
|
||||
|
@ -78,10 +79,10 @@ require (
|
|||
golang.org/x/sys v0.26.0
|
||||
golang.org/x/text v0.19.0
|
||||
golang.org/x/tools v0.26.0
|
||||
google.golang.org/api v0.199.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1
|
||||
google.golang.org/api v0.204.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53
|
||||
google.golang.org/grpc v1.67.1
|
||||
google.golang.org/protobuf v1.34.2
|
||||
google.golang.org/protobuf v1.35.1
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.31.1
|
||||
|
@ -92,8 +93,8 @@ require (
|
|||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/auth v0.9.5 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
|
||||
cloud.google.com/go/auth v0.10.0 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.5.2 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
|
||||
|
@ -184,15 +185,15 @@ require (
|
|||
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
|
||||
go.mongodb.org/mongo-driver v1.14.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.30.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.31.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
golang.org/x/crypto v0.28.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
|
||||
golang.org/x/mod v0.21.0 // indirect
|
||||
golang.org/x/net v0.30.0 // indirect
|
||||
golang.org/x/term v0.25.0 // indirect
|
||||
golang.org/x/time v0.6.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
|
||||
golang.org/x/time v0.7.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
|
|
134
go.sum
134
go.sum
|
@ -12,10 +12,10 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
|
|||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw=
|
||||
cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
|
||||
cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo=
|
||||
cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
|
@ -36,10 +36,12 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
|
|||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
|
||||
|
@ -52,6 +54,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.
|
|||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
|
@ -121,8 +125,10 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
|||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
|
||||
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
|
||||
github.com/digitalocean/godo v1.126.0 h1:+Znh7VMQj/E8ArbjWnc7OKGjWfzC+I8OCSRp7r1MdD8=
|
||||
github.com/digitalocean/godo v1.126.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/digitalocean/godo v1.128.0 h1:cGn/ibMSRZ9+8etbzMv2MnnCEPTTGlEnx3HHTPwdk1U=
|
||||
github.com/digitalocean/godo v1.128.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
|
||||
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
|
||||
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
|
||||
|
@ -133,15 +139,15 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh
|
|||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
|
||||
github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
|
||||
github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
|
||||
github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les=
|
||||
github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
|
||||
github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE=
|
||||
github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
|
||||
|
@ -300,10 +306,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4
|
|||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
|
||||
github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
|
||||
github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg=
|
||||
github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
|
||||
github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
|
||||
github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ=
|
||||
github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM=
|
||||
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
|
||||
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
|
||||
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
|
||||
|
@ -353,8 +357,8 @@ github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1av
|
|||
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
|
||||
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
|
||||
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.15.0 h1:6mpMJ/RuX1woZj+MCJdyKNEX9129KDkEIDeeyfr4GD4=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.15.0/go.mod h1:h8sHav+27Xa+48cVMAvAUMELov5h298Ilg2vflyTHgg=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||
|
@ -381,10 +385,12 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
|
|||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
|
||||
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
|
||||
github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
|
||||
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
|
@ -399,8 +405,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
|||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY=
|
||||
github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY=
|
||||
github.com/linode/linodego v1.42.0 h1:ZSbi4MtvwrfB9Y6bknesorvvueBGGilcmh2D5dq76RM=
|
||||
github.com/linode/linodego v1.42.0/go.mod h1:2yzmY6pegPBDgx2HDllmt0eIk2IlzqcgK6NR0wFCFRY=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
|
@ -500,8 +506,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
|
|||
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
|
||||
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
|
||||
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
|
@ -513,14 +519,14 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
|
|||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
|
||||
github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
|
||||
github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
|
||||
github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
|
||||
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
|
||||
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
|
||||
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
|
||||
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
|
||||
github.com/prometheus/exporter-toolkit v0.13.0 h1:lmA0Q+8IaXgmFRKw09RldZmZdnvu9wwcDLIXGmTPw1c=
|
||||
github.com/prometheus/exporter-toolkit v0.13.0/go.mod h1:2uop99EZl80KdXhv/MxVI2181fMcwlsumFOqBecGkG0=
|
||||
github.com/prometheus/exporter-toolkit v0.13.1 h1:Evsh0gWQo2bdOHlnz9+0Nm7/OFfIwhE2Ws4A2jIlR04=
|
||||
github.com/prometheus/exporter-toolkit v0.13.1/go.mod h1:ujdv2YIOxtdFxxqtloLpbqmxd5J0Le6IITUvIRSWjj0=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
|
@ -528,10 +534,12 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
|
|||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
|
||||
github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8=
|
||||
|
@ -587,26 +595,28 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/collector/pdata v1.16.0 h1:g02K8jlRnmQ7TQDuXpdgVL6vIxIVqr5Gbb1qIR27rto=
|
||||
go.opentelemetry.io/collector/pdata v1.16.0/go.mod h1:YZZJIt2ehxosYf/Y1pbvexjNWsIGNNrzzlCTO9jC1F4=
|
||||
go.opentelemetry.io/collector/semconv v0.110.0 h1:KHQnOHe3gUz0zsxe8ph9kN5OTypCFD4V+06AiBTfeNk=
|
||||
go.opentelemetry.io/collector/semconv v0.110.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
|
||||
go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
|
||||
go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8=
|
||||
go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
|
||||
go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ=
|
||||
go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE=
|
||||
go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg=
|
||||
go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc=
|
||||
go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
|
||||
go.opentelemetry.io/collector/pdata v1.18.0 h1:/yg2rO2dxqDM2p6GutsMCxXN6sKlXwyIz/ZYyUPONBg=
|
||||
go.opentelemetry.io/collector/pdata v1.18.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
|
||||
go.opentelemetry.io/collector/semconv v0.112.0 h1:JPQyvZhlNLVSuVI+FScONaiFygB7h7NTZceUEKIQUEc=
|
||||
go.opentelemetry.io/collector/semconv v0.112.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
|
||||
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
|
||||
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4=
|
||||
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
|
||||
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
|
||||
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
|
||||
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
|
@ -816,8 +826,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
|
|||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
|
||||
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
|
||||
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
|
@ -882,8 +892,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
|
|||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs=
|
||||
google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28=
|
||||
google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4=
|
||||
google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -918,10 +928,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
|
|||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
|
@ -948,8 +958,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
|
|||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
|
||||
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
package histogram
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
|
@ -784,16 +785,16 @@ func (h *FloatHistogram) Validate() error {
|
|||
return fmt.Errorf("custom buckets: %w", err)
|
||||
}
|
||||
if h.ZeroCount != 0 {
|
||||
return fmt.Errorf("custom buckets: must have zero count of 0")
|
||||
return errors.New("custom buckets: must have zero count of 0")
|
||||
}
|
||||
if h.ZeroThreshold != 0 {
|
||||
return fmt.Errorf("custom buckets: must have zero threshold of 0")
|
||||
return errors.New("custom buckets: must have zero threshold of 0")
|
||||
}
|
||||
if len(h.NegativeSpans) > 0 {
|
||||
return fmt.Errorf("custom buckets: must not have negative spans")
|
||||
return errors.New("custom buckets: must not have negative spans")
|
||||
}
|
||||
if len(h.NegativeBuckets) > 0 {
|
||||
return fmt.Errorf("custom buckets: must not have negative buckets")
|
||||
return errors.New("custom buckets: must not have negative buckets")
|
||||
}
|
||||
} else {
|
||||
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
|
||||
|
@ -807,7 +808,7 @@ func (h *FloatHistogram) Validate() error {
|
|||
return fmt.Errorf("negative side: %w", err)
|
||||
}
|
||||
if h.CustomValues != nil {
|
||||
return fmt.Errorf("histogram with exponential schema must not have custom bounds")
|
||||
return errors.New("histogram with exponential schema must not have custom bounds")
|
||||
}
|
||||
}
|
||||
err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
|
||||
|
@ -948,10 +949,10 @@ func (h *FloatHistogram) floatBucketIterator(
|
|||
positive bool, absoluteStartValue float64, targetSchema int32,
|
||||
) floatBucketIterator {
|
||||
if h.UsesCustomBuckets() && targetSchema != h.Schema {
|
||||
panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema"))
|
||||
panic(errors.New("cannot merge from custom buckets schema to exponential schema"))
|
||||
}
|
||||
if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) {
|
||||
panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema"))
|
||||
panic(errors.New("cannot merge from exponential buckets schema to custom schema"))
|
||||
}
|
||||
if targetSchema > h.Schema {
|
||||
panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
package histogram
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"slices"
|
||||
|
@ -432,16 +433,16 @@ func (h *Histogram) Validate() error {
|
|||
return fmt.Errorf("custom buckets: %w", err)
|
||||
}
|
||||
if h.ZeroCount != 0 {
|
||||
return fmt.Errorf("custom buckets: must have zero count of 0")
|
||||
return errors.New("custom buckets: must have zero count of 0")
|
||||
}
|
||||
if h.ZeroThreshold != 0 {
|
||||
return fmt.Errorf("custom buckets: must have zero threshold of 0")
|
||||
return errors.New("custom buckets: must have zero threshold of 0")
|
||||
}
|
||||
if len(h.NegativeSpans) > 0 {
|
||||
return fmt.Errorf("custom buckets: must not have negative spans")
|
||||
return errors.New("custom buckets: must not have negative spans")
|
||||
}
|
||||
if len(h.NegativeBuckets) > 0 {
|
||||
return fmt.Errorf("custom buckets: must not have negative buckets")
|
||||
return errors.New("custom buckets: must not have negative buckets")
|
||||
}
|
||||
} else {
|
||||
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
|
||||
|
@ -455,7 +456,7 @@ func (h *Histogram) Validate() error {
|
|||
return fmt.Errorf("negative side: %w", err)
|
||||
}
|
||||
if h.CustomValues != nil {
|
||||
return fmt.Errorf("histogram with exponential schema must not have custom bounds")
|
||||
return errors.New("histogram with exponential schema must not have custom bounds")
|
||||
}
|
||||
}
|
||||
err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
|
||||
|
|
|
@ -16,6 +16,7 @@ package relabel
import (
	"crypto/md5"
	"encoding/binary"
	"errors"
	"fmt"
	"strconv"
	"strings"
@ -114,10 +115,10 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {

func (c *Config) Validate() error {
	if c.Action == "" {
		return fmt.Errorf("relabel action cannot be empty")
		return errors.New("relabel action cannot be empty")
	}
	if c.Modulus == 0 && c.Action == HashMod {
		return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus")
		return errors.New("relabel configuration for hashmod requires non-zero modulus")
	}
	if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" {
		return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)
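
For reference, a relabel rule that satisfies the `hashmod` checks above needs both a non-zero `modulus` and a `target_label`. A hypothetical snippet (label values and bucket count are illustrative) could look like this:

```yaml
relabel_configs:
  - source_labels: [__address__]
    action: hashmod
    modulus: 8
    target_label: __tmp_hash
  - source_labels: [__tmp_hash]
    action: keep
    regex: "3"
```

This is the usual sharding pattern: hash each target's address into one of eight buckets and keep only the targets that land in bucket 3.
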
@ -111,6 +111,20 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
			)
		}

		for k, v := range g.Labels {
			if !model.LabelName(k).IsValid() || k == model.MetricNameLabel {
				errs = append(
					errs, fmt.Errorf("invalid label name: %s", k),
				)
			}

			if !model.LabelValue(v).IsValid() {
				errs = append(
					errs, fmt.Errorf("invalid label value: %s", v),
				)
			}
		}

		set[g.Name] = struct{}{}

		for i, r := range g.Rules {
@ -141,6 +155,7 @@ type RuleGroup struct {
	QueryOffset *model.Duration   `yaml:"query_offset,omitempty"`
	Limit       int               `yaml:"limit,omitempty"`
	Rules       []RuleNode        `yaml:"rules"`
	Labels      map[string]string `yaml:"labels,omitempty"`
}

// Rule describes an alerting or recording rule.

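With the new `Labels` field on `RuleGroup`, labels can be declared once per group and are validated by the loop shown above (names must be valid and must not be `__name__`). A minimal rule-file sketch, with purely illustrative metric and label names:

```yaml
groups:
  - name: example
    labels:
      team: myteam
    rules:
      - record: job:http_requests:rate5m
        expr: sum by (job) (rate(http_requests_total[5m]))
```
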
@ -169,14 +184,14 @@ type RuleNode struct {
|
|||
func (r *RuleNode) Validate() (nodes []WrappedError) {
|
||||
if r.Record.Value != "" && r.Alert.Value != "" {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: fmt.Errorf("only one of 'record' and 'alert' must be set"),
|
||||
err: errors.New("only one of 'record' and 'alert' must be set"),
|
||||
node: &r.Record,
|
||||
nodeAlt: &r.Alert,
|
||||
})
|
||||
}
|
||||
if r.Record.Value == "" && r.Alert.Value == "" {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: fmt.Errorf("one of 'record' or 'alert' must be set"),
|
||||
err: errors.New("one of 'record' or 'alert' must be set"),
|
||||
node: &r.Record,
|
||||
nodeAlt: &r.Alert,
|
||||
})
|
||||
|
@ -184,7 +199,7 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
|
|||
|
||||
if r.Expr.Value == "" {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: fmt.Errorf("field 'expr' must be set in rule"),
|
||||
err: errors.New("field 'expr' must be set in rule"),
|
||||
node: &r.Expr,
|
||||
})
|
||||
} else if _, err := parser.ParseExpr(r.Expr.Value); err != nil {
|
||||
|
@ -196,19 +211,19 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
|
|||
if r.Record.Value != "" {
|
||||
if len(r.Annotations) > 0 {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: fmt.Errorf("invalid field 'annotations' in recording rule"),
|
||||
err: errors.New("invalid field 'annotations' in recording rule"),
|
||||
node: &r.Record,
|
||||
})
|
||||
}
|
||||
if r.For != 0 {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: fmt.Errorf("invalid field 'for' in recording rule"),
|
||||
err: errors.New("invalid field 'for' in recording rule"),
|
||||
node: &r.Record,
|
||||
})
|
||||
}
|
||||
if r.KeepFiringFor != 0 {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: fmt.Errorf("invalid field 'keep_firing_for' in recording rule"),
|
||||
err: errors.New("invalid field 'keep_firing_for' in recording rule"),
|
||||
node: &r.Record,
|
||||
})
|
||||
}
|
||||
|
|
|
@ -107,6 +107,23 @@ groups:
|
|||
severity: "page"
|
||||
annotations:
|
||||
summary: "Instance {{ $labels.instance }} down"
|
||||
`,
|
||||
shouldPass: true,
|
||||
},
|
||||
{
|
||||
ruleString: `
|
||||
groups:
|
||||
- name: example
|
||||
labels:
|
||||
team: myteam
|
||||
rules:
|
||||
- alert: InstanceDown
|
||||
expr: up == 0
|
||||
for: 5m
|
||||
labels:
|
||||
severity: "page"
|
||||
annotations:
|
||||
summary: "Instance {{ $labels.instance }} down"
|
||||
`,
|
||||
shouldPass: true,
|
||||
},
|
||||
|
|
|
@ -40,6 +40,10 @@ var newTestParserFns = map[string]newParser{
|
|||
"omtext": func(b []byte, st *labels.SymbolTable) Parser {
|
||||
return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped())
|
||||
},
|
||||
"omtext_with_nhcb": func(b []byte, st *labels.SymbolTable) Parser {
|
||||
p := NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped())
|
||||
return NewNHCBParser(p, st, false)
|
||||
},
|
||||
}
|
||||
|
||||
// BenchmarkParse benchmarks parsing, mimicking how scrape/scrape.go#append use it.
|
||||
|
@ -78,6 +82,10 @@ func BenchmarkParse(b *testing.B) {
|
|||
// We don't pass compareToExpfmtFormat: expfmt.TypeOpenMetrics as expfmt does not support OM exemplars, see https://github.com/prometheus/common/issues/703.
|
||||
{dataFile: "omtestdata.txt", parser: "omtext"},
|
||||
{dataFile: "promtestdata.txt", parser: "omtext"}, // Compare how omtext parser deals with Prometheus text format vs promtext.
|
||||
|
||||
// NHCB.
|
||||
{dataFile: "omhistogramdata.txt", parser: "omtext"}, // Measure OM parser baseline for histograms.
|
||||
{dataFile: "omhistogramdata.txt", parser: "omtext_with_nhcb"}, // Measure NHCB over OM parser.
|
||||
} {
|
||||
var buf []byte
|
||||
dataCase := bcase.dataFile
|
||||
|
|
|
@ -14,6 +14,8 @@
package textparse

import (
	"errors"
	"fmt"
	"mime"

	"github.com/prometheus/common/model"
@ -23,8 +25,7 @@ import (
	"github.com/prometheus/prometheus/model/labels"
)

// Parser parses samples from a byte slice of samples in the official
// Prometheus and OpenMetrics text exposition formats.
// Parser parses samples from a byte slice of samples in different exposition formats.
type Parser interface {
	// Series returns the bytes of a series with a simple float64 as a
	// value, the timestamp if set, and the value of the current sample.
@ -58,6 +59,8 @@ type Parser interface {

	// Metric writes the labels of the current sample into the passed labels.
	// It returns the string from which the metric was parsed.
	// The values of the "le" labels of classic histograms and "quantile" labels
	// of summaries should follow the OpenMetrics formatting rules.
	Metric(l *labels.Labels) string

	// Exemplar writes the exemplar of the current sample into the passed
@ -78,28 +81,65 @@ type Parser interface {
	Next() (Entry, error)
}

// New returns a new parser of the byte slice.
//
// This function always returns a valid parser, but might additionally
// return an error if the content type cannot be parsed.
func New(b []byte, contentType string, parseClassicHistograms, skipOMCTSeries bool, st *labels.SymbolTable) (Parser, error) {
// extractMediaType returns the mediaType of a required parser. It tries first to
// extract a valid and supported mediaType from contentType. If that fails,
// the provided fallbackType (possibly an empty string) is returned, together with
// an error. fallbackType is used as-is without further validation.
func extractMediaType(contentType, fallbackType string) (string, error) {
	if contentType == "" {
		return NewPromParser(b, st), nil
		if fallbackType == "" {
			return "", errors.New("non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target")
		}
		return fallbackType, fmt.Errorf("non-compliant scrape target sending blank Content-Type, using fallback_scrape_protocol %q", fallbackType)
	}

	// We have a contentType, parse it.
	mediaType, _, err := mime.ParseMediaType(contentType)
	if err != nil {
		return NewPromParser(b, st), err
		if fallbackType == "" {
			retErr := fmt.Errorf("cannot parse Content-Type %q and no fallback_scrape_protocol for target", contentType)
			return "", errors.Join(retErr, err)
		}
		retErr := fmt.Errorf("could not parse received Content-Type %q, using fallback_scrape_protocol %q", contentType, fallbackType)
		return fallbackType, errors.Join(retErr, err)
	}

	// We have a valid media type, either we recognise it and can use it
	// or we have to error.
	switch mediaType {
	case "application/openmetrics-text", "application/vnd.google.protobuf", "text/plain":
		return mediaType, nil
	}
	// We're here because we have no recognised mediaType.
	if fallbackType == "" {
		return "", fmt.Errorf("received unsupported Content-Type %q and no fallback_scrape_protocol specified for target", contentType)
	}
	return fallbackType, fmt.Errorf("received unsupported Content-Type %q, using fallback_scrape_protocol %q", contentType, fallbackType)
}

// New returns a new parser of the byte slice.
//
// This function no longer guarantees to return a valid parser.
//
// It only returns a valid parser if the supplied contentType and fallbackType allow.
// An error may also be returned if fallbackType had to be used or there was some
// other error parsing the supplied Content-Type.
// If the returned parser is nil then the scrape must fail.
func New(b []byte, contentType, fallbackType string, parseClassicHistograms, skipOMCTSeries bool, st *labels.SymbolTable) (Parser, error) {
	mediaType, err := extractMediaType(contentType, fallbackType)
	// err may be nil or something we want to warn about.

	switch mediaType {
	case "application/openmetrics-text":
		return NewOpenMetricsParser(b, st, func(o *openMetricsParserOptions) {
			o.SkipCTSeries = skipOMCTSeries
		}), nil
		}), err
	case "application/vnd.google.protobuf":
		return NewProtobufParser(b, parseClassicHistograms, st), nil
		return NewProtobufParser(b, parseClassicHistograms, st), err
	case "text/plain":
		return NewPromParser(b, st), err
	default:
		return NewPromParser(b, st), nil
		return nil, err
	}
}

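For illustration only, a minimal sketch of how a caller might exercise the new fallback-aware `New` signature (import paths follow this repository's module layout; in Prometheus itself the fallback media type is derived from the target's `fallback_scrape_protocol` setting, and the scrape-loop plumbing is omitted here):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	body := []byte("some_metric 42\n")
	st := labels.NewSymbolTable()

	// An unparsable Content-Type no longer silently falls back to the Prometheus
	// text parser; it only falls back to the explicitly configured protocol.
	p, err := textparse.New(body, "invalid/", "text/plain", false, false, st)
	if err != nil {
		fmt.Println("warning:", err) // non-nil even though a fallback parser was returned
	}
	if p == nil {
		fmt.Println("no parser available, the scrape must fail")
	}
}
```

With an empty fallback, the same call would return a nil parser together with an error, and the scrape fails.
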
@ -22,6 +22,7 @@ import (
|
|||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/prometheus/config"
|
||||
"github.com/prometheus/prometheus/model/exemplar"
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
|
@ -31,6 +32,10 @@ import (
|
|||
func TestNewParser(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
requireNilParser := func(t *testing.T, p Parser) {
|
||||
require.Nil(t, p)
|
||||
}
|
||||
|
||||
requirePromParser := func(t *testing.T, p Parser) {
|
||||
require.NotNil(t, p)
|
||||
_, ok := p.(*PromParser)
|
||||
|
@ -43,32 +48,81 @@ func TestNewParser(t *testing.T) {
|
|||
require.True(t, ok)
|
||||
}
|
||||
|
||||
requireProtobufParser := func(t *testing.T, p Parser) {
|
||||
require.NotNil(t, p)
|
||||
_, ok := p.(*ProtobufParser)
|
||||
require.True(t, ok)
|
||||
}
|
||||
|
||||
for name, tt := range map[string]*struct {
|
||||
contentType string
|
||||
fallbackScrapeProtocol config.ScrapeProtocol
|
||||
validateParser func(*testing.T, Parser)
|
||||
err string
|
||||
}{
|
||||
"empty-string": {
|
||||
validateParser: requireNilParser,
|
||||
err: "non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target",
|
||||
},
|
||||
"empty-string-fallback-text-plain": {
|
||||
validateParser: requirePromParser,
|
||||
fallbackScrapeProtocol: config.PrometheusText0_0_4,
|
||||
err: "non-compliant scrape target sending blank Content-Type, using fallback_scrape_protocol \"text/plain\"",
|
||||
},
|
||||
"invalid-content-type-1": {
|
||||
contentType: "invalid/",
|
||||
validateParser: requireNilParser,
|
||||
err: "expected token after slash",
|
||||
},
|
||||
"invalid-content-type-1-fallback-text-plain": {
|
||||
contentType: "invalid/",
|
||||
validateParser: requirePromParser,
|
||||
fallbackScrapeProtocol: config.PrometheusText0_0_4,
|
||||
err: "expected token after slash",
|
||||
},
|
||||
"invalid-content-type-1-fallback-openmetrics": {
|
||||
contentType: "invalid/",
|
||||
validateParser: requireOpenMetricsParser,
|
||||
fallbackScrapeProtocol: config.OpenMetricsText0_0_1,
|
||||
err: "expected token after slash",
|
||||
},
|
||||
"invalid-content-type-1-fallback-protobuf": {
|
||||
contentType: "invalid/",
|
||||
validateParser: requireProtobufParser,
|
||||
fallbackScrapeProtocol: config.PrometheusProto,
|
||||
err: "expected token after slash",
|
||||
},
|
||||
"invalid-content-type-2": {
|
||||
contentType: "invalid/invalid/invalid",
|
||||
validateParser: requireNilParser,
|
||||
err: "unexpected content after media subtype",
|
||||
},
|
||||
"invalid-content-type-2-fallback-text-plain": {
|
||||
contentType: "invalid/invalid/invalid",
|
||||
validateParser: requirePromParser,
|
||||
fallbackScrapeProtocol: config.PrometheusText1_0_0,
|
||||
err: "unexpected content after media subtype",
|
||||
},
|
||||
"invalid-content-type-3": {
|
||||
contentType: "/",
|
||||
validateParser: requireNilParser,
|
||||
err: "no media type",
|
||||
},
|
||||
"invalid-content-type-3-fallback-text-plain": {
|
||||
contentType: "/",
|
||||
validateParser: requirePromParser,
|
||||
fallbackScrapeProtocol: config.PrometheusText1_0_0,
|
||||
err: "no media type",
|
||||
},
|
||||
"invalid-content-type-4": {
|
||||
contentType: "application/openmetrics-text; charset=UTF-8; charset=utf-8",
|
||||
validateParser: requirePromParser,
|
||||
validateParser: requireNilParser,
|
||||
err: "duplicate parameter name",
|
||||
},
|
||||
"invalid-content-type-4-fallback-open-metrics": {
|
||||
contentType: "application/openmetrics-text; charset=UTF-8; charset=utf-8",
|
||||
validateParser: requireOpenMetricsParser,
|
||||
fallbackScrapeProtocol: config.OpenMetricsText1_0_0,
|
||||
err: "duplicate parameter name",
|
||||
},
|
||||
"openmetrics": {
|
||||
|
@ -87,20 +141,33 @@ func TestNewParser(t *testing.T) {
|
|||
contentType: "text/plain",
|
||||
validateParser: requirePromParser,
|
||||
},
|
||||
"protobuf": {
|
||||
contentType: "application/vnd.google.protobuf",
|
||||
validateParser: requireProtobufParser,
|
||||
},
|
||||
"plain-text-with-version": {
|
||||
contentType: "text/plain; version=0.0.4",
|
||||
validateParser: requirePromParser,
|
||||
},
|
||||
"some-other-valid-content-type": {
|
||||
contentType: "text/html",
|
||||
validateParser: requireNilParser,
|
||||
err: "received unsupported Content-Type \"text/html\" and no fallback_scrape_protocol specified for target",
|
||||
},
|
||||
"some-other-valid-content-type-fallback-text-plain": {
|
||||
contentType: "text/html",
|
||||
validateParser: requirePromParser,
|
||||
fallbackScrapeProtocol: config.PrometheusText0_0_4,
|
||||
err: "received unsupported Content-Type \"text/html\", using fallback_scrape_protocol \"text/plain\"",
|
||||
},
|
||||
} {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
tt := tt // Copy to local variable before going parallel.
|
||||
t.Parallel()
|
||||
|
||||
p, err := New([]byte{}, tt.contentType, false, false, labels.NewSymbolTable())
|
||||
fallbackProtoMediaType := tt.fallbackScrapeProtocol.HeaderMediaType()
|
||||
|
||||
p, err := New([]byte{}, tt.contentType, fallbackProtoMediaType, false, false, labels.NewSymbolTable())
|
||||
tt.validateParser(t, p)
|
||||
if tt.err == "" {
|
||||
require.NoError(t, err)
|
||||
|
@ -172,13 +239,13 @@ func testParse(t *testing.T, p Parser) (ret []parsedEntry) {
|
|||
}
|
||||
|
||||
p.Metric(&got.lset)
|
||||
for e := (exemplar.Exemplar{}); p.Exemplar(&e); {
|
||||
got.es = append(got.es, e)
|
||||
}
|
||||
// Parser reuses int pointer.
|
||||
if ct := p.CreatedTimestamp(); ct != nil {
|
||||
got.ct = int64p(*ct)
|
||||
}
|
||||
for e := (exemplar.Exemplar{}); p.Exemplar(&e); {
|
||||
got.es = append(got.es, e)
|
||||
}
|
||||
case EntryType:
|
||||
m, got.typ = p.Type()
|
||||
got.m = string(m)
|
||||
|
|
374
model/textparse/nhcbparse.go
Normal file
374
model/textparse/nhcbparse.go
Normal file
|
@ -0,0 +1,374 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package textparse
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/model/exemplar"
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/util/convertnhcb"
|
||||
)
|
||||
|
||||
type collectionState int
|
||||
|
||||
const (
|
||||
stateStart collectionState = iota
|
||||
stateCollecting
|
||||
stateEmitting
|
||||
)
|
||||
|
||||
// The NHCBParser wraps a Parser and converts classic histograms to native
|
||||
// histograms with custom buckets.
|
||||
//
|
||||
// Since Parser interface is line based, this parser needs to keep track
|
||||
// of the last classic histogram series it saw to collate them into a
|
||||
// single native histogram.
|
||||
//
|
||||
// Note:
|
||||
// - Only series that have the histogram metadata type are considered for
|
||||
// conversion.
|
||||
// - The classic series are also returned if keepClassicHistograms is true.
|
||||
type NHCBParser struct {
|
||||
// The parser we're wrapping.
|
||||
parser Parser
|
||||
// Option to keep classic histograms along with converted histograms.
|
||||
keepClassicHistograms bool
|
||||
|
||||
// Labels builder.
|
||||
builder labels.ScratchBuilder
|
||||
|
||||
// State of the parser.
|
||||
state collectionState
|
||||
|
||||
// Caches the values from the underlying parser.
|
||||
// For Series and Histogram.
|
||||
bytes []byte
|
||||
ts *int64
|
||||
value float64
|
||||
h *histogram.Histogram
|
||||
fh *histogram.FloatHistogram
|
||||
// For Metric.
|
||||
lset labels.Labels
|
||||
metricString string
|
||||
// For Type.
|
||||
bName []byte
|
||||
typ model.MetricType
|
||||
|
||||
// Caches the entry itself if we are inserting a converted NHCB
|
||||
// halfway through.
|
||||
entry Entry
|
||||
err error
|
||||
|
||||
// Caches the values and metric for the inserted converted NHCB.
|
||||
bytesNHCB []byte
|
||||
hNHCB *histogram.Histogram
|
||||
fhNHCB *histogram.FloatHistogram
|
||||
lsetNHCB labels.Labels
|
||||
exemplars []exemplar.Exemplar
|
||||
ctNHCB *int64
|
||||
metricStringNHCB string
|
||||
|
||||
// Collates values from the classic histogram series to build
|
||||
// the converted histogram later.
|
||||
tempLsetNHCB labels.Labels
|
||||
tempNHCB convertnhcb.TempHistogram
|
||||
tempExemplars []exemplar.Exemplar
|
||||
tempExemplarCount int
|
||||
tempCT *int64
|
||||
|
||||
// Remembers the last base histogram metric name (assuming it's
|
||||
// a classic histogram) so we can tell if the next float series
|
||||
// is part of the same classic histogram.
|
||||
lastHistogramName string
|
||||
lastHistogramLabelsHash uint64
|
||||
lastHistogramExponential bool
|
||||
// Reused buffer for hashing labels.
|
||||
hBuffer []byte
|
||||
}
|
||||
|
||||
func NewNHCBParser(p Parser, st *labels.SymbolTable, keepClassicHistograms bool) Parser {
|
||||
return &NHCBParser{
|
||||
parser: p,
|
||||
keepClassicHistograms: keepClassicHistograms,
|
||||
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
|
||||
tempNHCB: convertnhcb.NewTempHistogram(),
|
||||
}
|
||||
}
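A minimal wrapping sketch, mirroring what the tests later in this diff do; scrapeBody is a placeholder for the scraped bytes.

// Wrap any line-based Parser so classic histogram series are collated into
// single NHCB histogram entries; pass true to also keep the classic series.
inner := NewOpenMetricsParser(scrapeBody, labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
p := NewNHCBParser(inner, labels.NewSymbolTable(), false)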
|
||||
|
||||
func (p *NHCBParser) Series() ([]byte, *int64, float64) {
|
||||
return p.bytes, p.ts, p.value
|
||||
}
|
||||
|
||||
func (p *NHCBParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
|
||||
if p.state == stateEmitting {
|
||||
return p.bytesNHCB, p.ts, p.hNHCB, p.fhNHCB
|
||||
}
|
||||
return p.bytes, p.ts, p.h, p.fh
|
||||
}
|
||||
|
||||
func (p *NHCBParser) Help() ([]byte, []byte) {
|
||||
return p.parser.Help()
|
||||
}
|
||||
|
||||
func (p *NHCBParser) Type() ([]byte, model.MetricType) {
|
||||
return p.bName, p.typ
|
||||
}
|
||||
|
||||
func (p *NHCBParser) Unit() ([]byte, []byte) {
|
||||
return p.parser.Unit()
|
||||
}
|
||||
|
||||
func (p *NHCBParser) Comment() []byte {
|
||||
return p.parser.Comment()
|
||||
}
|
||||
|
||||
func (p *NHCBParser) Metric(l *labels.Labels) string {
|
||||
if p.state == stateEmitting {
|
||||
*l = p.lsetNHCB
|
||||
return p.metricStringNHCB
|
||||
}
|
||||
*l = p.lset
|
||||
return p.metricString
|
||||
}
|
||||
|
||||
func (p *NHCBParser) Exemplar(ex *exemplar.Exemplar) bool {
|
||||
if p.state == stateEmitting {
|
||||
if len(p.exemplars) == 0 {
|
||||
return false
|
||||
}
|
||||
*ex = p.exemplars[0]
|
||||
p.exemplars = p.exemplars[1:]
|
||||
return true
|
||||
}
|
||||
return p.parser.Exemplar(ex)
|
||||
}
|
||||
|
||||
func (p *NHCBParser) CreatedTimestamp() *int64 {
|
||||
switch p.state {
|
||||
case stateStart:
|
||||
if p.entry == EntrySeries || p.entry == EntryHistogram {
|
||||
return p.parser.CreatedTimestamp()
|
||||
}
|
||||
case stateCollecting:
|
||||
return p.tempCT
|
||||
case stateEmitting:
|
||||
return p.ctNHCB
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *NHCBParser) Next() (Entry, error) {
|
||||
if p.state == stateEmitting {
|
||||
p.state = stateStart
|
||||
if p.entry == EntrySeries {
|
||||
isNHCB := p.handleClassicHistogramSeries(p.lset)
|
||||
if isNHCB && !p.keepClassicHistograms {
|
||||
// Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms.
|
||||
return p.Next()
|
||||
}
|
||||
}
|
||||
return p.entry, p.err
|
||||
}
|
||||
|
||||
p.entry, p.err = p.parser.Next()
|
||||
if p.err != nil {
|
||||
if errors.Is(p.err, io.EOF) && p.processNHCB() {
|
||||
return EntryHistogram, nil
|
||||
}
|
||||
return EntryInvalid, p.err
|
||||
}
|
||||
switch p.entry {
|
||||
case EntrySeries:
|
||||
p.bytes, p.ts, p.value = p.parser.Series()
|
||||
p.metricString = p.parser.Metric(&p.lset)
|
||||
// Check the label set to see if we can continue or need to emit the NHCB.
|
||||
var isNHCB bool
|
||||
if p.compareLabels() {
|
||||
// Labels differ. Check if we can emit the NHCB.
|
||||
if p.processNHCB() {
|
||||
return EntryHistogram, nil
|
||||
}
|
||||
isNHCB = p.handleClassicHistogramSeries(p.lset)
|
||||
} else {
|
||||
// Labels are the same. Check if after an exponential histogram.
|
||||
if p.lastHistogramExponential {
|
||||
isNHCB = false
|
||||
} else {
|
||||
isNHCB = p.handleClassicHistogramSeries(p.lset)
|
||||
}
|
||||
}
|
||||
if isNHCB && !p.keepClassicHistograms {
|
||||
// Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms.
|
||||
return p.Next()
|
||||
}
|
||||
return p.entry, p.err
|
||||
case EntryHistogram:
|
||||
p.bytes, p.ts, p.h, p.fh = p.parser.Histogram()
|
||||
p.metricString = p.parser.Metric(&p.lset)
|
||||
p.storeExponentialLabels()
|
||||
case EntryType:
|
||||
p.bName, p.typ = p.parser.Type()
|
||||
}
|
||||
if p.processNHCB() {
|
||||
return EntryHistogram, nil
|
||||
}
|
||||
return p.entry, p.err
|
||||
}
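A sketch of how a caller drains the wrapped parser; drain is a hypothetical helper, and the entry kinds and accessors are the ones exercised by testParse elsewhere in this diff (errors and io are already imported above).

func drain(p Parser) error {
	for {
		entry, err := p.Next()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		switch entry {
		case EntrySeries:
			// Classic float samples; for converted histograms these are only
			// returned when keepClassicHistograms is true.
			_, _, v := p.Series()
			_ = v
		case EntryHistogram:
			// Native histograms, including the converted NHCBs.
			_, _, h, fh := p.Histogram()
			_, _ = h, fh
		}
	}
}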
|
||||
|
||||
// compareLabels returns true if the labels have changed and we should emit the NHCB.
|
||||
func (p *NHCBParser) compareLabels() bool {
|
||||
if p.state != stateCollecting {
|
||||
return false
|
||||
}
|
||||
if p.typ != model.MetricTypeHistogram {
|
||||
// Different metric type.
|
||||
return true
|
||||
}
|
||||
if p.lastHistogramName != convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) {
|
||||
// Different metric name.
|
||||
return true
|
||||
}
|
||||
nextHash, _ := p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel)
|
||||
// Different label values.
|
||||
return p.lastHistogramLabelsHash != nextHash
|
||||
}
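A small sketch of why hashing without the bucket label works here: two buckets of the same classic histogram differ only in le, so they hash equal once labels.BucketLabel is excluded (HashWithoutLabels also skips the metric name, which is why the base name is compared separately above). The label values are illustrative.

var buf []byte
a := labels.FromStrings("__name__", "rpc_duration_seconds_bucket", "job", "api", "le", "0.5")
b := labels.FromStrings("__name__", "rpc_duration_seconds_bucket", "job", "api", "le", "+Inf")
ha, _ := a.HashWithoutLabels(buf, labels.BucketLabel)
hb, _ := b.HashWithoutLabels(buf, labels.BucketLabel)
fmt.Println(ha == hb) // true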
|
||||
|
||||
// Save the label set of the classic histogram without suffix and bucket `le` label.
|
||||
func (p *NHCBParser) storeClassicLabels() {
|
||||
p.lastHistogramName = convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName))
|
||||
p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel)
|
||||
p.lastHistogramExponential = false
|
||||
}
|
||||
|
||||
func (p *NHCBParser) storeExponentialLabels() {
|
||||
p.lastHistogramName = p.lset.Get(labels.MetricName)
|
||||
p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer)
|
||||
p.lastHistogramExponential = true
|
||||
}
|
||||
|
||||
// handleClassicHistogramSeries collates a classic histogram series for conversion to NHCB,
// provided it really is a classic histogram series (not an ordinary float series) and there
// is no native histogram with the same name (which, if present, is assumed to be processed
// right before the classic series). It returns true if the series was collated.
|
||||
func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool {
|
||||
if p.typ != model.MetricTypeHistogram {
|
||||
return false
|
||||
}
|
||||
mName := lset.Get(labels.MetricName)
|
||||
// Sanity check to ensure that the TYPE metadata entry name is the same as the base name.
|
||||
if convertnhcb.GetHistogramMetricBaseName(mName) != string(p.bName) {
|
||||
return false
|
||||
}
|
||||
switch {
|
||||
case strings.HasSuffix(mName, "_bucket") && lset.Has(labels.BucketLabel):
|
||||
le, err := strconv.ParseFloat(lset.Get(labels.BucketLabel), 64)
|
||||
if err == nil && !math.IsNaN(le) {
|
||||
p.processClassicHistogramSeries(lset, "_bucket", func(hist *convertnhcb.TempHistogram) {
|
||||
_ = hist.SetBucketCount(le, p.value)
|
||||
})
|
||||
return true
|
||||
}
|
||||
case strings.HasSuffix(mName, "_count"):
|
||||
p.processClassicHistogramSeries(lset, "_count", func(hist *convertnhcb.TempHistogram) {
|
||||
_ = hist.SetCount(p.value)
|
||||
})
|
||||
return true
|
||||
case strings.HasSuffix(mName, "_sum"):
|
||||
p.processClassicHistogramSeries(lset, "_sum", func(hist *convertnhcb.TempHistogram) {
|
||||
_ = hist.SetSum(p.value)
|
||||
})
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
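A worked example of the suffix dispatch above; the metric family is hypothetical.

// Given "# TYPE rpc_duration_seconds histogram", the cases above map:
//
//   rpc_duration_seconds_bucket{le="0.5"} 12  -> SetBucketCount(0.5, 12)
//   rpc_duration_seconds_count 20             -> SetCount(20)
//   rpc_duration_seconds_sum 9.5              -> SetSum(9.5)
//
// A float series under the same TYPE that matches none of the three suffixes,
// or whose "le" value is unparsable or NaN, is not collated and is returned
// as an ordinary series.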
|
||||
|
||||
func (p *NHCBParser) processClassicHistogramSeries(lset labels.Labels, suffix string, updateHist func(*convertnhcb.TempHistogram)) {
|
||||
if p.state != stateCollecting {
|
||||
p.storeClassicLabels()
|
||||
p.tempCT = p.parser.CreatedTimestamp()
|
||||
p.state = stateCollecting
|
||||
p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, suffix)
|
||||
}
|
||||
p.storeExemplars()
|
||||
updateHist(&p.tempNHCB)
|
||||
}
|
||||
|
||||
func (p *NHCBParser) storeExemplars() {
|
||||
for ex := p.nextExemplarPtr(); p.parser.Exemplar(ex); ex = p.nextExemplarPtr() {
|
||||
p.tempExemplarCount++
|
||||
}
|
||||
}
|
||||
|
||||
func (p *NHCBParser) nextExemplarPtr() *exemplar.Exemplar {
|
||||
switch {
|
||||
case p.tempExemplarCount == len(p.tempExemplars)-1:
|
||||
// Reuse the previously allocated exemplar; it was not filled up.
|
||||
case len(p.tempExemplars) == cap(p.tempExemplars):
|
||||
// Let the runtime grow the slice.
|
||||
p.tempExemplars = append(p.tempExemplars, exemplar.Exemplar{})
|
||||
default:
|
||||
// Take the next element into use.
|
||||
p.tempExemplars = p.tempExemplars[:len(p.tempExemplars)+1]
|
||||
}
|
||||
return &p.tempExemplars[len(p.tempExemplars)-1]
|
||||
}
|
||||
|
||||
func (p *NHCBParser) swapExemplars() {
|
||||
p.exemplars = p.tempExemplars[:p.tempExemplarCount]
|
||||
p.tempExemplars = p.tempExemplars[:0]
|
||||
}
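A worked trace of the exemplar buffer reuse above, with illustrative input; tempExemplarCount counts exemplars actually filled, while len(tempExemplars) counts slots handed out.

// start:                 count=0, len=0, cap=0
// series with exemplar:  nextExemplarPtr appends a slot (len=1), Exemplar
//                        fills it, count=1
// series w/o exemplar:   nextExemplarPtr appends a slot (len=2), Exemplar
//                        returns false, count stays 1; that empty slot is
//                        reused by the next series (count == len-1 case)
// processNHCB:           swapExemplars hands out tempExemplars[:count] and
//                        truncates the temp buffer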
|
||||
|
||||
// processNHCB converts the collated classic histogram series to NHCB and caches the info
|
||||
// to be returned to callers. Returns true if the conversion was successful.
|
||||
func (p *NHCBParser) processNHCB() bool {
|
||||
if p.state != stateCollecting {
|
||||
return false
|
||||
}
|
||||
h, fh, err := p.tempNHCB.Convert()
|
||||
if err == nil {
|
||||
if h != nil {
|
||||
if err := h.Validate(); err != nil {
|
||||
return false
|
||||
}
|
||||
p.hNHCB = h
|
||||
p.fhNHCB = nil
|
||||
} else if fh != nil {
|
||||
if err := fh.Validate(); err != nil {
|
||||
return false
|
||||
}
|
||||
p.hNHCB = nil
|
||||
p.fhNHCB = fh
|
||||
}
|
||||
p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + strings.ReplaceAll(p.tempLsetNHCB.DropMetricName().String(), ", ", ",")
|
||||
p.bytesNHCB = []byte(p.metricStringNHCB)
|
||||
p.lsetNHCB = p.tempLsetNHCB
|
||||
p.swapExemplars()
|
||||
p.ctNHCB = p.tempCT
|
||||
p.state = stateEmitting
|
||||
} else {
|
||||
p.state = stateStart
|
||||
}
|
||||
p.tempNHCB.Reset()
|
||||
p.tempExemplarCount = 0
|
||||
p.tempCT = nil
|
||||
return err == nil
|
||||
}
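A sketch of the conversion performed once a classic histogram is fully collected; the method names are the ones used above, the sample values are illustrative, and math is assumed to be imported.

th := convertnhcb.NewTempHistogram()
_ = th.SetBucketCount(0.5, 12)         // ..._bucket{le="0.5"} 12
_ = th.SetBucketCount(math.Inf(1), 20) // ..._bucket{le="+Inf"} 20
_ = th.SetCount(20)                    // ..._count 20
_ = th.SetSum(9.5)                     // ..._sum 9.5
// Convert yields either an integer or a float NHCB (the other pointer is nil);
// the +Inf boundary is not stored in the custom values.
h, fh, err := th.Convert()
_, _, _ = h, fh, err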
|
984
model/textparse/nhcbparse_test.go
Normal file
984
model/textparse/nhcbparse_test.go
Normal file
|
@ -0,0 +1,984 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package textparse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/model/exemplar"
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
|
||||
)
|
||||
|
||||
func TestNHCBParserOnOMParser(t *testing.T) {
|
||||
// The input is taken originally from TestOpenMetricsParse, with additional tests for the NHCBParser.
|
||||
|
||||
input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
|
||||
# TYPE go_gc_duration_seconds summary
|
||||
# UNIT go_gc_duration_seconds seconds
|
||||
go_gc_duration_seconds{quantile="0"} 4.9351e-05
|
||||
go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05
|
||||
go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05
|
||||
# HELP nohelp1
|
||||
# HELP help2 escape \ \n \\ \" \x chars
|
||||
# UNIT nounit
|
||||
go_gc_duration_seconds{quantile="1.0",a="b"} 8.3835e-05
|
||||
go_gc_duration_seconds_count 99
|
||||
some:aggregate:rate5m{a_b="c"} 1
|
||||
# HELP go_goroutines Number of goroutines that currently exist.
|
||||
# TYPE go_goroutines gauge
|
||||
go_goroutines 33 123.123
|
||||
# TYPE hh histogram
|
||||
hh_bucket{le="+Inf"} 1
|
||||
# TYPE gh gaugehistogram
|
||||
gh_bucket{le="+Inf"} 1
|
||||
# TYPE hhh histogram
|
||||
hhh_bucket{le="+Inf"} 1 # {id="histogram-bucket-test"} 4
|
||||
hhh_count 1 # {id="histogram-count-test"} 4
|
||||
# TYPE ggh gaugehistogram
|
||||
ggh_bucket{le="+Inf"} 1 # {id="gaugehistogram-bucket-test",xx="yy"} 4 123.123
|
||||
ggh_count 1 # {id="gaugehistogram-count-test",xx="yy"} 4 123.123
|
||||
# TYPE smr_seconds summary
|
||||
smr_seconds_count 2.0 # {id="summary-count-test"} 1 123.321
|
||||
smr_seconds_sum 42.0 # {id="summary-sum-test"} 1 123.321
|
||||
# TYPE ii info
|
||||
ii{foo="bar"} 1
|
||||
# TYPE ss stateset
|
||||
ss{ss="foo"} 1
|
||||
ss{ss="bar"} 0
|
||||
ss{A="a"} 0
|
||||
# TYPE un unknown
|
||||
_metric_starting_with_underscore 1
|
||||
testmetric{_label_starting_with_underscore="foo"} 1
|
||||
testmetric{label="\"bar\""} 1
|
||||
# HELP foo Counter with and without labels to certify CT is parsed for both cases
|
||||
# TYPE foo counter
|
||||
foo_total 17.0 1520879607.789 # {id="counter-test"} 5
|
||||
foo_created 1520872607.123
|
||||
foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5
|
||||
foo_created{a="b"} 1520872607.123
|
||||
# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines a far
|
||||
# TYPE bar summary
|
||||
bar_count 17.0
|
||||
bar_sum 324789.3
|
||||
bar{quantile="0.95"} 123.7
|
||||
bar{quantile="0.99"} 150.0
|
||||
bar_created 1520872608.124
|
||||
# HELP baz Histogram with the same objective as above's summary
|
||||
# TYPE baz histogram
|
||||
baz_bucket{le="0.0"} 0
|
||||
baz_bucket{le="+Inf"} 17
|
||||
baz_count 17
|
||||
baz_sum 324789.3
|
||||
baz_created 1520872609.125
|
||||
# HELP fizz_created Gauge which shouldn't be parsed as CT
|
||||
# TYPE fizz_created gauge
|
||||
fizz_created 17.0
|
||||
# HELP something Histogram with _created between buckets and summary
|
||||
# TYPE something histogram
|
||||
something_count 18
|
||||
something_sum 324789.4
|
||||
something_created 1520430001
|
||||
something_bucket{le="0.0"} 1
|
||||
something_bucket{le="+Inf"} 18
|
||||
something_count{a="b"} 9
|
||||
something_sum{a="b"} 42123.0
|
||||
something_bucket{a="b",le="0.0"} 8
|
||||
something_bucket{a="b",le="+Inf"} 9
|
||||
something_created{a="b"} 1520430002
|
||||
# HELP yum Summary with _created between sum and quantiles
|
||||
# TYPE yum summary
|
||||
yum_count 20
|
||||
yum_sum 324789.5
|
||||
yum_created 1520430003
|
||||
yum{quantile="0.95"} 123.7
|
||||
yum{quantile="0.99"} 150.0
|
||||
# HELP foobar Summary with _created as the first line
|
||||
# TYPE foobar summary
|
||||
foobar_count 21
|
||||
foobar_created 1520430004
|
||||
foobar_sum 324789.6
|
||||
foobar{quantile="0.95"} 123.8
|
||||
foobar{quantile="0.99"} 150.1`
|
||||
|
||||
input += "\n# HELP metric foo\x00bar"
|
||||
input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
|
||||
input += "\n# EOF\n"
|
||||
|
||||
exp := []parsedEntry{
|
||||
{
|
||||
m: "go_gc_duration_seconds",
|
||||
help: "A summary of the GC invocation durations.",
|
||||
}, {
|
||||
m: "go_gc_duration_seconds",
|
||||
typ: model.MetricTypeSummary,
|
||||
}, {
|
||||
m: "go_gc_duration_seconds",
|
||||
unit: "seconds",
|
||||
}, {
|
||||
m: `go_gc_duration_seconds{quantile="0"}`,
|
||||
v: 4.9351e-05,
|
||||
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"),
|
||||
}, {
|
||||
m: `go_gc_duration_seconds{quantile="0.25"}`,
|
||||
v: 7.424100000000001e-05,
|
||||
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
|
||||
}, {
|
||||
m: `go_gc_duration_seconds{quantile="0.5",a="b"}`,
|
||||
v: 8.3835e-05,
|
||||
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"),
|
||||
}, {
|
||||
m: "nohelp1",
|
||||
help: "",
|
||||
}, {
|
||||
m: "help2",
|
||||
help: "escape \\ \n \\ \" \\x chars",
|
||||
}, {
|
||||
m: "nounit",
|
||||
unit: "",
|
||||
}, {
|
||||
m: `go_gc_duration_seconds{quantile="1.0",a="b"}`,
|
||||
v: 8.3835e-05,
|
||||
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
|
||||
}, {
|
||||
m: `go_gc_duration_seconds_count`,
|
||||
v: 99,
|
||||
lset: labels.FromStrings("__name__", "go_gc_duration_seconds_count"),
|
||||
}, {
|
||||
m: `some:aggregate:rate5m{a_b="c"}`,
|
||||
v: 1,
|
||||
lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"),
|
||||
}, {
|
||||
m: "go_goroutines",
|
||||
help: "Number of goroutines that currently exist.",
|
||||
}, {
|
||||
m: "go_goroutines",
|
||||
typ: model.MetricTypeGauge,
|
||||
}, {
|
||||
m: `go_goroutines`,
|
||||
v: 33,
|
||||
t: int64p(123123),
|
||||
lset: labels.FromStrings("__name__", "go_goroutines"),
|
||||
}, {
|
||||
m: "hh",
|
||||
typ: model.MetricTypeHistogram,
|
||||
}, {
|
||||
m: `hh{}`,
|
||||
shs: &histogram.Histogram{
|
||||
Schema: histogram.CustomBucketsSchema,
|
||||
Count: 1,
|
||||
Sum: 0.0,
|
||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
|
||||
PositiveBuckets: []int64{1},
|
||||
// Custom values are empty as we do not store the +Inf boundary.
|
||||
},
|
||||
lset: labels.FromStrings("__name__", "hh"),
|
||||
}, {
|
||||
m: "gh",
|
||||
typ: model.MetricTypeGaugeHistogram,
|
||||
}, {
|
||||
m: `gh_bucket{le="+Inf"}`,
|
||||
v: 1,
|
||||
lset: labels.FromStrings("__name__", "gh_bucket", "le", "+Inf"),
|
||||
}, {
|
||||
m: "hhh",
|
||||
typ: model.MetricTypeHistogram,
|
||||
}, {
|
||||
m: `hhh{}`,
|
||||
shs: &histogram.Histogram{
|
||||
Schema: histogram.CustomBucketsSchema,
|
||||
Count: 1,
|
||||
Sum: 0.0,
|
||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
|
||||
PositiveBuckets: []int64{1},
|
||||
// Custom values are empty as we do not store the +Inf boundary.
|
||||
},
|
||||
lset: labels.FromStrings("__name__", "hhh"),
|
||||
es: []exemplar.Exemplar{
|
||||
{Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4},
|
||||
{Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4},
|
||||
},
|
||||
}, {
|
||||
m: "ggh",
|
||||
typ: model.MetricTypeGaugeHistogram,
|
||||
}, {
|
||||
m: `ggh_bucket{le="+Inf"}`,
|
||||
v: 1,
|
||||
lset: labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"),
|
||||
es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}},
|
||||
}, {
|
||||
m: `ggh_count`,
|
||||
v: 1,
|
||||
lset: labels.FromStrings("__name__", "ggh_count"),
|
||||
es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}},
|
||||
}, {
|
||||
m: "smr_seconds",
|
||||
typ: model.MetricTypeSummary,
|
||||
}, {
|
||||
m: `smr_seconds_count`,
|
||||
v: 2,
|
||||
lset: labels.FromStrings("__name__", "smr_seconds_count"),
|
||||
es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321}},
|
||||
}, {
|
||||
m: `smr_seconds_sum`,
|
||||
v: 42,
|
||||
lset: labels.FromStrings("__name__", "smr_seconds_sum"),
|
||||
es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321}},
|
||||
}, {
|
||||
m: "ii",
|
||||
typ: model.MetricTypeInfo,
|
||||
}, {
|
||||
m: `ii{foo="bar"}`,
|
||||
v: 1,
|
||||
lset: labels.FromStrings("__name__", "ii", "foo", "bar"),
|
||||
}, {
|
||||
m: "ss",
|
||||
typ: model.MetricTypeStateset,
|
||||
}, {
|
||||
m: `ss{ss="foo"}`,
|
||||
v: 1,
|
||||
lset: labels.FromStrings("__name__", "ss", "ss", "foo"),
|
||||
}, {
|
||||
m: `ss{ss="bar"}`,
|
||||
v: 0,
|
||||
lset: labels.FromStrings("__name__", "ss", "ss", "bar"),
|
||||
}, {
|
||||
m: `ss{A="a"}`,
|
||||
v: 0,
|
||||
lset: labels.FromStrings("A", "a", "__name__", "ss"),
|
||||
}, {
|
||||
m: "un",
|
||||
typ: model.MetricTypeUnknown,
|
||||
}, {
|
||||
m: "_metric_starting_with_underscore",
|
||||
v: 1,
|
||||
lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"),
|
||||
}, {
|
||||
m: "testmetric{_label_starting_with_underscore=\"foo\"}",
|
||||
v: 1,
|
||||
lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"),
|
||||
}, {
|
||||
m: "testmetric{label=\"\\\"bar\\\"\"}",
|
||||
v: 1,
|
||||
lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
|
||||
}, {
|
||||
m: "foo",
|
||||
help: "Counter with and without labels to certify CT is parsed for both cases",
|
||||
}, {
|
||||
m: "foo",
|
||||
typ: model.MetricTypeCounter,
|
||||
}, {
|
||||
m: "foo_total",
|
||||
v: 17,
|
||||
lset: labels.FromStrings("__name__", "foo_total"),
|
||||
t: int64p(1520879607789),
|
||||
es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "counter-test"), Value: 5}},
|
||||
ct: int64p(1520872607123),
|
||||
}, {
|
||||
m: `foo_total{a="b"}`,
|
||||
v: 17.0,
|
||||
lset: labels.FromStrings("__name__", "foo_total", "a", "b"),
|
||||
t: int64p(1520879607789),
|
||||
es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "counter-test"), Value: 5}},
|
||||
ct: int64p(1520872607123),
|
||||
}, {
|
||||
m: "bar",
|
||||
help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far",
|
||||
}, {
|
||||
m: "bar",
|
||||
typ: model.MetricTypeSummary,
|
||||
}, {
|
||||
m: "bar_count",
|
||||
v: 17.0,
|
||||
lset: labels.FromStrings("__name__", "bar_count"),
|
||||
ct: int64p(1520872608124),
|
||||
}, {
|
||||
m: "bar_sum",
|
||||
v: 324789.3,
|
||||
lset: labels.FromStrings("__name__", "bar_sum"),
|
||||
ct: int64p(1520872608124),
|
||||
}, {
|
||||
m: `bar{quantile="0.95"}`,
|
||||
v: 123.7,
|
||||
lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"),
|
||||
ct: int64p(1520872608124),
|
||||
}, {
|
||||
m: `bar{quantile="0.99"}`,
|
||||
v: 150.0,
|
||||
lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"),
|
||||
ct: int64p(1520872608124),
|
||||
}, {
|
||||
m: "baz",
|
||||
help: "Histogram with the same objective as above's summary",
|
||||
}, {
|
||||
m: "baz",
|
||||
typ: model.MetricTypeHistogram,
|
||||
}, {
|
||||
m: `baz{}`,
|
||||
shs: &histogram.Histogram{
|
||||
Schema: histogram.CustomBucketsSchema,
|
||||
Count: 17,
|
||||
Sum: 324789.3,
|
||||
PositiveSpans: []histogram.Span{{Offset: 1, Length: 1}}, // The first bucket has 0 count so we don't store it and Offset is 1.
|
||||
PositiveBuckets: []int64{17},
|
||||
CustomValues: []float64{0.0}, // We do not store the +Inf boundary.
|
||||
},
|
||||
lset: labels.FromStrings("__name__", "baz"),
|
||||
ct: int64p(1520872609125),
|
||||
}, {
|
||||
m: "fizz_created",
|
||||
help: "Gauge which shouldn't be parsed as CT",
|
||||
}, {
|
||||
m: "fizz_created",
|
||||
typ: model.MetricTypeGauge,
|
||||
}, {
|
||||
m: `fizz_created`,
|
||||
v: 17,
|
||||
lset: labels.FromStrings("__name__", "fizz_created"),
|
||||
}, {
|
||||
m: "something",
|
||||
help: "Histogram with _created between buckets and summary",
|
||||
}, {
|
||||
m: "something",
|
||||
typ: model.MetricTypeHistogram,
|
||||
}, {
|
||||
m: `something{}`,
|
||||
shs: &histogram.Histogram{
|
||||
Schema: histogram.CustomBucketsSchema,
|
||||
Count: 18,
|
||||
Sum: 324789.4,
|
||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
|
||||
PositiveBuckets: []int64{1, 16},
|
||||
CustomValues: []float64{0.0}, // We do not store the +Inf boundary.
|
||||
},
|
||||
lset: labels.FromStrings("__name__", "something"),
|
||||
ct: int64p(1520430001000),
|
||||
}, {
|
||||
m: `something{a="b"}`,
|
||||
shs: &histogram.Histogram{
|
||||
Schema: histogram.CustomBucketsSchema,
|
||||
Count: 9,
|
||||
Sum: 42123.0,
|
||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
|
||||
PositiveBuckets: []int64{8, -7},
|
||||
CustomValues: []float64{0.0}, // We do not store the +Inf boundary.
|
||||
},
|
||||
lset: labels.FromStrings("__name__", "something", "a", "b"),
|
||||
ct: int64p(1520430002000),
|
||||
}, {
|
||||
m: "yum",
|
||||
help: "Summary with _created between sum and quantiles",
|
||||
}, {
|
||||
m: "yum",
|
||||
typ: model.MetricTypeSummary,
|
||||
}, {
|
||||
m: `yum_count`,
|
||||
v: 20,
|
||||
lset: labels.FromStrings("__name__", "yum_count"),
|
||||
ct: int64p(1520430003000),
|
||||
}, {
|
||||
m: `yum_sum`,
|
||||
v: 324789.5,
|
||||
lset: labels.FromStrings("__name__", "yum_sum"),
|
||||
ct: int64p(1520430003000),
|
||||
}, {
|
||||
m: `yum{quantile="0.95"}`,
|
||||
v: 123.7,
|
||||
lset: labels.FromStrings("__name__", "yum", "quantile", "0.95"),
|
||||
ct: int64p(1520430003000),
|
||||
}, {
|
||||
m: `yum{quantile="0.99"}`,
|
||||
v: 150.0,
|
||||
lset: labels.FromStrings("__name__", "yum", "quantile", "0.99"),
|
||||
ct: int64p(1520430003000),
|
||||
}, {
|
||||
m: "foobar",
|
||||
help: "Summary with _created as the first line",
|
||||
}, {
|
||||
m: "foobar",
|
||||
typ: model.MetricTypeSummary,
|
||||
}, {
|
||||
m: `foobar_count`,
|
||||
v: 21,
|
||||
lset: labels.FromStrings("__name__", "foobar_count"),
|
||||
ct: int64p(1520430004000),
|
||||
}, {
|
||||
m: `foobar_sum`,
|
||||
v: 324789.6,
|
||||
lset: labels.FromStrings("__name__", "foobar_sum"),
|
||||
ct: int64p(1520430004000),
|
||||
}, {
|
||||
m: `foobar{quantile="0.95"}`,
|
||||
v: 123.8,
|
||||
lset: labels.FromStrings("__name__", "foobar", "quantile", "0.95"),
|
||||
ct: int64p(1520430004000),
|
||||
}, {
|
||||
m: `foobar{quantile="0.99"}`,
|
||||
v: 150.1,
|
||||
lset: labels.FromStrings("__name__", "foobar", "quantile", "0.99"),
|
||||
ct: int64p(1520430004000),
|
||||
}, {
|
||||
m: "metric",
|
||||
help: "foo\x00bar",
|
||||
}, {
|
||||
m: "null_byte_metric{a=\"abc\x00\"}",
|
||||
v: 1,
|
||||
lset: labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"),
|
||||
},
|
||||
}
|
||||
|
||||
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
|
||||
p = NewNHCBParser(p, labels.NewSymbolTable(), false)
|
||||
got := testParse(t, p)
|
||||
requireEntries(t, exp, got)
|
||||
}
|
||||
|
||||
func TestNHCBParserOMParser_MultipleHistograms(t *testing.T) {
|
||||
// The input is taken originally from TestOpenMetricsParse, with additional tests for the NHCBParser.
|
||||
|
||||
input := `# HELP something Histogram with _created between buckets and summary
|
||||
# TYPE something histogram
|
||||
something_count 18
|
||||
something_sum 324789.4
|
||||
something_bucket{le="0.0"} 1 # {id="something-test"} -2.0
|
||||
something_bucket{le="1.0"} 16 # {id="something-test"} 0.5
|
||||
something_bucket{le="+Inf"} 18 # {id="something-test"} 8
|
||||
something_count{a="b"} 9
|
||||
something_sum{a="b"} 42123.0
|
||||
something_bucket{a="b",le="0.0"} 8 # {id="something-test"} 0.0 123.321
|
||||
something_bucket{a="b",le="1.0"} 8
|
||||
something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000
|
||||
# EOF
|
||||
`
|
||||
|
||||
exp := []parsedEntry{
|
||||
{
|
||||
m: "something",
|
||||
help: "Histogram with _created between buckets and summary",
|
||||
}, {
|
||||
m: "something",
|
||||
typ: model.MetricTypeHistogram,
|
||||
}, {
|
||||
m: `something{}`,
|
||||
shs: &histogram.Histogram{
|
||||
Schema: histogram.CustomBucketsSchema,
|
||||
Count: 18,
|
||||
Sum: 324789.4,
|
||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}},
|
||||
PositiveBuckets: []int64{1, 14, -13},
|
||||
CustomValues: []float64{0.0, 1.0}, // We do not store the +Inf boundary.
|
||||
},
|
||||
lset: labels.FromStrings("__name__", "something"),
|
||||
es: []exemplar.Exemplar{
|
||||
{Labels: labels.FromStrings("id", "something-test"), Value: -2.0},
|
||||
{Labels: labels.FromStrings("id", "something-test"), Value: 0.5},
|
||||
{Labels: labels.FromStrings("id", "something-test"), Value: 8.0},
|
||||
},
|
||||
}, {
|
||||
m: `something{a="b"}`,
|
||||
shs: &histogram.Histogram{
|
||||
Schema: histogram.CustomBucketsSchema,
|
||||
Count: 9,
|
||||
Sum: 42123.0,
|
||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}},
|
||||
PositiveBuckets: []int64{8, -8, 1},
|
||||
CustomValues: []float64{0.0, 1.0}, // We do not store the +Inf boundary.
|
||||
},
|
||||
lset: labels.FromStrings("__name__", "something", "a", "b"),
|
||||
es: []exemplar.Exemplar{
|
||||
{Labels: labels.FromStrings("id", "something-test"), Value: 0.0, HasTs: true, Ts: 123321},
|
||||
{Labels: labels.FromStrings("id", "something-test"), Value: 2e100, HasTs: true, Ts: 123000},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
|
||||
|
||||
p = NewNHCBParser(p, labels.NewSymbolTable(), false)
|
||||
got := testParse(t, p)
|
||||
requireEntries(t, exp, got)
|
||||
}
|
||||
|
||||
// Verify the requirement tables from
|
||||
// https://github.com/prometheus/prometheus/issues/13532 .
|
||||
// "classic" means the option "always_scrape_classic_histograms".
|
||||
// "nhcb" means the option "convert_classic_histograms_to_nhcb".
|
||||
//
|
||||
// Case 1. Only classic histogram is exposed.
|
||||
//
|
||||
// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |.
|
||||
// | classic=false, nhcb=false | YES | NO | NO |.
|
||||
// | classic=true, nhcb=false | YES | NO | NO |.
|
||||
// | classic=false, nhcb=true | NO | NO | YES |.
|
||||
// | classic=true, nhcb=true | YES | NO | YES |.
|
||||
//
|
||||
// Case 2. Both classic and exponential histograms are exposed.
|
||||
//
|
||||
// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |.
|
||||
// | classic=false, nhcb=false | NO | YES | NO |.
|
||||
// | classic=true, nhcb=false | YES | YES | NO |.
|
||||
// | classic=false, nhcb=true | NO | YES | NO |.
|
||||
// | classic=true, nhcb=true | YES | YES | NO |.
|
||||
//
|
||||
// Case 3. Only exponential histogram is exposed.
|
||||
//
|
||||
// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |.
|
||||
// | classic=false, nhcb=false | NO | YES | NO |.
|
||||
// | classic=true, nhcb=false | NO | YES | NO |.
|
||||
// | classic=false, nhcb=true | NO | YES | NO |.
|
||||
// | classic=true, nhcb=true | NO | YES | NO |.
|
||||
func TestNHCBParser_NoNHCBWhenExponential(t *testing.T) {
|
||||
type requirement struct {
|
||||
expectClassic bool
|
||||
expectExponential bool
|
||||
expectNHCB bool
|
||||
}
|
||||
|
||||
cases := []map[string]requirement{
|
||||
// Case 1.
|
||||
{
|
||||
"classic=false, nhcb=false": {expectClassic: true, expectExponential: false, expectNHCB: false},
|
||||
"classic=true, nhcb=false": {expectClassic: true, expectExponential: false, expectNHCB: false},
|
||||
"classic=false, nhcb=true": {expectClassic: false, expectExponential: false, expectNHCB: true},
|
||||
"classic=true, nhcb=true": {expectClassic: true, expectExponential: false, expectNHCB: true},
|
||||
},
|
||||
// Case 2.
|
||||
{
|
||||
"classic=false, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false},
|
||||
"classic=true, nhcb=false": {expectClassic: true, expectExponential: true, expectNHCB: false},
|
||||
"classic=false, nhcb=true": {expectClassic: false, expectExponential: true, expectNHCB: false},
|
||||
"classic=true, nhcb=true": {expectClassic: true, expectExponential: true, expectNHCB: false},
|
||||
},
|
||||
// Case 3.
|
||||
{
|
||||
"classic=false, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false},
|
||||
"classic=true, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false},
|
||||
"classic=false, nhcb=true": {expectClassic: false, expectExponential: true, expectNHCB: false},
|
||||
"classic=true, nhcb=true": {expectClassic: false, expectExponential: true, expectNHCB: false},
|
||||
},
|
||||
}
|
||||
|
||||
// parserFactory creates a parser for the given keep-classic-histograms setting.
|
||||
type parserFactory func(bool) Parser
|
||||
|
||||
type testCase struct {
|
||||
name string
|
||||
parser parserFactory
|
||||
classic bool
|
||||
nhcb bool
|
||||
exp []parsedEntry
|
||||
}
|
||||
|
||||
type parserOptions struct {
|
||||
useUTF8sep bool
|
||||
hasCreatedTimeStamp bool
|
||||
}
|
||||
// Each entry defines the parser name, the Parser factory, the test cases
// supported by that parser, and the parser options.
|
||||
parsers := []func() (string, parserFactory, []int, parserOptions){
|
||||
func() (string, parserFactory, []int, parserOptions) {
|
||||
factory := func(keepClassic bool) Parser {
|
||||
inputBuf := createTestProtoBufHistogram(t)
|
||||
return NewProtobufParser(inputBuf.Bytes(), keepClassic, labels.NewSymbolTable())
|
||||
}
|
||||
return "ProtoBuf", factory, []int{1, 2, 3}, parserOptions{useUTF8sep: true, hasCreatedTimeStamp: true}
|
||||
},
|
||||
func() (string, parserFactory, []int, parserOptions) {
|
||||
factory := func(keepClassic bool) Parser {
|
||||
input := createTestOpenMetricsHistogram()
|
||||
return NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
|
||||
}
|
||||
return "OpenMetrics", factory, []int{1}, parserOptions{hasCreatedTimeStamp: true}
|
||||
},
|
||||
func() (string, parserFactory, []int, parserOptions) {
|
||||
factory := func(keepClassic bool) Parser {
|
||||
input := createTestPromHistogram()
|
||||
return NewPromParser([]byte(input), labels.NewSymbolTable())
|
||||
}
|
||||
return "Prometheus", factory, []int{1}, parserOptions{}
|
||||
},
|
||||
}
|
||||
|
||||
testCases := []testCase{}
|
||||
for _, parser := range parsers {
|
||||
for _, classic := range []bool{false, true} {
|
||||
for _, nhcb := range []bool{false, true} {
|
||||
parserName, parser, supportedCases, options := parser()
|
||||
requirementName := "classic=" + strconv.FormatBool(classic) + ", nhcb=" + strconv.FormatBool(nhcb)
|
||||
tc := testCase{
|
||||
name: "parser=" + parserName + ", " + requirementName,
|
||||
parser: parser,
|
||||
classic: classic,
|
||||
nhcb: nhcb,
|
||||
exp: []parsedEntry{},
|
||||
}
|
||||
for _, caseNumber := range supportedCases {
|
||||
caseI := cases[caseNumber-1]
|
||||
req, ok := caseI[requirementName]
|
||||
require.True(t, ok, "Case %d does not have requirement %s", caseNumber, requirementName)
|
||||
metric := "test_histogram" + strconv.Itoa(caseNumber)
|
||||
tc.exp = append(tc.exp, parsedEntry{
|
||||
m: metric,
|
||||
help: "Test histogram " + strconv.Itoa(caseNumber),
|
||||
})
|
||||
tc.exp = append(tc.exp, parsedEntry{
|
||||
m: metric,
|
||||
typ: model.MetricTypeHistogram,
|
||||
})
|
||||
|
||||
var ct *int64
|
||||
if options.hasCreatedTimeStamp {
|
||||
ct = int64p(1000)
|
||||
}
|
||||
|
||||
var bucketForMetric func(string) string
|
||||
if options.useUTF8sep {
|
||||
bucketForMetric = func(s string) string {
|
||||
return "_bucket\xffle\xff" + s
|
||||
}
|
||||
} else {
|
||||
bucketForMetric = func(s string) string {
|
||||
return "_bucket{le=\"" + s + "\"}"
|
||||
}
|
||||
}
|
||||
|
||||
if req.expectExponential {
|
||||
// Always expect exponential histogram first.
|
||||
exponentialSeries := []parsedEntry{
|
||||
{
|
||||
m: metric,
|
||||
shs: &histogram.Histogram{
|
||||
Schema: 3,
|
||||
Count: 175,
|
||||
Sum: 0.0008280461746287094,
|
||||
ZeroThreshold: 2.938735877055719e-39,
|
||||
ZeroCount: 2,
|
||||
PositiveSpans: []histogram.Span{{Offset: -161, Length: 1}, {Offset: 8, Length: 3}},
|
||||
NegativeSpans: []histogram.Span{{Offset: -162, Length: 1}, {Offset: 23, Length: 4}},
|
||||
PositiveBuckets: []int64{1, 2, -1, -1},
|
||||
NegativeBuckets: []int64{1, 3, -2, -1, 1},
|
||||
},
|
||||
lset: labels.FromStrings("__name__", metric),
|
||||
t: int64p(1234568),
|
||||
ct: ct,
|
||||
},
|
||||
}
|
||||
tc.exp = append(tc.exp, exponentialSeries...)
|
||||
}
|
||||
if req.expectClassic {
|
||||
// Always expect classic histogram series after exponential.
|
||||
classicSeries := []parsedEntry{
|
||||
{
|
||||
m: metric + "_count",
|
||||
v: 175,
|
||||
lset: labels.FromStrings("__name__", metric+"_count"),
|
||||
t: int64p(1234568),
|
||||
ct: ct,
|
||||
},
|
||||
{
|
||||
m: metric + "_sum",
|
||||
v: 0.0008280461746287094,
|
||||
lset: labels.FromStrings("__name__", metric+"_sum"),
|
||||
t: int64p(1234568),
|
||||
ct: ct,
|
||||
},
|
||||
{
|
||||
m: metric + bucketForMetric("-0.0004899999999999998"),
|
||||
v: 2,
|
||||
lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0004899999999999998"),
|
||||
t: int64p(1234568),
|
||||
ct: ct,
|
||||
},
|
||||
{
|
||||
m: metric + bucketForMetric("-0.0003899999999999998"),
|
||||
v: 4,
|
||||
lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0003899999999999998"),
|
||||
t: int64p(1234568),
|
||||
ct: ct,
|
||||
},
|
||||
{
|
||||
m: metric + bucketForMetric("-0.0002899999999999998"),
|
||||
v: 16,
|
||||
lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0002899999999999998"),
|
||||
t: int64p(1234568),
|
||||
ct: ct,
|
||||
},
|
||||
{
|
||||
m: metric + bucketForMetric("+Inf"),
|
||||
v: 175,
|
||||
lset: labels.FromStrings("__name__", metric+"_bucket", "le", "+Inf"),
|
||||
t: int64p(1234568),
|
||||
ct: ct,
|
||||
},
|
||||
}
|
||||
tc.exp = append(tc.exp, classicSeries...)
|
||||
}
|
||||
if req.expectNHCB {
|
||||
// Always expect NHCB series after classic.
|
||||
nhcbSeries := []parsedEntry{
|
||||
{
|
||||
m: metric + "{}",
|
||||
shs: &histogram.Histogram{
|
||||
Schema: histogram.CustomBucketsSchema,
|
||||
Count: 175,
|
||||
Sum: 0.0008280461746287094,
|
||||
PositiveSpans: []histogram.Span{{Length: 4}},
|
||||
PositiveBuckets: []int64{2, 0, 10, 147},
|
||||
CustomValues: []float64{-0.0004899999999999998, -0.0003899999999999998, -0.0002899999999999998},
|
||||
},
|
||||
lset: labels.FromStrings("__name__", metric),
|
||||
t: int64p(1234568),
|
||||
ct: ct,
|
||||
},
|
||||
}
|
||||
tc.exp = append(tc.exp, nhcbSeries...)
|
||||
}
|
||||
}
|
||||
testCases = append(testCases, tc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
p := tc.parser(tc.classic)
|
||||
if tc.nhcb {
|
||||
p = NewNHCBParser(p, labels.NewSymbolTable(), tc.classic)
|
||||
}
|
||||
got := testParse(t, p)
|
||||
requireEntries(t, tc.exp, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func createTestProtoBufHistogram(t *testing.T) *bytes.Buffer {
|
||||
testMetricFamilies := []string{`name: "test_histogram1"
|
||||
help: "Test histogram 1"
|
||||
type: HISTOGRAM
|
||||
metric: <
|
||||
histogram: <
|
||||
created_timestamp: <
|
||||
seconds: 1
|
||||
nanos: 1
|
||||
>
|
||||
sample_count: 175
|
||||
sample_sum: 0.0008280461746287094
|
||||
bucket: <
|
||||
cumulative_count: 2
|
||||
upper_bound: -0.0004899999999999998
|
||||
>
|
||||
bucket: <
|
||||
cumulative_count: 4
|
||||
upper_bound: -0.0003899999999999998
|
||||
>
|
||||
bucket: <
|
||||
cumulative_count: 16
|
||||
upper_bound: -0.0002899999999999998
|
||||
>
|
||||
>
|
||||
timestamp_ms: 1234568
|
||||
>`, `name: "test_histogram2"
|
||||
help: "Test histogram 2"
|
||||
type: HISTOGRAM
|
||||
metric: <
|
||||
histogram: <
|
||||
created_timestamp: <
|
||||
seconds: 1
|
||||
nanos: 1
|
||||
>
|
||||
sample_count: 175
|
||||
sample_sum: 0.0008280461746287094
|
||||
bucket: <
|
||||
cumulative_count: 2
|
||||
upper_bound: -0.0004899999999999998
|
||||
>
|
||||
bucket: <
|
||||
cumulative_count: 4
|
||||
upper_bound: -0.0003899999999999998
|
||||
>
|
||||
bucket: <
|
||||
cumulative_count: 16
|
||||
upper_bound: -0.0002899999999999998
|
||||
>
|
||||
schema: 3
|
||||
zero_threshold: 2.938735877055719e-39
|
||||
zero_count: 2
|
||||
negative_span: <
|
||||
offset: -162
|
||||
length: 1
|
||||
>
|
||||
negative_span: <
|
||||
offset: 23
|
||||
length: 4
|
||||
>
|
||||
negative_delta: 1
|
||||
negative_delta: 3
|
||||
negative_delta: -2
|
||||
negative_delta: -1
|
||||
negative_delta: 1
|
||||
positive_span: <
|
||||
offset: -161
|
||||
length: 1
|
||||
>
|
||||
positive_span: <
|
||||
offset: 8
|
||||
length: 3
|
||||
>
|
||||
positive_delta: 1
|
||||
positive_delta: 2
|
||||
positive_delta: -1
|
||||
positive_delta: -1
|
||||
>
|
||||
timestamp_ms: 1234568
|
||||
>`, `name: "test_histogram3"
|
||||
help: "Test histogram 3"
|
||||
type: HISTOGRAM
|
||||
metric: <
|
||||
histogram: <
|
||||
created_timestamp: <
|
||||
seconds: 1
|
||||
nanos: 1
|
||||
>
|
||||
sample_count: 175
|
||||
sample_sum: 0.0008280461746287094
|
||||
schema: 3
|
||||
zero_threshold: 2.938735877055719e-39
|
||||
zero_count: 2
|
||||
negative_span: <
|
||||
offset: -162
|
||||
length: 1
|
||||
>
|
||||
negative_span: <
|
||||
offset: 23
|
||||
length: 4
|
||||
>
|
||||
negative_delta: 1
|
||||
negative_delta: 3
|
||||
negative_delta: -2
|
||||
negative_delta: -1
|
||||
negative_delta: 1
|
||||
positive_span: <
|
||||
offset: -161
|
||||
length: 1
|
||||
>
|
||||
positive_span: <
|
||||
offset: 8
|
||||
length: 3
|
||||
>
|
||||
positive_delta: 1
|
||||
positive_delta: 2
|
||||
positive_delta: -1
|
||||
positive_delta: -1
|
||||
>
|
||||
timestamp_ms: 1234568
|
||||
>
|
||||
`}
|
||||
|
||||
varintBuf := make([]byte, binary.MaxVarintLen32)
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
for _, tmf := range testMetricFamilies {
|
||||
pb := &dto.MetricFamily{}
|
||||
// From text to proto message.
|
||||
require.NoError(t, proto.UnmarshalText(tmf, pb))
|
||||
// From proto message to binary protobuf.
|
||||
protoBuf, err := proto.Marshal(pb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write first length, then binary protobuf.
|
||||
varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf)))
|
||||
buf.Write(varintBuf[:varintLength])
|
||||
buf.Write(protoBuf)
|
||||
}
|
||||
|
||||
return buf
|
||||
}
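For symmetry, a sketch of reading back the length-delimited stream the helper above writes, using the same gogo/protobuf and dto types; bufio, encoding/binary, io and fmt are assumed imports, and error handling is trimmed.

rd := bufio.NewReader(buf)
for {
	length, err := binary.ReadUvarint(rd)
	if err == io.EOF {
		break
	}
	msg := make([]byte, length)
	_, _ = io.ReadFull(rd, msg)
	mf := &dto.MetricFamily{}
	_ = proto.Unmarshal(msg, mf)
	fmt.Println(mf.GetName())
}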
|
||||
|
||||
func createTestOpenMetricsHistogram() string {
|
||||
return `# HELP test_histogram1 Test histogram 1
|
||||
# TYPE test_histogram1 histogram
|
||||
test_histogram1_count 175 1234.568
|
||||
test_histogram1_sum 0.0008280461746287094 1234.568
|
||||
test_histogram1_bucket{le="-0.0004899999999999998"} 2 1234.568
|
||||
test_histogram1_bucket{le="-0.0003899999999999998"} 4 1234.568
|
||||
test_histogram1_bucket{le="-0.0002899999999999998"} 16 1234.568
|
||||
test_histogram1_bucket{le="+Inf"} 175 1234.568
|
||||
test_histogram1_created 1
|
||||
# EOF`
|
||||
}
|
||||
|
||||
func createTestPromHistogram() string {
|
||||
return `# HELP test_histogram1 Test histogram 1
|
||||
# TYPE test_histogram1 histogram
|
||||
test_histogram1_count 175 1234568
|
||||
test_histogram1_sum 0.0008280461746287094 1234768
|
||||
test_histogram1_bucket{le="-0.0004899999999999998"} 2 1234568
|
||||
test_histogram1_bucket{le="-0.0003899999999999998"} 4 1234568
|
||||
test_histogram1_bucket{le="-0.0002899999999999998"} 16 1234568
|
||||
test_histogram1_bucket{le="+Inf"} 175 1234568`
|
||||
}
|
||||
|
||||
func TestNHCBParserErrorHandling(t *testing.T) {
|
||||
input := `# HELP something Histogram with non cumulative buckets
|
||||
# TYPE something histogram
|
||||
something_count 18
|
||||
something_sum 324789.4
|
||||
something_created 1520430001
|
||||
something_bucket{le="0.0"} 18
|
||||
something_bucket{le="+Inf"} 1
|
||||
something_count{a="b"} 9
|
||||
something_sum{a="b"} 42123
|
||||
something_created{a="b"} 1520430002
|
||||
something_bucket{a="b",le="0.0"} 1
|
||||
something_bucket{a="b",le="+Inf"} 9
|
||||
# EOF`
|
||||
exp := []parsedEntry{
|
||||
{
|
||||
m: "something",
|
||||
help: "Histogram with non cumulative buckets",
|
||||
},
|
||||
{
|
||||
m: "something",
|
||||
typ: model.MetricTypeHistogram,
|
||||
},
|
||||
// The parser should skip the series with non-cumulative buckets.
|
||||
{
|
||||
m: `something{a="b"}`,
|
||||
shs: &histogram.Histogram{
|
||||
Schema: histogram.CustomBucketsSchema,
|
||||
Count: 9,
|
||||
Sum: 42123.0,
|
||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
|
||||
PositiveBuckets: []int64{1, 7},
|
||||
CustomValues: []float64{0.0}, // We do not store the +Inf boundary.
|
||||
},
|
||||
lset: labels.FromStrings("__name__", "something", "a", "b"),
|
||||
ct: int64p(1520430002000),
|
||||
},
|
||||
}
|
||||
|
||||
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
|
||||
p = NewNHCBParser(p, labels.NewSymbolTable(), false)
|
||||
got := testParse(t, p)
|
||||
requireEntries(t, exp, got)
|
||||
}
|
|
@ -22,6 +22,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
|
@ -101,6 +102,8 @@ type OpenMetricsParser struct {
|
|||
// Created timestamp parsing state.
|
||||
ct int64
|
||||
ctHashSet uint64
|
||||
// ignoreExemplar instructs the parser to not overwrite exemplars (to keep them while peeking ahead).
|
||||
ignoreExemplar bool
|
||||
// visitedMFName is the metric family name of the last visited metric when peeking ahead
|
||||
// for _created series during the execution of the CreatedTimestamp method.
|
||||
visitedMFName []byte
|
||||
|
@ -210,7 +213,7 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string {
|
|||
label := unreplace(s[a:b])
|
||||
c := p.offsets[i+2] - p.start
|
||||
d := p.offsets[i+3] - p.start
|
||||
value := unreplace(s[c:d])
|
||||
value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d]))
|
||||
|
||||
p.builder.Add(label, value)
|
||||
}
|
||||
|
@ -295,6 +298,14 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
|
|||
|
||||
p.skipCTSeries = false
|
||||
|
||||
p.ignoreExemplar = true
|
||||
savedStart := p.start
|
||||
defer func() {
|
||||
p.ignoreExemplar = false
|
||||
p.start = savedStart
|
||||
p.l = resetLexer
|
||||
}()
|
||||
|
||||
for {
|
||||
eType, err := p.Next()
|
||||
if err != nil {
|
||||
|
@ -302,12 +313,12 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
|
|||
// This might result in partial scrape with wrong/missing CT, but only
|
||||
// spec improvement would help.
|
||||
// TODO: Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
|
||||
p.resetCTParseValues(resetLexer)
|
||||
p.resetCTParseValues()
|
||||
return nil
|
||||
}
|
||||
if eType != EntrySeries {
|
||||
// Assume we hit different family, no CT line found.
|
||||
p.resetCTParseValues(resetLexer)
|
||||
p.resetCTParseValues()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -321,14 +332,14 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
|
|||
peekedHash := p.seriesHash(&buf, peekedName[:len(peekedName)-8])
|
||||
if peekedHash != currHash {
|
||||
// Found CT line for a different series, for our series no CT.
|
||||
p.resetCTParseValues(resetLexer)
|
||||
p.resetCTParseValues()
|
||||
return nil
|
||||
}
|
||||
|
||||
// All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds.
|
||||
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps
|
||||
ct := int64(p.val * 1000.0)
|
||||
p.setCTParseValues(ct, currHash, currName, true, resetLexer)
|
||||
p.setCTParseValues(ct, currHash, currName, true)
|
||||
return &ct
|
||||
}
|
||||
}
|
||||
|
@ -370,17 +381,15 @@ func (p *OpenMetricsParser) seriesHash(offsetsArr *[]byte, metricFamilyName []by
|
|||
|
||||
// setCTParseValues sets the parser to the state after CreatedTimestamp method was called and CT was found.
|
||||
// This is useful to prevent re-parsing the same series again and early return the CT value.
|
||||
func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, mfName []byte, skipCTSeries bool, resetLexer *openMetricsLexer) {
|
||||
func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, mfName []byte, skipCTSeries bool) {
|
||||
p.ct = ct
|
||||
p.l = resetLexer
|
||||
p.ctHashSet = ctHashSet
|
||||
p.visitedMFName = mfName
|
||||
p.skipCTSeries = skipCTSeries // Do we need to set it?
|
||||
}
|
||||
|
||||
// resetCTParseValues resets the parser to the state before the CreatedTimestamp method was called.
|
||||
func (p *OpenMetricsParser) resetCTParseValues(resetLexer *openMetricsLexer) {
|
||||
p.l = resetLexer
|
||||
func (p *OpenMetricsParser) resetCTParseValues() {
|
||||
p.ctHashSet = 0
|
||||
p.skipCTSeries = true
|
||||
}
|
||||
|
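setCTParseValues caches the created timestamp together with a hash of the series that produced it, so a later CreatedTimestamp call for the same series can return the cached value instead of peeking ahead again; resetCTParseValues clears that cache when no CT line was found. The guard that consumes this state is outside the hunks shown here, so the following is only a sketch of the early return these cached fields enable (ctCache and lookup are illustrative names, not upstream code):

    package main

    import "fmt"

    // ctCache mirrors the parser fields set by setCTParseValues: the created
    // timestamp (ms) and the hash of the series it belongs to.
    type ctCache struct {
        ct        int64
        ctHashSet uint64
    }

    // lookup sketches the early return: if the current series hashes to the
    // cached value, reuse the CT instead of peeking ahead for _created again.
    func (c *ctCache) lookup(currHash uint64) (int64, bool) {
        if c.ctHashSet != 0 && c.ctHashSet == currHash {
            return c.ct, true
        }
        return 0, false // cache miss: the caller has to peek ahead
    }

    func main() {
        c := ctCache{ct: 1520872607123, ctHashSet: 0xabc123}
        if ct, ok := c.lookup(0xabc123); ok {
            fmt.Println("cached CT:", ct) // cached CT: 1520872607123
        }
    }
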
@ -416,10 +425,12 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
p.start = p.l.i
p.offsets = p.offsets[:0]
if !p.ignoreExemplar {
p.eOffsets = p.eOffsets[:0]
p.exemplar = p.exemplar[:0]
p.exemplarVal = 0
p.hasExemplarTs = false
}

switch t := p.nextToken(); t {
case tEOFWord:

@ -544,6 +555,16 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
func (p *OpenMetricsParser) parseComment() error {
var err error

if p.ignoreExemplar {
for t := p.nextToken(); t != tLinebreak; t = p.nextToken() {
if t == tEOF {
return errors.New("data does not end with # EOF")
}
}
return nil
}

// Parse the labels.
p.eOffsets, err = p.parseLVals(p.eOffsets, true)
if err != nil {

@ -723,3 +744,15 @@ func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error
}
return val, nil
}

// normalizeFloatsInLabelValues ensures that values of the "le" labels of classic histograms and "quantile" labels
// of summaries follow OpenMetrics formatting rules.
func normalizeFloatsInLabelValues(t model.MetricType, l, v string) string {
if (t == model.MetricTypeSummary && l == model.QuantileLabel) || (t == model.MetricTypeHistogram && l == model.BucketLabel) {
f, err := strconv.ParseFloat(v, 64)
if err == nil {
return formatOpenMetricsFloat(f)
}
}
return v
}

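normalizeFloatsInLabelValues only rewrites "le" values on classic histograms and "quantile" values on summaries: the value is parsed as a float64 and re-rendered through formatOpenMetricsFloat (shown later in this diff), so that, for example, le="1" and le="1.0" select the same series. Below is a self-contained sketch of that behaviour; openMetricsFloat stands in for formatOpenMetricsFloat and the metric-type/label gate is collapsed into a single flag:

    package main

    import (
        "fmt"
        "math"
        "strconv"
        "strings"
    )

    // openMetricsFloat stands in for formatOpenMetricsFloat: shortest 'g'
    // representation, with ".0" appended so integral values stay floats.
    func openMetricsFloat(f float64) string {
        switch {
        case math.IsNaN(f):
            return "NaN"
        case math.IsInf(f, 1):
            return "+Inf"
        case math.IsInf(f, -1):
            return "-Inf"
        }
        s := strconv.FormatFloat(f, 'g', -1, 64)
        if strings.ContainsAny(s, "e.") {
            return s
        }
        return s + ".0"
    }

    // normalize mimics normalizeFloatsInLabelValues with the type/label gate
    // reduced to a bool: only histogram "le" and summary "quantile" values are
    // rewritten, and non-numeric values pass through unchanged.
    func normalize(gated bool, v string) string {
        if !gated {
            return v
        }
        if f, err := strconv.ParseFloat(v, 64); err == nil {
            return openMetricsFloat(f)
        }
        return v
    }

    func main() {
        fmt.Println(normalize(true, "1"))    // "1.0"  (histogram le)
        fmt.Println(normalize(true, "0.99")) // "0.99" (summary quantile)
        fmt.Println(normalize(true, "+Inf")) // "+Inf"
        fmt.Println(normalize(true, "c"))    // "c"    (not a float, untouched)
        fmt.Println(normalize(false, "1"))   // "1"    (label on another metric type)
    }

This matches the test expectations below, where histogram buckets such as something_bucket{le="1"} become le="1.0" while foo_total's le values are left alone.
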
@ -74,6 +74,7 @@ foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5
foo_created{a="b"} 1520872607.123
foo_total{le="c"} 21.0
foo_created{le="c"} 1520872621.123
foo_total{le="1"} 10.0
# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines a far
# TYPE bar summary
bar_count 17.0

@ -97,6 +98,7 @@ something_count 18
something_sum 324789.4
something_created 1520430001
something_bucket{le="0.0"} 1
something_bucket{le="1"} 2
something_bucket{le="+Inf"} 18
# HELP yum Summary with _created between sum and quantiles
# TYPE yum summary

@ -130,7 +132,7 @@ foobar{quantile="0.99"} 150.1`
}, {
m: `go_gc_duration_seconds{quantile="0"}`,
v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"),
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"),
}, {
m: `go_gc_duration_seconds{quantile="0.25"}`,
v: 7.424100000000001e-05,

@ -302,6 +304,10 @@ foobar{quantile="0.99"} 150.1`
v: 21.0,
lset: labels.FromStrings("__name__", "foo_total", "le", "c"),
ct: int64p(1520872621123),
}, {
m: `foo_total{le="1"}`,
v: 10.0,
lset: labels.FromStrings("__name__", "foo_total", "le", "1"),
}, {
m: "bar",
help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far",

@ -385,6 +391,11 @@ foobar{quantile="0.99"} 150.1`
v: 1,
lset: labels.FromStrings("__name__", "something_bucket", "le", "0.0"),
ct: int64p(1520430001000),
}, {
m: `something_bucket{le="1"}`,
v: 2,
lset: labels.FromStrings("__name__", "something_bucket", "le", "1.0"),
ct: int64p(1520430001000),
}, {
m: `something_bucket{le="+Inf"}`,
v: 18,

@ -492,7 +503,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
}, {
m: `{"go.gc_duration_seconds",quantile="0"}`,
v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"),
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.0"),
ct: int64p(1520872607123),
}, {
m: `{"go.gc_duration_seconds",quantile="0.25"}`,

@ -239,7 +239,8 @@ func (p *PromParser) Metric(l *labels.Labels) string {
label := unreplace(s[a:b])
c := p.offsets[i+2] - p.start
d := p.offsets[i+3] - p.start
value := unreplace(s[c:d])
value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d]))

p.builder.Add(label, value)
}

@ -508,7 +509,7 @@ func yoloString(b []byte) string {
func parseFloat(s string) (float64, error) {
// Keep to pre-Go 1.13 float formats.
if strings.ContainsAny(s, "pP_") {
return 0, fmt.Errorf("unsupported character in float")
return 0, errors.New("unsupported character in float")
}
return strconv.ParseFloat(s, 64)
}

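parseFloat keeps the text format to pre-Go 1.13 float syntax by rejecting hexadecimal floats and underscore digit separators before delegating to strconv.ParseFloat; the only change in this hunk is using errors.New instead of fmt.Errorf, since the message has no format verbs. A standalone sketch of that guard (parseExpositionFloat is an illustrative name):

    package main

    import (
        "errors"
        "fmt"
        "strconv"
        "strings"
    )

    // parseExpositionFloat mirrors the parseFloat shown above: reject the
    // Go 1.13 additions (hex floats, '_' digit separators) before falling
    // back to strconv.ParseFloat.
    func parseExpositionFloat(s string) (float64, error) {
        if strings.ContainsAny(s, "pP_") {
            return 0, errors.New("unsupported character in float")
        }
        return strconv.ParseFloat(s, 64)
    }

    func main() {
        for _, s := range []string{"1e3", "0x1p-3", "1_000"} {
            v, err := parseExpositionFloat(s)
            fmt.Println(s, v, err)
        }
        // 1e3 1000 <nil>
        // 0x1p-3 0 unsupported character in float
        // 1_000 0 unsupported character in float
    }
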
@ -31,6 +31,13 @@ go_gc_duration_seconds{quantile="0.25",} 7.424100000000001e-05
go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05
go_gc_duration_seconds{quantile="0.8", a="b"} 8.3835e-05
go_gc_duration_seconds{ quantile="0.9", a="b"} 8.3835e-05
# HELP prometheus_http_request_duration_seconds Histogram of latencies for HTTP requests.
# TYPE prometheus_http_request_duration_seconds histogram
prometheus_http_request_duration_seconds_bucket{handler="/",le="1"} 423
prometheus_http_request_duration_seconds_bucket{handler="/",le="2"} 1423
prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"} 1423
prometheus_http_request_duration_seconds_sum{handler="/"} 2000
prometheus_http_request_duration_seconds_count{handler="/"} 1423
# Hrandom comment starting with prefix of HELP
#
wind_speed{A="2",c="3"} 12345

@ -50,7 +57,8 @@ some:aggregate:rate5m{a_b="c"} 1
go_goroutines 33 123123
_metric_starting_with_underscore 1
testmetric{_label_starting_with_underscore="foo"} 1
testmetric{label="\"bar\""} 1`
testmetric{label="\"bar\""} 1
testmetric{le="10"} 1`
input += "\n# HELP metric foo\x00bar"
input += "\nnull_byte_metric{a=\"abc\x00\"} 1"

@ -64,7 +72,7 @@ testmetric{label="\"bar\""} 1`
}, {
m: `go_gc_duration_seconds{quantile="0"}`,
v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"),
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"),
}, {
m: `go_gc_duration_seconds{quantile="0.25",}`,
v: 7.424100000000001e-05,

@ -81,6 +89,32 @@ testmetric{label="\"bar\""} 1`
m: `go_gc_duration_seconds{ quantile="0.9", a="b"}`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.9", "a", "b"),
}, {
m: "prometheus_http_request_duration_seconds",
help: "Histogram of latencies for HTTP requests.",
}, {
m: "prometheus_http_request_duration_seconds",
typ: model.MetricTypeHistogram,
}, {
m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="1"}`,
v: 423,
lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "1.0"),
}, {
m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="2"}`,
v: 1423,
lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "2.0"),
}, {
m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"}`,
v: 1423,
lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "+Inf"),
}, {
m: `prometheus_http_request_duration_seconds_sum{handler="/"}`,
v: 2000,
lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_sum", "handler", "/"),
}, {
m: `prometheus_http_request_duration_seconds_count{handler="/"}`,
v: 1423,
lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_count", "handler", "/"),
}, {
comment: "# Hrandom comment starting with prefix of HELP",
}, {

@ -151,6 +185,10 @@ testmetric{label="\"bar\""} 1`
m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
}, {
m: `testmetric{le="10"}`,
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "le", "10"),
}, {
m: "metric",
help: "foo\x00bar",

@ -197,7 +235,7 @@ func TestUTF8PromParse(t *testing.T) {
}, {
m: `{"go.gc_duration_seconds",quantile="0"}`,
v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"),
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.0"),
}, {
m: `{"go.gc_duration_seconds",quantile="0.25",}`,
v: 7.424100000000001e-05,

@ -20,7 +20,9 @@ import (
"fmt"
"io"
"math"
"strconv"
"strings"
"sync"
"unicode/utf8"

"github.com/gogo/protobuf/proto"

@ -34,6 +36,15 @@ import (
dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
)

// floatFormatBufPool is exclusively used in formatOpenMetricsFloat.
var floatFormatBufPool = sync.Pool{
New: func() interface{} {
// To contain at most 17 digits and additional syntax for a float64.
b := make([]byte, 0, 24)
return &b
},
}

// ProtobufParser is a very inefficient way of unmarshaling the old Prometheus
// protobuf format and then present it as it if were parsed by a
// Prometheus-2-style text parser. This is only done so that we can easily plug

@ -629,11 +640,15 @@ func formatOpenMetricsFloat(f float64) string {
case math.IsInf(f, -1):
return "-Inf"
}
s := fmt.Sprint(f)
if strings.ContainsAny(s, "e.") {
return s
bp := floatFormatBufPool.Get().(*[]byte)
defer floatFormatBufPool.Put(bp)

*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
if bytes.ContainsAny(*bp, "e.") {
return string(*bp)
}
return s + ".0"
*bp = append(*bp, '.', '0')
return string(*bp)
}

// isNativeHistogram returns false iff the provided histograms has no spans at

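formatOpenMetricsFloat now renders the float with strconv.AppendFloat into a byte slice taken from a sync.Pool rather than going through fmt.Sprint, avoiding an allocation per formatted bucket bound or quantile; 24 bytes of capacity is enough for any float64 in shortest 'g' form (up to 17 significant digits plus sign, decimal point, and exponent). A self-contained sketch of the same pooled-buffer pattern, with the NaN/±Inf special cases from the switch above omitted and illustrative names:

    package main

    import (
        "bytes"
        "fmt"
        "strconv"
        "sync"
    )

    // bufPool hands out small byte slices so repeated float formatting does
    // not allocate; 24 bytes covers any float64 in 'g' format at -1 precision.
    var bufPool = sync.Pool{
        New: func() interface{} {
            b := make([]byte, 0, 24)
            return &b
        },
    }

    // formatFloat is an illustrative stand-in for formatOpenMetricsFloat's
    // fast path: shortest representation, with ".0" appended when the result
    // has neither an exponent nor a decimal point.
    func formatFloat(f float64) string {
        bp := bufPool.Get().(*[]byte)
        defer bufPool.Put(bp)

        *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
        if bytes.ContainsAny(*bp, "e.") {
            return string(*bp)
        }
        *bp = append(*bp, '.', '0')
        return string(*bp)
    }

    func main() {
        fmt.Println(formatFloat(20))   // 20.0
        fmt.Println(formatFloat(-20))  // -20.0
        fmt.Println(formatFloat(0.25)) // 0.25
        fmt.Println(formatFloat(1e21)) // 1e+21
    }
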
@ -409,6 +409,49 @@ metric: <
>
>

`,
`name: "test_histogram3"
help: "Similar histogram as before but now with integer buckets."
type: HISTOGRAM
metric: <
histogram: <
sample_count: 6
sample_sum: 50
bucket: <
cumulative_count: 2
upper_bound: -20
>
bucket: <
cumulative_count: 4
upper_bound: 20
exemplar: <
label: <
name: "dummyID"
value: "59727"
>
value: 15
timestamp: <
seconds: 1625851153
nanos: 146848499
>
>
>
bucket: <
cumulative_count: 6
upper_bound: 30
exemplar: <
label: <
name: "dummyID"
value: "5617"
>
value: 25
>
>
schema: 0
zero_threshold: 0
>
>

`,
`name: "test_histogram_family"
help: "Test histogram metric family with two very simple histograms."

@ -1050,6 +1093,66 @@ func TestProtobufParse(t *testing.T) {
"le", "+Inf",
),
},
{
m: "test_histogram3",
help: "Similar histogram as before but now with integer buckets.",
},
{
m: "test_histogram3",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram3_count",
v: 6,
lset: labels.FromStrings(
"__name__", "test_histogram3_count",
),
},
{
m: "test_histogram3_sum",
v: 50,
lset: labels.FromStrings(
"__name__", "test_histogram3_sum",
),
},
{
m: "test_histogram3_bucket\xffle\xff-20.0",
v: 2,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"le", "-20.0",
),
},
{
m: "test_histogram3_bucket\xffle\xff20.0",
v: 4,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"le", "20.0",
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146},
},
},
{
m: "test_histogram3_bucket\xffle\xff30.0",
v: 6,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"le", "30.0",
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: false},
},
},
{
m: "test_histogram3_bucket\xffle\xff+Inf",
v: 6,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"le", "+Inf",
),
},
{
m: "test_histogram_family",
help: "Test histogram metric family with two very simple histograms.",

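The expected exemplar timestamp 1625851153146 in these cases is simply the fixture's protobuf timestamp reduced to Unix milliseconds (seconds times 1000 plus nanos truncated to whole milliseconds), and the -20.0/20.0/30.0 le values are the fixture's integer upper_bound fields passed through the float formatting discussed above. A quick arithmetic check (protoTsToMillis is an illustrative helper, not the parser's own code):

    package main

    import "fmt"

    // protoTsToMillis reduces a protobuf timestamp (seconds + nanos) to Unix
    // milliseconds, the unit the parser's exemplars use.
    func protoTsToMillis(seconds int64, nanos int32) int64 {
        return seconds*1000 + int64(nanos)/1_000_000
    }

    func main() {
        // seconds: 1625851153, nanos: 146848499 from the test_histogram3 fixture
        fmt.Println(protoTsToMillis(1625851153, 146848499)) // 1625851153146
    }
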
@ -1857,6 +1960,66 @@ func TestProtobufParse(t *testing.T) {
"le", "+Inf",
),
},
{
m: "test_histogram3",
help: "Similar histogram as before but now with integer buckets.",
},
{
m: "test_histogram3",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram3_count",
v: 6,
lset: labels.FromStrings(
"__name__", "test_histogram3_count",
),
},
{
m: "test_histogram3_sum",
v: 50,
lset: labels.FromStrings(
"__name__", "test_histogram3_sum",
),
},
{
m: "test_histogram3_bucket\xffle\xff-20.0",
v: 2,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"le", "-20.0",
),
},
{
m: "test_histogram3_bucket\xffle\xff20.0",
v: 4,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"le", "20.0",
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59727"), Value: 15, HasTs: true, Ts: 1625851153146},
},
},
{
m: "test_histogram3_bucket\xffle\xff30.0",
v: 6,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"le", "30.0",
),
es: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "5617"), Value: 25, HasTs: false},
},
},
{
m: "test_histogram3_bucket\xffle\xff+Inf",
v: 6,
lset: labels.FromStrings(
"__name__", "test_histogram3_bucket",
"le", "+Inf",
),
},
{
m: "test_histogram_family",
help: "Test histogram metric family with two very simple histograms.",