diff --git a/.github/stale.yml b/.github/stale.yml
deleted file mode 100644
index 66a72af533..0000000000
--- a/.github/stale.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-# Configuration for probot-stale - https://github.com/probot/stale
-
-# Number of days of inactivity before an Issue or Pull Request becomes stale
-daysUntilStale: 60
-
-# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
-# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
-daysUntilClose: false
-
-# Only issues or pull requests with all of these labels are checked if stale. Defaults to `[]` (disabled)
-onlyLabels: []
-
-# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
-exemptLabels:
- - keepalive
-
-# Set to true to ignore issues in a project (defaults to false)
-exemptProjects: false
-
-# Set to true to ignore issues in a milestone (defaults to false)
-exemptMilestones: false
-
-# Set to true to ignore issues with an assignee (defaults to false)
-exemptAssignees: false
-
-# Label to use when marking as stale
-staleLabel: stale
-
-# Comment to post when marking as stale. Set to `false` to disable
-markComment: false
-
-# Comment to post when removing the stale label.
-# unmarkComment: >
-# Your comment here.
-
-# Comment to post when closing a stale Issue or Pull Request.
-# closeComment: >
-# Your comment here.
-
-# Limit the number of actions per hour, from 1-30. Default is 30
-limitPerRun: 30
-
-# Limit to only `issues` or `pulls`
-only: pulls
-
-# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
-# pulls:
-# daysUntilStale: 30
-# markComment: >
-# This pull request has been automatically marked as stale because it has not had
-# recent activity. It will be closed if no further activity occurs. Thank you
-# for your contributions.
-
-# issues:
-# exemptLabels:
-# - confirmed
diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml
index cbfeb2ba5b..3f6cf76e16 100644
--- a/.github/workflows/buf-lint.yml
+++ b/.github/workflows/buf-lint.yml
@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
+ - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml
index 8b964ef24c..632d38cb00 100644
--- a/.github/workflows/buf.yml
+++ b/.github/workflows/buf.yml
@@ -13,7 +13,7 @@ jobs:
if: github.repository_owner == 'prometheus'
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
+ - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 8b3624383c..5934a3dafe 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -14,7 +14,7 @@ jobs:
image: quay.io/prometheus/golang-builder:1.22-base
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+ - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
- uses: ./.github/promci/actions/setup_environment
- run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
- run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
@@ -28,7 +28,7 @@ jobs:
image: quay.io/prometheus/golang-builder:1.22-base
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+ - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
- uses: ./.github/promci/actions/setup_environment
- run: go test --tags=dedupelabels ./...
- run: GOARCH=386 go test ./cmd/prometheus
@@ -58,7 +58,7 @@ jobs:
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+ - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
- uses: ./.github/promci/actions/setup_environment
with:
enable_go: false
@@ -75,7 +75,7 @@ jobs:
runs-on: windows-latest
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
+ - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: 1.22.x
- run: |
@@ -115,7 +115,7 @@ jobs:
thread: [ 0, 1, 2 ]
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+ - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
- uses: ./.github/promci/actions/build
with:
promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
@@ -138,11 +138,23 @@ jobs:
# should also be updated.
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+ - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
- uses: ./.github/promci/actions/build
with:
parallelism: 12
thread: ${{ matrix.thread }}
+ build_all_status:
+ name: Report status of build Prometheus for all architectures
+ runs-on: ubuntu-latest
+ needs: [build_all]
+ if: github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-')
+ steps:
+ - name: Successful build
+ if: ${{ !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled')) }}
+ run: exit 0
+ - name: Failing or cancelled build
+ if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
+ run: exit 1
check_generated_parser:
name: Check generated parser
runs-on: ubuntu-latest
@@ -150,7 +162,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Install Go
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
+ uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
cache: false
go-version: 1.22.x
@@ -163,18 +175,18 @@ jobs:
- name: Checkout repository
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Install Go
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
+ uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: 1.22.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
- uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
+ uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
with:
args: --verbose
# Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
- version: v1.59.1
+ version: v1.60.2
fuzzing:
uses: ./.github/workflows/fuzzing.yml
if: github.event_name == 'pull_request'
@@ -188,7 +200,7 @@ jobs:
if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+ - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
- uses: ./.github/promci/actions/publish_main
with:
docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -202,7 +214,7 @@ jobs:
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+ - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
- uses: ./.github/promci/actions/publish_release
with:
docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -217,9 +229,9 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+ - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
- name: Install nodejs
- uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2
+ uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3
with:
node-version-file: "web/ui/.nvmrc"
registry-url: "https://registry.npmjs.org"
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 12ffc659c2..89aa2ba29b 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -27,12 +27,12 @@ jobs:
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Initialize CodeQL
- uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
+ uses: github/codeql-action/init@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
with:
languages: ${{ matrix.language }}
- name: Autobuild
- uses: github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
+ uses: github/codeql-action/autobuild@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
+ uses: github/codeql-action/analyze@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml
index dc510e5966..f3953cb2a4 100644
--- a/.github/workflows/fuzzing.yml
+++ b/.github/workflows/fuzzing.yml
@@ -21,7 +21,7 @@ jobs:
fuzz-seconds: 600
dry-run: false
- name: Upload Crash
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+ uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
if: failure() && steps.build.outcome == 'success'
with:
name: artifacts
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index c82fa87a1e..82cccb2bc1 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -26,7 +26,7 @@ jobs:
persist-credentials: false
- name: "Run analysis"
- uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # tag=v2.3.3
+ uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # tag=v2.4.0
with:
results_file: results.sarif
results_format: sarif
@@ -37,7 +37,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3
+ uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # tag=v4.3.4
with:
name: SARIF file
path: results.sarif
@@ -45,6 +45,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # tag=v3.25.11
+ uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # tag=v3.26.6
with:
sarif_file: results.sarif
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 0000000000..d71bcbc9d8
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,31 @@
+name: Stale Check
+on:
+ workflow_dispatch: {}
+ schedule:
+ - cron: '16 22 * * *'
+permissions:
+ issues: write
+ pull-requests: write
+jobs:
+ stale:
+ if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ # opt out of defaults to avoid marking issues as stale and closing them
+ # https://github.com/actions/stale#days-before-close
+ # https://github.com/actions/stale#days-before-stale
+ days-before-stale: -1
+ days-before-close: -1
+ # Setting it to empty string to skip comments.
+ # https://github.com/actions/stale#stale-pr-message
+ # https://github.com/actions/stale#stale-issue-message
+ stale-pr-message: ''
+ stale-issue-message: ''
+ operations-per-run: 30
+ # override days-before-stale, for only marking the pull requests as stale
+ days-before-pr-stale: 60
+ stale-pr-label: stale
+ exempt-pr-labels: keepalive
diff --git a/.golangci.yml b/.golangci.yml
index e924fe3d5b..303cd33d8b 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -25,15 +25,34 @@ linters:
- loggercheck
issues:
+ max-issues-per-linter: 0
max-same-issues: 0
+ # The default exclusions are too aggressive. For one, they
+ # essentially disable any linting on doc comments. We disable
+ # default exclusions here and add exclusions fitting our codebase
+ # further down.
+ exclude-use-default: false
exclude-files:
# Skip autogenerated files.
- ^.*\.(pb|y)\.go$
exclude-dirs:
- # Copied it from a different source
+ # Copied it from a different source.
- storage/remote/otlptranslator/prometheusremotewrite
- storage/remote/otlptranslator/prometheus
exclude-rules:
+ - linters:
+ - errcheck
+ # Taken from the default exclusions (that are otherwise disabled above).
+ text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
+ - linters:
+ - govet
+ # We use many Seek methods that do not follow the usual pattern.
+ text: "stdmethods: method Seek.* should have signature Seek"
+ - linters:
+ - revive
+      # At some point we stopped writing doc comments on exported symbols.
+ # TODO(beorn7): Maybe we should enforce this again? There are ~500 offenders right now.
+ text: exported (.+) should have comment( \(or a comment on this block\))? or be unexported
- linters:
- gocritic
text: "appendAssign"
@@ -94,15 +113,14 @@ linters-settings:
errorf: false
revive:
# By default, revive will enable only the linting rules that are named in the configuration file.
- # So, it's needed to explicitly set in configuration all required rules.
- # The following configuration enables all the rules from the defaults.toml
- # https://github.com/mgechev/revive/blob/master/defaults.toml
+  # So all required rules have to be explicitly enabled here.
rules:
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
- name: blank-imports
+ - name: comment-spacings
- name: context-as-argument
arguments:
- # allow functions with test or bench signatures
+ # Allow functions with test or bench signatures.
- allowTypesBefore: "*testing.T,testing.TB"
- name: context-keys-type
- name: dot-imports
@@ -118,6 +136,8 @@ linters-settings:
- name: increment-decrement
- name: indent-error-flow
- name: package-comments
+ # TODO(beorn7): Currently, we have a lot of missing package doc comments. Maybe we should have them.
+ disabled: true
- name: range
- name: receiver-naming
- name: redefines-builtin-id
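The `stdmethods` exclusion added for govet above exists because Prometheus iterators define `Seek(t int64)` rather than `io.Seeker`'s `Seek(offset int64, whence int) (int64, error)`, which is exactly what that check flags. A minimal sketch of the pattern; the `seriesIterator` type here is hypothetical, standing in for the codebase's real iterators:

```go
package main

// seriesIterator is a hypothetical stand-in for the sample iterators in this
// codebase: Seek takes a timestamp, not (offset, whence) as io.Seeker's Seek
// does, so go vet's stdmethods check would complain about the signature.
type seriesIterator struct {
	ts []int64
	i  int
}

// Seek advances the iterator to the first timestamp at or after t and
// reports whether such a sample exists.
func (it *seriesIterator) Seek(t int64) bool {
	for ; it.i < len(it.ts); it.i++ {
		if it.ts[it.i] >= t {
			return true
		}
	}
	return false
}

func main() {
	it := &seriesIterator{ts: []int64{10, 20, 30}}
	_ = it.Seek(15) // true, positioned at timestamp 20
}
```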
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 850554bf94..37cbea6ef5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,8 +3,59 @@
## unreleased
* [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200
-* [FEATURE] Remote-Write: Add sender and receiver support for [Remote Write 2.0-rc.2](https://prometheus.io/docs/specs/remote_write_spec_2_0/) specification #14395 #14427 #14444
-* [ENHANCEMENT] Remote-Write: 1.x messages against Remote Write 2.x Receivers will have now correct values for `prometheus_storage_<type>_failed_total` in case of partial errors #14444
+* [ENHANCEMENT] OTLP receiver: Warn when encountering exponential histograms with zero count and non-zero sum. #14706
+* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042
+
+## 2.54.1 / 2024-08-27
+
+* [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps. #14685
+* [BUGFIX] Docker SD: fix crash in `match_first_network` mode when container is reconnected to a new network. #14654
+* [BUGFIX] PromQL: fix experimental native histograms getting corrupted due to vector selector bug in range queries. #14538
+* [BUGFIX] PromQL: fix experimental native histogram counter reset detection on stale samples. #14514
+* [BUGFIX] PromQL: fix native histograms getting corrupted due to vector selector bug in range queries. #14605
+
+## 2.54.0 / 2024-08-09
+
+Release 2.54 brings a release candidate of a major new version of [Remote Write: 2.0](https://prometheus.io/docs/specs/remote_write_spec_2_0/).
+This is experimental at this time and may still change.
+Remote-write v2 is enabled by default, but can be disabled via feature-flag `web.remote-write-receiver.accepted-protobuf-messages`.
+
+* [CHANGE] Remote-Write: `highest_timestamp_in_seconds` and `queue_highest_sent_timestamp_seconds` metrics now initialized to 0. #14437
+* [CHANGE] API: Split warnings from info annotations in API response. #14327
+* [FEATURE] Remote-Write: Version 2.0 experimental, plus metadata in WAL via feature flag `metadata-wal-records` (defaults to off). #14395,#14427,#14444
+* [FEATURE] PromQL: add limitk() and limit_ratio() aggregation operators. #12503
+* [ENHANCEMENT] PromQL: Accept underscores in literal numbers, e.g. 1_000_000 for 1 million. #12821
+* [ENHANCEMENT] PromQL: float literal numbers and durations are now interchangeable (experimental). Example: `time() - my_timestamp > 10m`. #9138
+* [ENHANCEMENT] PromQL: use Kahan summation for sum(). #14074,#14362
+* [ENHANCEMENT] PromQL (experimental native histograms): Optimize `histogram_count` and `histogram_sum` functions. #14097
+* [ENHANCEMENT] TSDB: Better support for out-of-order experimental native histogram samples. #14438
+* [ENHANCEMENT] TSDB: Optimise seek within index. #14393
+* [ENHANCEMENT] TSDB: Optimise deletion of stale series. #14307
+* [ENHANCEMENT] TSDB: Reduce locking to optimise adding and removing series. #13286,#14286
+* [ENHANCEMENT] TSDB: Small optimisation: streamline special handling for out-of-order data. #14396,#14584
+* [ENHANCEMENT] Regexps: Optimize patterns with multiple prefixes. #13843,#14368
+* [ENHANCEMENT] Regexps: Optimize patterns containing multiple literal strings. #14173
+* [ENHANCEMENT] AWS SD: expose Primary IPv6 addresses as __meta_ec2_primary_ipv6_addresses. #14156
+* [ENHANCEMENT] Docker SD: add MatchFirstNetwork for containers with multiple networks. #10490
+* [ENHANCEMENT] OpenStack SD: Use `flavor.original_name` if available. #14312
+* [ENHANCEMENT] UI (experimental native histograms): more accurate representation. #13680,#14430
+* [ENHANCEMENT] Agent: `out_of_order_time_window` config option now applies to agent. #14094
+* [ENHANCEMENT] Notifier: Send any outstanding Alertmanager notifications when shutting down. #14290
+* [ENHANCEMENT] Rules: Add label-matcher support to Rules API. #10194
+* [ENHANCEMENT] HTTP API: Add url to message logged on error while sending response. #14209
+* [BUGFIX] TSDB: Exclude OOO chunks mapped after compaction starts (introduced by #14396). #14584
+* [BUGFIX] CLI: escape `|` characters when generating docs. #14420
+* [BUGFIX] PromQL (experimental native histograms): Fix some binary operators between native histogram values. #14454
+* [BUGFIX] TSDB: LabelNames API could fail during compaction. #14279
+* [BUGFIX] TSDB: Fix rare issue where pending OOO read can be left dangling if creating querier fails. #14341
+* [BUGFIX] TSDB: fix check for context cancellation in LabelNamesFor. #14302
+* [BUGFIX] Rules: Fix rare panic on reload. #14366
+* [BUGFIX] Config: In YAML marshalling, do not output a regexp field if it was never set. #14004
+* [BUGFIX] Remote-Write: reject samples with future timestamps. #14304
+* [BUGFIX] Remote-Write: Fix data corruption in remote write if max_sample_age is applied. #14078
+* [BUGFIX] Notifier: Fix Alertmanager discovery not updating under heavy load. #14174
+* [BUGFIX] Regexes: some Unicode characters were not matched by case-insensitive comparison. #14170,#14299
+* [BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515
## 2.53.1 / 2024-07-10
diff --git a/Makefile.common b/Makefile.common
index e3da72ab47..34d65bb56d 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.59.1
+GOLANGCI_LINT_VERSION ?= v1.60.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
diff --git a/README.md b/README.md
index cd14ed2ecb..df974e1097 100644
--- a/README.md
+++ b/README.md
@@ -12,9 +12,10 @@ examples and guides.
[][hub]
[](https://goreportcard.com/report/github.com/prometheus/prometheus)
[](https://bestpractices.coreinfrastructure.org/projects/486)
+[](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)
+[](https://clomonitor.io/projects/cncf/prometheus)
[](https://gitpod.io/#https://github.com/prometheus/prometheus)
[](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
-[](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)
diff --git a/SECURITY-INSIGHTS.yml b/SECURITY-INSIGHTS.yml
new file mode 100644
index 0000000000..009b356214
--- /dev/null
+++ b/SECURITY-INSIGHTS.yml
@@ -0,0 +1,48 @@
+header:
+ schema-version: '1.0.0'
+ expiration-date: '2025-07-30T01:00:00.000Z'
+ last-updated: '2024-07-30'
+ last-reviewed: '2024-07-30'
+ project-url: https://github.com/prometheus/prometheus
+ changelog: https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md
+ license: https://github.com/prometheus/prometheus/blob/main/LICENSE
+project-lifecycle:
+ status: active
+ bug-fixes-only: false
+ core-maintainers:
+ - https://github.com/prometheus/prometheus/blob/main/MAINTAINERS.md
+contribution-policy:
+ accepts-pull-requests: true
+ accepts-automated-pull-requests: true
+dependencies:
+ third-party-packages: true
+ dependencies-lists:
+ - https://github.com/prometheus/prometheus/blob/main/go.mod
+ - https://github.com/prometheus/prometheus/blob/main/web/ui/package.json
+ env-dependencies-policy:
+ policy-url: https://github.com/prometheus/prometheus/blob/main/CONTRIBUTING.md#dependency-management
+distribution-points:
+ - https://github.com/prometheus/prometheus/releases
+documentation:
+ - https://prometheus.io/docs/introduction/overview/
+security-contacts:
+ - type: email
+ value: prometheus-team@googlegroups.com
+security-testing:
+ - tool-type: sca
+ tool-name: Dependabot
+ tool-version: latest
+ integration:
+ ad-hoc: false
+ ci: true
+ before-release: true
+ - tool-type: sast
+ tool-name: CodeQL
+ tool-version: latest
+ integration:
+ ad-hoc: false
+ ci: true
+ before-release: true
+vulnerability-reporting:
+ accepts-vulnerability-reports: true
+ security-policy: https://github.com/prometheus/prometheus/security/policy
diff --git a/VERSION b/VERSION
index f419e2c6f1..3a40665f50 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.53.1
+2.54.1
diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index 1d844ddba6..65ffd7de5a 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -152,6 +152,7 @@ type flagConfig struct {
queryConcurrency int
queryMaxSamples int
RemoteFlushDeadline model.Duration
+ nameEscapingScheme string
featureList []string
memlimitRatio float64
@@ -168,6 +169,8 @@ type flagConfig struct {
corsRegexString string
promlogConfig promlog.Config
+
+ promqlEnableDelayedNameRemoval bool
}
// setFeatureListOptions sets the corresponding options from the featureList.
@@ -234,6 +237,15 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
+ case "delayed-compaction":
+ c.tsdb.EnableDelayedCompaction = true
+ level.Info(logger).Log("msg", "Experimental delayed compaction is enabled.")
+ case "promql-delayed-name-removal":
+ c.promqlEnableDelayedNameRemoval = true
+ level.Info(logger).Log("msg", "Experimental PromQL delayed name removal enabled.")
+ case "utf8-names":
+ model.NameValidationScheme = model.UTF8Validation
+ level.Info(logger).Log("msg", "Experimental UTF-8 support enabled")
case "":
continue
case "promql-at-modifier", "promql-negative-offset":
@@ -290,8 +302,8 @@ func main() {
a.Flag("config.file", "Prometheus configuration file path.").
Default("prometheus.yml").StringVar(&cfg.configFile)
- a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry.").
- Default("0.0.0.0:9090").StringVar(&cfg.web.ListenAddress)
+ a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry. Can be repeated.").
+ Default("0.0.0.0:9090").StringsVar(&cfg.web.ListenAddresses)
a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory").
Default("0.9").FloatVar(&cfg.memlimitRatio)
@@ -305,7 +317,7 @@ func main() {
"Maximum duration before timing out read of the request, and closing idle connections.").
Default("5m").SetValue(&cfg.webTimeout)
- a.Flag("web.max-connections", "Maximum number of simultaneous connections.").
+ a.Flag("web.max-connections", "Maximum number of simultaneous connections across all listeners.").
Default("512").IntVar(&cfg.web.MaxConnections)
a.Flag("web.external-url",
@@ -381,6 +393,9 @@ func main() {
serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "[DEPRECATED] This flag has no effect. Overlapping blocks are enabled by default now.").
Default("true").Hidden().BoolVar(&b)
+ serverOnlyFlag(a, "storage.tsdb.allow-overlapping-compaction", "Allow compaction of overlapping blocks. If set to false, TSDB stops vertical compaction and leaves overlapping blocks there. The use case is to let another component handle the compaction of overlapping blocks.").
+ Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction)
+
serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL.").
Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)
@@ -475,7 +490,9 @@ func main() {
a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
- a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+ a.Flag("scrape.name-escaping-scheme", `Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots".`).Default(scrape.DefaultNameEscapingScheme.String()).StringVar(&cfg.nameEscapingScheme)
+
+ a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, no-default-scrape-port, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList)
promlogflag.AddFlags(a, &cfg.promlogConfig)
@@ -503,6 +520,15 @@ func main() {
os.Exit(1)
}
+ if cfg.nameEscapingScheme != "" {
+ scheme, err := model.ToEscapingScheme(cfg.nameEscapingScheme)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, `Invalid name escaping scheme: %q; Needs to be one of "values", "underscores", or "dots"`, cfg.nameEscapingScheme)
+ os.Exit(1)
+ }
+ model.NameEscapingScheme = scheme
+ }
+
if agentMode && len(serverOnlyFlags) > 0 {
fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags)
os.Exit(3)
@@ -523,7 +549,7 @@ func main() {
localStoragePath = cfg.agentStoragePath
}
- cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddress)
+ cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddresses[0])
if err != nil {
fmt.Fprintln(os.Stderr, fmt.Errorf("parse external URL %q: %w", cfg.prometheusURL, err))
os.Exit(2)
@@ -778,9 +804,10 @@ func main() {
NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get,
// EnableAtModifier and EnableNegativeOffset have to be
// always on for regular PromQL as of Prometheus v2.33.
- EnableAtModifier: true,
- EnableNegativeOffset: true,
- EnablePerStepStats: cfg.enablePerStepStats,
+ EnableAtModifier: true,
+ EnableNegativeOffset: true,
+ EnablePerStepStats: cfg.enablePerStepStats,
+ EnableDelayedNameRemoval: cfg.promqlEnableDelayedNameRemoval,
}
queryEngine = promql.NewEngine(opts)
@@ -969,9 +996,9 @@ func main() {
})
}
- listener, err := webHandler.Listener()
+ listeners, err := webHandler.Listeners()
if err != nil {
- level.Error(logger).Log("msg", "Unable to start web listener", "err", err)
+ level.Error(logger).Log("msg", "Unable to start web listeners", "err", err)
os.Exit(1)
}
@@ -1266,7 +1293,7 @@ func main() {
// Web handler.
g.Add(
func() error {
- if err := webHandler.Run(ctxWeb, listener, *webConfig); err != nil {
+ if err := webHandler.Run(ctxWeb, listeners, *webConfig); err != nil {
return fmt.Errorf("error starting web server: %w", err)
}
return nil
@@ -1715,6 +1742,8 @@ type tsdbOptions struct {
MaxExemplars int64
EnableMemorySnapshotOnShutdown bool
EnableNativeHistograms bool
+ EnableDelayedCompaction bool
+ EnableOverlappingCompaction bool
}
func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
@@ -1735,7 +1764,8 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown,
EnableNativeHistograms: opts.EnableNativeHistograms,
OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow,
- EnableOverlappingCompaction: true,
+ EnableDelayedCompaction: opts.EnableDelayedCompaction,
+ EnableOverlappingCompaction: opts.EnableOverlappingCompaction,
}
}
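For reference, the new `--scrape.name-escaping-scheme` plumbing above reduces to a small `prometheus/common/model` call sequence. A minimal sketch of that flow; the `EscapeName` call and its output illustrate the common library's UTF-8 support and are not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Mirrors the flag handling in main(): parse the scheme name, then set
	// the package-level default used when talking to non-UTF-8 receivers.
	scheme, err := model.ToEscapingScheme("underscores")
	if err != nil {
		fmt.Println("invalid name escaping scheme:", err)
		return
	}
	model.NameEscapingScheme = scheme

	// Under the "underscores" scheme, characters that are invalid in
	// legacy metric names are replaced with underscores.
	fmt.Println(model.EscapeName("my.metric.name", scheme)) // my_metric_name
}
```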
diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go
index 400cae421a..16491f0416 100644
--- a/cmd/promtool/backfill.go
+++ b/cmd/promtool/backfill.go
@@ -85,7 +85,7 @@ func getCompatibleBlockDuration(maxBlockDuration int64) int64 {
return blockDuration
}
-func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool) (returnErr error) {
+func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool, customLabels map[string]string) (returnErr error) {
blockDuration := getCompatibleBlockDuration(maxBlockDuration)
mint = blockDuration * (mint / blockDuration)
@@ -102,6 +102,8 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
nextSampleTs int64 = math.MaxInt64
)
+ lb := labels.NewBuilder(labels.EmptyLabels())
+
for t := mint; t <= maxt; t += blockDuration {
tsUpper := t + blockDuration
if nextSampleTs != math.MaxInt64 && nextSampleTs >= tsUpper {
@@ -162,7 +164,13 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
l := labels.Labels{}
p.Metric(&l)
- if _, err := app.Append(0, l, *ts, v); err != nil {
+ lb.Reset(l)
+ for name, value := range customLabels {
+ lb.Set(name, value)
+ }
+ lbls := lb.Labels()
+
+ if _, err := app.Append(0, lbls, *ts, v); err != nil {
return fmt.Errorf("add sample: %w", err)
}
@@ -221,13 +229,13 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
return nil
}
-func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) {
+func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration, customLabels map[string]string) (err error) {
p := textparse.NewOpenMetricsParser(input, nil) // Don't need a SymbolTable to get max and min timestamps.
maxt, mint, err := getMinAndMaxTimestamps(p)
if err != nil {
return fmt.Errorf("getting min and max timestamp: %w", err)
}
- if err = createBlocks(input, mint, maxt, int64(maxBlockDuration/time.Millisecond), maxSamplesInAppender, outputDir, humanReadable, quiet); err != nil {
+ if err = createBlocks(input, mint, maxt, int64(maxBlockDuration/time.Millisecond), maxSamplesInAppender, outputDir, humanReadable, quiet, customLabels); err != nil {
return fmt.Errorf("block creation: %w", err)
}
return nil
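The `customLabels` handling in `createBlocks` reuses one `labels.Builder` per run: reset it to the parsed series, overlay the user-supplied labels, and append the result. A standalone sketch of that sequence; the sample label values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	parsed := labels.FromStrings("__name__", "http_requests_total", "code", "200")
	customLabels := map[string]string{"cluster_id": "123", "org_id": "999"}

	// One builder, reused across all samples, as in createBlocks above.
	lb := labels.NewBuilder(labels.EmptyLabels())
	lb.Reset(parsed)
	for name, value := range customLabels {
		lb.Set(name, value) // a custom label overrides a parsed one on collision
	}

	// Labels() returns the merged, sorted label set handed to app.Append.
	fmt.Println(lb.Labels())
}
```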
diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go
index 32abfa46a8..b818194e86 100644
--- a/cmd/promtool/backfill_test.go
+++ b/cmd/promtool/backfill_test.go
@@ -92,6 +92,7 @@ func TestBackfill(t *testing.T) {
Description string
MaxSamplesInAppender int
MaxBlockDuration time.Duration
+ Labels map[string]string
Expected struct {
MinTime int64
MaxTime int64
@@ -636,6 +637,49 @@ http_requests_total{code="400"} 1024 7199
},
},
},
+ {
+ ToParse: `# HELP http_requests_total The total number of HTTP requests.
+# TYPE http_requests_total counter
+http_requests_total{code="200"} 1 1624463088.000
+http_requests_total{code="200"} 2 1629503088.000
+http_requests_total{code="200"} 3 1629863088.000
+# EOF
+`,
+ IsOk: true,
+ Description: "Sample with external labels.",
+ MaxSamplesInAppender: 5000,
+ MaxBlockDuration: 2048 * time.Hour,
+ Labels: map[string]string{"cluster_id": "123", "org_id": "999"},
+ Expected: struct {
+ MinTime int64
+ MaxTime int64
+ NumBlocks int
+ BlockDuration int64
+ Samples []backfillSample
+ }{
+ MinTime: 1624463088000,
+ MaxTime: 1629863088000,
+ NumBlocks: 2,
+ BlockDuration: int64(1458 * time.Hour / time.Millisecond),
+ Samples: []backfillSample{
+ {
+ Timestamp: 1624463088000,
+ Value: 1,
+ Labels: labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"),
+ },
+ {
+ Timestamp: 1629503088000,
+ Value: 2,
+ Labels: labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"),
+ },
+ {
+ Timestamp: 1629863088000,
+ Value: 3,
+ Labels: labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"),
+ },
+ },
+ },
+ },
{
ToParse: `# HELP rpc_duration_seconds A summary of the RPC duration in seconds.
# TYPE rpc_duration_seconds summary
@@ -689,7 +733,7 @@ after_eof 1 2
outputDir := t.TempDir()
- err := backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration)
+ err := backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration, test.Labels)
if !test.IsOk {
require.Error(t, err, test.Description)
diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go
index e1d275e97e..e713a177fd 100644
--- a/cmd/promtool/main.go
+++ b/cmd/promtool/main.go
@@ -204,6 +204,7 @@ func main() {
pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap()
testCmd := app.Command("test", "Unit testing.")
+ junitOutFile := testCmd.Flag("junit", "File path to store JUnit XML test results.").OpenFile(os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
testRulesCmd := testCmd.Command("rules", "Unit tests for rules.")
testRulesRun := testRulesCmd.Flag("run", "If set, will only run test groups whose names match the regular expression. Can be specified multiple times.").Strings()
testRulesFiles := testRulesCmd.Arg(
@@ -252,6 +253,7 @@ func main() {
importQuiet := importCmd.Flag("quiet", "Do not print created blocks.").Short('q').Bool()
maxBlockDuration := importCmd.Flag("max-block-duration", "Maximum duration created blocks may span. Anything less than 2h is ignored.").Hidden().PlaceHolder("").Duration()
openMetricsImportCmd := importCmd.Command("openmetrics", "Import samples from OpenMetrics input and produce TSDB blocks. Please refer to the storage docs for more details.")
+ openMetricsLabels := openMetricsImportCmd.Flag("label", "Label to attach to metrics. Can be specified multiple times. Example --label=label_name=label_value").StringMap()
importFilePath := openMetricsImportCmd.Arg("input file", "OpenMetrics file to read samples from.").Required().String()
importDBPath := openMetricsImportCmd.Arg("output directory", "Output directory for generated blocks.").Default(defaultDBPath).String()
importRulesCmd := importCmd.Command("rules", "Create blocks of data for new recording rules.")
@@ -378,7 +380,11 @@ func main() {
os.Exit(QueryLabels(serverURL, httpRoundTripper, *queryLabelsMatch, *queryLabelsName, *queryLabelsBegin, *queryLabelsEnd, p))
case testRulesCmd.FullCommand():
- os.Exit(RulesUnitTest(
+ results := io.Discard
+ if *junitOutFile != nil {
+ results = *junitOutFile
+ }
+ os.Exit(RulesUnitTestResult(results,
promqltest.LazyLoaderOpts{
EnableAtModifier: true,
EnableNegativeOffset: true,
@@ -403,7 +409,7 @@ func main() {
os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
// TODO(aSquare14): Work on adding support for custom block size.
case openMetricsImportCmd.FullCommand():
- os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))
+ os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration, *openMetricsLabels))
case importRulesCmd.FullCommand():
os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...)))
@@ -466,7 +472,7 @@ func (ls lintConfig) lintDuplicateRules() bool {
return ls.all || ls.duplicateRules
}
-// Check server status - healthy & ready.
+// CheckServerStatus - healthy & ready.
func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error {
if serverURL.Scheme == "" {
serverURL.Scheme = "http"
diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go
index b58b7b2a01..a6813e4d18 100644
--- a/cmd/promtool/metrics.go
+++ b/cmd/promtool/metrics.go
@@ -31,7 +31,7 @@ import (
"github.com/prometheus/prometheus/util/fmtutil"
)
-// Push metrics to a prometheus remote write (for testing purpose only).
+// PushMetrics to a prometheus remote write (for testing purposes only).
func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int {
addressURL, err := url.Parse(url.String())
if err != nil {
diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go
index 2ed7244b1c..971ea8ab00 100644
--- a/cmd/promtool/tsdb.go
+++ b/cmd/promtool/tsdb.go
@@ -823,7 +823,7 @@ func checkErr(err error) int {
return 0
}
-func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int {
+func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration, customLabels map[string]string) int {
inputFile, err := fileutil.OpenMmapFile(path)
if err != nil {
return checkErr(err)
@@ -834,7 +834,7 @@ func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxB
return checkErr(fmt.Errorf("create output dir: %w", err))
}
- return checkErr(backfill(5000, inputFile.Bytes(), outputDir, humanReadable, quiet, maxBlockDuration))
+ return checkErr(backfill(5000, inputFile.Bytes(), outputDir, humanReadable, quiet, maxBlockDuration, customLabels))
}
func displayHistogram(dataType string, datas []int, total int) {
@@ -866,16 +866,16 @@ func displayHistogram(dataType string, datas []int, total int) {
fmt.Println()
}
-func generateBucket(min, max int) (start, end, step int) {
- s := (max - min) / 10
+func generateBucket(minVal, maxVal int) (start, end, step int) {
+ s := (maxVal - minVal) / 10
step = 10
for step < s && step <= 10000 {
step *= 10
}
- start = min - min%step
- end = max - max%step + step
+ start = minVal - minVal%step
+ end = maxVal - maxVal%step + step
return
}
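The rename from `min`/`max` to `minVal`/`maxVal` avoids shadowing Go 1.21's built-in `min` and `max` without changing behavior. A worked example of the bucketing logic, reproducing the function so the snippet runs standalone:

```go
package main

import "fmt"

// generateBucket, as defined above: grow a power-of-ten step until it yields
// roughly ten buckets, then snap start and end outward to step boundaries.
func generateBucket(minVal, maxVal int) (start, end, step int) {
	s := (maxVal - minVal) / 10
	step = 10
	for step < s && step <= 10000 {
		step *= 10
	}
	start = minVal - minVal%step
	end = maxVal - maxVal%step + step
	return
}

func main() {
	// (467-52)/10 = 41, so the step grows from 10 to 100,
	// and the range snaps to multiples of 100.
	fmt.Println(generateBucket(52, 467)) // 0 500 100
}
```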
diff --git a/cmd/promtool/tsdb_test.go b/cmd/promtool/tsdb_test.go
index 75089b168b..ffc5467b47 100644
--- a/cmd/promtool/tsdb_test.go
+++ b/cmd/promtool/tsdb_test.go
@@ -20,6 +20,7 @@ import (
"math"
"os"
"runtime"
+ "slices"
"strings"
"testing"
"time"
@@ -152,12 +153,18 @@ func TestTSDBDump(t *testing.T) {
expectedMetrics, err := os.ReadFile(tt.expectedDump)
require.NoError(t, err)
expectedMetrics = normalizeNewLine(expectedMetrics)
- // even though in case of one matcher samples are not sorted, the order in the cases above should stay the same.
- require.Equal(t, string(expectedMetrics), dumpedMetrics)
+ // Sort both, because Prometheus does not guarantee the output order.
+ require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))
})
}
}
+func sortLines(buf string) string {
+ lines := strings.Split(buf, "\n")
+ slices.Sort(lines)
+ return strings.Join(lines, "\n")
+}
+
func TestTSDBDumpOpenMetrics(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 1m
@@ -169,7 +176,7 @@ func TestTSDBDumpOpenMetrics(t *testing.T) {
require.NoError(t, err)
expectedMetrics = normalizeNewLine(expectedMetrics)
dumpedMetrics := getDumpedSamples(t, storage.Dir(), math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)
- require.Equal(t, string(expectedMetrics), dumpedMetrics)
+ require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))
}
func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) {
@@ -179,7 +186,7 @@ func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) {
dbDir := t.TempDir()
// Import samples from OM format
- err = backfill(5000, initialMetrics, dbDir, false, false, 2*time.Hour)
+ err = backfill(5000, initialMetrics, dbDir, false, false, 2*time.Hour, map[string]string{})
require.NoError(t, err)
db, err := tsdb.Open(dbDir, nil, nil, tsdb.DefaultOptions(), nil)
require.NoError(t, err)
diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go
index 5451c5296c..7030635d1c 100644
--- a/cmd/promtool/unittest.go
+++ b/cmd/promtool/unittest.go
@@ -18,6 +18,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "io"
"os"
"path/filepath"
"sort"
@@ -29,9 +30,10 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/grafana/regexp"
"github.com/nsf/jsondiff"
- "github.com/prometheus/common/model"
"gopkg.in/yaml.v2"
+ "github.com/prometheus/common/model"
+
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
@@ -39,12 +41,18 @@ import (
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/util/junitxml"
)
// RulesUnitTest does unit testing of rules based on the unit testing files provided.
// More info about the file format can be found in the docs.
func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
+ return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, files...)
+}
+
+func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
failed := false
+ junit := &junitxml.JUnitXML{}
var run *regexp.Regexp
if runStrings != nil {
@@ -52,7 +60,7 @@ func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, dif
}
for _, f := range files {
- if errs := ruleUnitTest(f, queryOpts, run, diffFlag); errs != nil {
+ if errs := ruleUnitTest(f, queryOpts, run, diffFlag, junit.Suite(f)); errs != nil {
fmt.Fprintln(os.Stderr, " FAILED:")
for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error())
@@ -64,25 +72,30 @@ func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, dif
}
fmt.Println()
}
+ err := junit.WriteXML(results)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to write JUnit XML: %s\n", err)
+ }
if failed {
return failureExitCode
}
return successExitCode
}
-func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error {
- fmt.Println("Unit Testing: ", filename)
-
+func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool, ts *junitxml.TestSuite) []error {
b, err := os.ReadFile(filename)
if err != nil {
+ ts.Abort(err)
return []error{err}
}
var unitTestInp unitTestFile
if err := yaml.UnmarshalStrict(b, &unitTestInp); err != nil {
+ ts.Abort(err)
return []error{err}
}
if err := resolveAndGlobFilepaths(filepath.Dir(filename), &unitTestInp); err != nil {
+ ts.Abort(err)
return []error{err}
}
@@ -91,29 +104,38 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg
}
evalInterval := time.Duration(unitTestInp.EvaluationInterval)
-
+ ts.Settime(time.Now().Format("2006-01-02T15:04:05"))
// Giving number for groups mentioned in the file for ordering.
// Lower number group should be evaluated before higher number group.
groupOrderMap := make(map[string]int)
for i, gn := range unitTestInp.GroupEvalOrder {
if _, ok := groupOrderMap[gn]; ok {
- return []error{fmt.Errorf("group name repeated in evaluation order: %s", gn)}
+ err := fmt.Errorf("group name repeated in evaluation order: %s", gn)
+ ts.Abort(err)
+ return []error{err}
}
groupOrderMap[gn] = i
}
// Testing.
var errs []error
- for _, t := range unitTestInp.Tests {
+ for i, t := range unitTestInp.Tests {
if !matchesRun(t.TestGroupName, run) {
continue
}
-
+ testname := t.TestGroupName
+ if testname == "" {
+ testname = fmt.Sprintf("unnamed#%d", i)
+ }
+ tc := ts.Case(testname)
if t.Interval == 0 {
t.Interval = unitTestInp.EvaluationInterval
}
ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...)
if ers != nil {
+ for _, e := range ers {
+ tc.Fail(e.Error())
+ }
errs = append(errs, ers...)
}
}
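Wired together, the `util/junitxml` API used above is small: one suite per rules test file, one case per test group, `Fail` for per-case errors, `Abort` for file-level errors, and `WriteXML` at the end (reachable via the new `--junit` flag on `promtool test`). A sketch of the same call sequence with hypothetical file and group names:

```go
package main

import (
	"errors"
	"os"
	"time"

	"github.com/prometheus/prometheus/util/junitxml"
)

func main() {
	junit := &junitxml.JUnitXML{}

	// One suite per rules test file, timestamped as RulesUnitTestResult does.
	ts := junit.Suite("alerts_test.yml")
	ts.Settime(time.Now().Format("2006-01-02T15:04:05"))

	// One case per test group; failures are attached to the case.
	tc := ts.Case("high_latency_alerts")
	tc.Fail("expected alert HighLatency to fire, but it did not")

	// Unreadable or unparsable files abort their whole suite.
	junit.Suite("missing_test.yml").Abort(errors.New("no such file"))

	if err := junit.WriteXML(os.Stdout); err != nil {
		os.Exit(1)
	}
}
```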
diff --git a/cmd/promtool/unittest_test.go b/cmd/promtool/unittest_test.go
index 2dbd5a4e51..9bbac28e9f 100644
--- a/cmd/promtool/unittest_test.go
+++ b/cmd/promtool/unittest_test.go
@@ -14,11 +14,15 @@
package main
import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/promql/promqltest"
+ "github.com/prometheus/prometheus/util/junitxml"
)
func TestRulesUnitTest(t *testing.T) {
@@ -125,13 +129,59 @@ func TestRulesUnitTest(t *testing.T) {
want: 0,
},
}
+ reuseFiles := []string{}
+ reuseCount := [2]int{}
for _, tt := range tests {
+ if (tt.queryOpts == promqltest.LazyLoaderOpts{
+ EnableNegativeOffset: true,
+ } || tt.queryOpts == promqltest.LazyLoaderOpts{
+ EnableAtModifier: true,
+ }) {
+ reuseFiles = append(reuseFiles, tt.args.files...)
+ reuseCount[tt.want] += len(tt.args.files)
+ }
t.Run(tt.name, func(t *testing.T) {
if got := RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want {
t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
}
})
}
+ t.Run("Junit xml output ", func(t *testing.T) {
+ var buf bytes.Buffer
+ if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, reuseFiles...); got != 1 {
+ t.Errorf("RulesUnitTestResults() = %v, want 1", got)
+ }
+ var test junitxml.JUnitXML
+ output := buf.Bytes()
+ err := xml.Unmarshal(output, &test)
+ if err != nil {
+ fmt.Println("error in decoding XML:", err)
+ return
+ }
+ var total int
+ var passes int
+ var failures int
+ var cases int
+ total = len(test.Suites)
+ if total != len(reuseFiles) {
+ t.Errorf("JUnit output had %d testsuite elements; expected %d\n", total, len(reuseFiles))
+ }
+
+ for _, i := range test.Suites {
+ if i.FailureCount == 0 {
+ passes++
+ } else {
+ failures++
+ }
+ cases += len(i.Cases)
+ }
+ if total != passes+failures {
+ t.Errorf("JUnit output mismatch: Total testsuites (%d) does not equal the sum of passes (%d) and failures (%d).", total, passes, failures)
+ }
+ if cases < total {
+ t.Errorf("JUnit output had %d suites without test cases\n", total-cases)
+ }
+ })
}
func TestRulesUnitTestRun(t *testing.T) {
diff --git a/config/config.go b/config/config.go
index 9139838813..c9e8efbf3e 100644
--- a/config/config.go
+++ b/config/config.go
@@ -37,6 +37,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/storage/remote/azuread"
+ "github.com/prometheus/prometheus/storage/remote/googleiam"
)
var (
@@ -66,6 +67,11 @@ var (
}
)
+const (
+ LegacyValidationConfig = "legacy"
+ UTF8ValidationConfig = "utf8"
+)
+
// Load parses the YAML input s into a Config.
func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) {
cfg := &Config{}
@@ -215,6 +221,7 @@ var (
// DefaultRemoteReadConfig is the default remote read configuration.
DefaultRemoteReadConfig = RemoteReadConfig{
RemoteTimeout: model.Duration(1 * time.Minute),
+ ChunkedReadLimit: DefaultChunkedReadLimit,
HTTPClientConfig: config.DefaultHTTPClientConfig,
FilterExternalLabels: true,
}
@@ -445,6 +452,8 @@ type GlobalConfig struct {
// Keep no more than this many dropped targets per job.
// 0 means no limit.
KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
+ // Allow UTF8 Metric and Label Names.
+ MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"`
}
// ScrapeProtocol represents supported protocol for scraping metrics.
@@ -470,6 +479,7 @@ var (
PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4"
OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1"
OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0"
+ UTF8NamesHeader string = model.EscapingKey + "=" + model.AllowUTF8
ScrapeProtocolsHeaders = map[ScrapeProtocol]string{
PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
@@ -655,6 +665,8 @@ type ScrapeConfig struct {
// Keep no more than this many dropped targets per job.
// 0 means no limit.
KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
+ // Allow UTF8 Metric and Label Names.
+ MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
@@ -761,6 +773,19 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName)
}
+ switch globalConfig.MetricNameValidationScheme {
+ case "", LegacyValidationConfig:
+ case UTF8ValidationConfig:
+ if model.NameValidationScheme != model.UTF8Validation {
+ return fmt.Errorf("utf8 name validation requested but feature not enabled via --enable-feature=utf8-names")
+ }
+ default:
+ return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme)
+ }
+ if c.MetricNameValidationScheme == "" {
+ c.MetricNameValidationScheme = globalConfig.MetricNameValidationScheme
+ }
+
return nil
}
@@ -1089,8 +1114,9 @@ func (m RemoteWriteProtoMsgs) String() string {
}
var (
- // RemoteWriteProtoMsgV1 represents the deprecated `prometheus.WriteRequest` protobuf
- // message introduced in the https://prometheus.io/docs/specs/remote_write_spec/.
+ // RemoteWriteProtoMsgV1 represents the `prometheus.WriteRequest` protobuf
+ // message introduced in the https://prometheus.io/docs/specs/remote_write_spec/,
+ // which will eventually be deprecated.
//
// NOTE: This string is used for both HTTP header values and config value, so don't change
// this reference.
@@ -1123,6 +1149,7 @@ type RemoteWriteConfig struct {
MetadataConfig MetadataConfig `yaml:"metadata_config,omitempty"`
SigV4Config *sigv4.SigV4Config `yaml:"sigv4,omitempty"`
AzureADConfig *azuread.AzureADConfig `yaml:"azuread,omitempty"`
+ GoogleIAMConfig *googleiam.Config `yaml:"google_iam,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
@@ -1160,17 +1187,33 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
return err
}
- httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil ||
- c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
+ return validateAuthConfigs(c)
+}
- if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) {
- return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
+// validateAuthConfigs validates that at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam is configured.
+func validateAuthConfigs(c *RemoteWriteConfig) error {
+ var authConfigured []string
+ if c.HTTPClientConfig.BasicAuth != nil {
+ authConfigured = append(authConfigured, "basic_auth")
}
-
- if c.SigV4Config != nil && c.AzureADConfig != nil {
- return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
+ if c.HTTPClientConfig.Authorization != nil {
+ authConfigured = append(authConfigured, "authorization")
+ }
+ if c.HTTPClientConfig.OAuth2 != nil {
+ authConfigured = append(authConfigured, "oauth2")
+ }
+ if c.SigV4Config != nil {
+ authConfigured = append(authConfigured, "sigv4")
+ }
+ if c.AzureADConfig != nil {
+ authConfigured = append(authConfigured, "azuread")
+ }
+ if c.GoogleIAMConfig != nil {
+ authConfigured = append(authConfigured, "google_iam")
+ }
+ if len(authConfigured) > 1 {
+ return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam must be configured. Currently configured: %v", authConfigured)
}
-
return nil
}
@@ -1189,7 +1232,7 @@ func validateHeadersForTracing(headers map[string]string) error {
func validateHeaders(headers map[string]string) error {
for header := range headers {
if strings.ToLower(header) == "authorization" {
- return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter")
+ return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter")
}
if _, ok := reservedHeaders[strings.ToLower(header)]; ok {
return fmt.Errorf("%s is a reserved header. It must not be changed", header)
@@ -1237,13 +1280,20 @@ type MetadataConfig struct {
MaxSamplesPerSend int `yaml:"max_samples_per_send,omitempty"`
}
+const (
+ // DefaultChunkedReadLimit is the default value for the maximum size of the protobuf frame the client allows.
+ // 50MB is the default. This is equivalent to ~100k full XOR chunks and average labelset.
+ DefaultChunkedReadLimit = 5e+7
+)
+
// RemoteReadConfig is the configuration for reading from remote storage.
type RemoteReadConfig struct {
- URL *config.URL `yaml:"url"`
- RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"`
- Headers map[string]string `yaml:"headers,omitempty"`
- ReadRecent bool `yaml:"read_recent,omitempty"`
- Name string `yaml:"name,omitempty"`
+ URL *config.URL `yaml:"url"`
+ RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"`
+ ChunkedReadLimit uint64 `yaml:"chunked_read_limit,omitempty"`
+ Headers map[string]string `yaml:"headers,omitempty"`
+ ReadRecent bool `yaml:"read_recent,omitempty"`
+ Name string `yaml:"name,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
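
The new `chunked_read_limit` field falls back to `DefaultChunkedReadLimit` when left unset, as the updated `expectedConf` in `config_test.go` below shows. A tiny sketch of the zero-value fallback, with the constant duplicated locally so the snippet is self-contained:

```go
package main

import "fmt"

// DefaultChunkedReadLimit mirrors the constant above: 5e7 bytes, i.e. 50 MB
// per protobuf frame on the chunked remote-read path.
const DefaultChunkedReadLimit = 5e7

func main() {
	var configured uint64 // zero means "not set in the YAML"
	limit := configured
	if limit == 0 {
		limit = DefaultChunkedReadLimit
	}
	fmt.Printf("effective chunked_read_limit: %d bytes (%.0f MB)\n",
		limit, float64(limit)/1e6)
}
```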
diff --git a/config/config_test.go b/config/config_test.go
index b684fdb50c..2219061823 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -16,6 +16,7 @@ package config
import (
"crypto/tls"
"encoding/json"
+ "fmt"
"net/url"
"os"
"path/filepath"
@@ -164,10 +165,11 @@ var expectedConf = &Config{
RemoteReadConfigs: []*RemoteReadConfig{
{
- URL: mustParseURL("http://remote1/read"),
- RemoteTimeout: model.Duration(1 * time.Minute),
- ReadRecent: true,
- Name: "default",
+ URL: mustParseURL("http://remote1/read"),
+ RemoteTimeout: model.Duration(1 * time.Minute),
+ ChunkedReadLimit: DefaultChunkedReadLimit,
+ ReadRecent: true,
+ Name: "default",
HTTPClientConfig: config.HTTPClientConfig{
FollowRedirects: true,
EnableHTTP2: false,
@@ -177,6 +179,7 @@ var expectedConf = &Config{
{
URL: mustParseURL("http://remote3/read"),
RemoteTimeout: model.Duration(1 * time.Minute),
+ ChunkedReadLimit: DefaultChunkedReadLimit,
ReadRecent: false,
Name: "read_special",
RequiredMatchers: model.LabelSet{"job": "special"},
@@ -1826,7 +1829,7 @@ var expectedErrors = []struct {
},
{
filename: "remote_write_authorization_header.bad.yml",
- errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`,
+ errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter`,
},
{
filename: "remote_write_wrong_msg.bad.yml",
@@ -2300,3 +2303,52 @@ func TestScrapeConfigDisableCompression(t *testing.T) {
require.False(t, got.ScrapeConfigs[0].EnableCompression)
}
+
+func TestScrapeConfigNameValidationSettings(t *testing.T) {
+ model.NameValidationScheme = model.UTF8Validation
+ defer func() {
+ model.NameValidationScheme = model.LegacyValidation
+ }()
+
+ tests := []struct {
+ name string
+ inputFile string
+ expectScheme string
+ }{
+ {
+ name: "blank config implies default",
+ inputFile: "scrape_config_default_validation_mode",
+ expectScheme: "",
+ },
+ {
+ name: "global setting implies local settings",
+ inputFile: "scrape_config_global_validation_mode",
+ expectScheme: "utf8",
+ },
+ {
+ name: "local setting",
+ inputFile: "scrape_config_local_validation_mode",
+ expectScheme: "utf8",
+ },
+ {
+ name: "local setting overrides global setting",
+ inputFile: "scrape_config_local_global_validation_mode",
+ expectScheme: "legacy",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, log.NewNopLogger())
+ require.NoError(t, err)
+
+			out, err := yaml.Marshal(want)
+			require.NoError(t, err)
+ got := &Config{}
+ require.NoError(t, yaml.UnmarshalStrict(out, got))
+
+ require.Equal(t, tc.expectScheme, got.ScrapeConfigs[0].MetricNameValidationScheme)
+ })
+ }
+}
diff --git a/config/testdata/scrape_config_default_validation_mode.yml b/config/testdata/scrape_config_default_validation_mode.yml
new file mode 100644
index 0000000000..96680d6438
--- /dev/null
+++ b/config/testdata/scrape_config_default_validation_mode.yml
@@ -0,0 +1,2 @@
+scrape_configs:
+ - job_name: prometheus
diff --git a/config/testdata/scrape_config_global_validation_mode.yml b/config/testdata/scrape_config_global_validation_mode.yml
new file mode 100644
index 0000000000..1548554397
--- /dev/null
+++ b/config/testdata/scrape_config_global_validation_mode.yml
@@ -0,0 +1,4 @@
+global:
+ metric_name_validation_scheme: utf8
+scrape_configs:
+ - job_name: prometheus
diff --git a/config/testdata/scrape_config_local_global_validation_mode.yml b/config/testdata/scrape_config_local_global_validation_mode.yml
new file mode 100644
index 0000000000..d13605e21d
--- /dev/null
+++ b/config/testdata/scrape_config_local_global_validation_mode.yml
@@ -0,0 +1,5 @@
+global:
+ metric_name_validation_scheme: utf8
+scrape_configs:
+ - job_name: prometheus
+ metric_name_validation_scheme: legacy
diff --git a/config/testdata/scrape_config_local_validation_mode.yml b/config/testdata/scrape_config_local_validation_mode.yml
new file mode 100644
index 0000000000..fad4235806
--- /dev/null
+++ b/config/testdata/scrape_config_local_validation_mode.yml
@@ -0,0 +1,3 @@
+scrape_configs:
+ - job_name: prometheus
+ metric_name_validation_scheme: utf8
diff --git a/discovery/discoverer_metrics_noop.go b/discovery/discoverer_metrics_noop.go
index 638317ace1..4321204b6c 100644
--- a/discovery/discoverer_metrics_noop.go
+++ b/discovery/discoverer_metrics_noop.go
@@ -13,7 +13,7 @@
package discovery
-// Create a dummy metrics struct, because this SD doesn't have any metrics.
+// NoopDiscovererMetrics is a dummy metrics struct, because this SD doesn't have any metrics.
type NoopDiscovererMetrics struct{}
var _ DiscovererMetrics = (*NoopDiscovererMetrics)(nil)
diff --git a/discovery/discovery.go b/discovery/discovery.go
index a5826f8176..a91faf6c86 100644
--- a/discovery/discovery.go
+++ b/discovery/discovery.go
@@ -39,7 +39,7 @@ type Discoverer interface {
Run(ctx context.Context, up chan<- []*targetgroup.Group)
}
-// Internal metrics of service discovery mechanisms.
+// DiscovererMetrics are internal metrics of service discovery mechanisms.
type DiscovererMetrics interface {
Register() error
Unregister()
@@ -56,7 +56,7 @@ type DiscovererOptions struct {
HTTPClientOptions []config.HTTPClientOption
}
-// Metrics used by the "refresh" package.
+// RefreshMetrics are used by the "refresh" package.
// We define them here in the "discovery" package in order to avoid a cyclic dependency between
// "discovery" and "refresh".
type RefreshMetrics struct {
@@ -64,17 +64,18 @@ type RefreshMetrics struct {
Duration prometheus.Observer
}
-// Instantiate the metrics used by the "refresh" package.
+// RefreshMetricsInstantiator instantiates the metrics used by the "refresh" package.
type RefreshMetricsInstantiator interface {
Instantiate(mech string) *RefreshMetrics
}
-// An interface for registering, unregistering, and instantiating metrics for the "refresh" package.
-// Refresh metrics are registered and unregistered outside of the service discovery mechanism.
-// This is so that the same metrics can be reused across different service discovery mechanisms.
-// To manage refresh metrics inside the SD mechanism, we'd need to use const labels which are
-// specific to that SD. However, doing so would also expose too many unused metrics on
-// the Prometheus /metrics endpoint.
+// RefreshMetricsManager is an interface for registering, unregistering, and
+// instantiating metrics for the "refresh" package. Refresh metrics are
+// registered and unregistered outside of the service discovery mechanism. This
+// is so that the same metrics can be reused across different service discovery
+// mechanisms. To manage refresh metrics inside the SD mechanism, we'd need to
+// use const labels which are specific to that SD. However, doing so would also
+// expose too many unused metrics on the Prometheus /metrics endpoint.
type RefreshMetricsManager interface {
DiscovererMetrics
RefreshMetricsInstantiator
@@ -145,7 +146,8 @@ func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
return staticDiscoverer(c), nil
}
-// No metrics are needed for this service discovery mechanism.
+// NewDiscovererMetrics returns NoopDiscovererMetrics because no metrics are
+// needed for this service discovery mechanism.
func (c StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics {
return &NoopDiscovererMetrics{}
}
diff --git a/discovery/eureka/client.go b/discovery/eureka/client.go
index 5a90968f1b..52e8ce7b48 100644
--- a/discovery/eureka/client.go
+++ b/discovery/eureka/client.go
@@ -97,7 +97,6 @@ func fetchApps(ctx context.Context, server string, client *http.Client) (*Applic
resp.Body.Close()
}()
- //nolint:usestdlibvars
if resp.StatusCode/100 != 2 {
return nil, fmt.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode)
}
diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go
index 64155bfaed..516470b05a 100644
--- a/discovery/hetzner/robot.go
+++ b/discovery/hetzner/robot.go
@@ -87,7 +87,6 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
resp.Body.Close()
}()
- //nolint:usestdlibvars
if resp.StatusCode/100 != 2 {
return nil, fmt.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode)
}
diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go
index e877657dba..3ea98c5db9 100644
--- a/discovery/kubernetes/endpoints_test.go
+++ b/discovery/kubernetes/endpoints_test.go
@@ -970,7 +970,7 @@ func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) {
}.Run(t)
}
-// TestEndpointsUpdatePod makes sure that Endpoints discovery detects underlying Pods changes.
+// TestEndpointsDiscoveryUpdatePod makes sure that Endpoints discovery detects underlying Pods changes.
// See https://github.com/prometheus/prometheus/issues/11305 for more details.
func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
pod := &v1.Pod{
diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go
index 552f8a4453..50f25a20ab 100644
--- a/discovery/kubernetes/kubernetes_test.go
+++ b/discovery/kubernetes/kubernetes_test.go
@@ -154,7 +154,7 @@ func (d k8sDiscoveryTest) Run(t *testing.T) {
// readResultWithTimeout reads all targetgroups from channel with timeout.
// It merges targetgroups by source and sends the result to result channel.
-func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, max int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) {
+func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, maxGroups int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) {
res := make(map[string]*targetgroup.Group)
timeout := time.After(stopAfter)
Loop:
@@ -167,7 +167,7 @@ Loop:
}
res[tg.Source] = tg
}
- if len(res) == max {
+ if len(res) == maxGroups {
// Reached max target groups we may get, break fast.
break Loop
}
@@ -175,10 +175,10 @@ Loop:
// Because we use queue, an object that is created then
// deleted or updated may be processed only once.
// So possibly we may skip events, timed out here.
- t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), max)
+ t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), maxGroups)
break Loop
case <-ctx.Done():
- t.Logf("stopped, got %d (max: %d) items", len(res), max)
+ t.Logf("stopped, got %d (max: %d) items", len(res), maxGroups)
break Loop
}
}
diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go
index a455a8e341..f1be963113 100644
--- a/discovery/legacymanager/manager_test.go
+++ b/discovery/legacymanager/manager_test.go
@@ -1090,7 +1090,6 @@ func TestCoordinationWithReceiver(t *testing.T) {
}
for _, tc := range testCases {
- tc := tc
t.Run(tc.title, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
diff --git a/discovery/manager.go b/discovery/manager.go
index 897d7d151c..cefa90a866 100644
--- a/discovery/manager.go
+++ b/discovery/manager.go
@@ -64,7 +64,7 @@ func (p *Provider) Config() interface{} {
return p.config
}
-// Registers the metrics needed for SD mechanisms.
+// CreateAndRegisterSDMetrics registers the metrics needed for SD mechanisms.
// Does not register the metrics for the Discovery Manager.
// TODO(ptodev): Add ability to unregister the metrics?
func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]DiscovererMetrics, error) {
@@ -212,9 +212,7 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
m.metrics.FailedConfigs.Set(float64(failedCount))
var (
- wg sync.WaitGroup
- // keep shows if we keep any providers after reload.
- keep bool
+ wg sync.WaitGroup
newProviders []*Provider
)
for _, prov := range m.providers {
@@ -228,13 +226,12 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
continue
}
newProviders = append(newProviders, prov)
- // refTargets keeps reference targets used to populate new subs' targets
+		// refTargets keeps reference targets used to populate new subs' targets, as they should be the same.
var refTargets map[string]*targetgroup.Group
prov.mu.Lock()
m.targetsMtx.Lock()
for s := range prov.subs {
- keep = true
refTargets = m.targets[poolKey{s, prov.name}]
// Remove obsolete subs' targets.
if _, ok := prov.newSubs[s]; !ok {
@@ -267,7 +264,9 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
// While startProvider does pull the trigger, it may take some time to do so, therefore
// we pull the trigger as soon as possible so that downstream managers can populate their state.
// See https://github.com/prometheus/prometheus/pull/8639 for details.
- if keep {
+	// This also helps the downstream managers drop stale targets as soon as possible.
+ // See https://github.com/prometheus/prometheus/pull/13147 for details.
+ if len(m.providers) > 0 {
select {
case m.triggerSend <- struct{}{}:
default:
@@ -288,7 +287,9 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D
name: {},
},
}
+ m.mtx.Lock()
m.providers = append(m.providers, p)
+ m.mtx.Unlock()
m.startProvider(ctx, p)
}
@@ -393,8 +394,16 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) {
m.targets[poolKey] = make(map[string]*targetgroup.Group)
}
for _, tg := range tgs {
- if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics.
+ // Some Discoverers send nil target group so need to check for it to avoid panics.
+ if tg == nil {
+ continue
+ }
+ if len(tg.Targets) > 0 {
m.targets[poolKey][tg.Source] = tg
+ } else {
+ // The target group is empty, drop the corresponding entry to avoid leaks.
+ // In case the group yielded targets before, allGroups() will take care of making consumers drop them.
+ delete(m.targets[poolKey], tg.Source)
}
}
}
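
A reduced model of why the empty-group branch deletes the map entry rather than storing it: keeping empty groups would leave one entry alive per stale source forever. The `group` type and map layout below are simplified stand-ins, not the manager's real bookkeeping:

```go
package main

import "fmt"

type group struct{ targets []string }

// update stores a non-empty group, but deletes the entry for an empty one,
// so sources whose targets all went away don't leak map entries.
func update(store map[string]group, source string, g group) {
	if len(g.targets) > 0 {
		store[source] = g
		return
	}
	delete(store, source) // avoid leaking entries for sources with no targets
}

func main() {
	store := map[string]group{}
	update(store, "static/0", group{targets: []string{"foo:9090"}})
	update(store, "static/0", group{}) // all targets removed on reload
	fmt.Println(len(store))            // 0: no leaked entry
}
```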
@@ -403,19 +412,33 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group {
tSets := map[string][]*targetgroup.Group{}
n := map[string]int{}
+ m.mtx.RLock()
m.targetsMtx.Lock()
- defer m.targetsMtx.Unlock()
- for pkey, tsets := range m.targets {
- for _, tg := range tsets {
- // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager'
- // to signal that it needs to stop all scrape loops for this target set.
- tSets[pkey.setName] = append(tSets[pkey.setName], tg)
- n[pkey.setName] += len(tg.Targets)
+ for _, p := range m.providers {
+ p.mu.RLock()
+ for s := range p.subs {
+ // Send empty lists for subs without any targets to make sure old stale targets are dropped by consumers.
+ // See: https://github.com/prometheus/prometheus/issues/12858 for details.
+ if _, ok := tSets[s]; !ok {
+ tSets[s] = []*targetgroup.Group{}
+ n[s] = 0
+ }
+ if tsets, ok := m.targets[poolKey{s, p.name}]; ok {
+ for _, tg := range tsets {
+ tSets[s] = append(tSets[s], tg)
+ n[s] += len(tg.Targets)
+ }
+ }
}
+ p.mu.RUnlock()
}
+ m.targetsMtx.Unlock()
+ m.mtx.RUnlock()
+
for setName, v := range n {
m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v))
}
+
return tSets
}
diff --git a/discovery/manager_test.go b/discovery/manager_test.go
index be07edbdb4..831cefe514 100644
--- a/discovery/manager_test.go
+++ b/discovery/manager_test.go
@@ -939,11 +939,13 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
discoveryManager.ApplyConfig(c)
// Original targets should be present as soon as possible.
+	// An empty list should be sent for prometheus2 to drop any stale targets.
syncedTargets = <-discoveryManager.SyncCh()
mu.Unlock()
- require.Len(t, syncedTargets, 1)
+ require.Len(t, syncedTargets, 2)
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
require.Len(t, syncedTargets["prometheus"], 1)
+ require.Empty(t, syncedTargets["prometheus2"])
// prometheus2 configs should be ready on second sync.
syncedTargets = <-discoveryManager.SyncCh()
@@ -1049,8 +1051,8 @@ func TestDiscovererConfigs(t *testing.T) {
}
// TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after
-// removing all targets from the static_configs sends an update with empty targetGroups.
-// This is required to signal the receiver that this target set has no current targets.
+// removing all targets from the static_configs cleans the corresponding targetGroups entries to avoid leaks and sends an empty update.
+// The update is required to signal the consumers that the previous targets should be dropped.
func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -1083,16 +1085,14 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
discoveryManager.ApplyConfig(c)
syncedTargets = <-discoveryManager.SyncCh()
+ require.Len(t, discoveryManager.targets, 1)
p = pk("static", "prometheus", 1)
targetGroups, ok := discoveryManager.targets[p]
- require.True(t, ok, "'%v' should be present in target groups", p)
- group, ok := targetGroups[""]
- require.True(t, ok, "missing '' key in target groups %v", targetGroups)
-
- require.Empty(t, group.Targets, "Invalid number of targets.")
- require.Len(t, syncedTargets, 1)
- require.Len(t, syncedTargets["prometheus"], 1)
- require.Nil(t, syncedTargets["prometheus"][0].Labels)
+ require.True(t, ok, "'%v' should be present in targets", p)
+ // Otherwise the targetGroups will leak, see https://github.com/prometheus/prometheus/issues/12436.
+	require.Empty(t, targetGroups, "'%v' should no longer have any associated target groups", p)
+	require.Len(t, syncedTargets, 1, "an update with no targetGroups should still be sent.")
+	require.Empty(t, syncedTargets["prometheus"])
}
func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
@@ -1275,6 +1275,7 @@ func TestCoordinationWithReceiver(t *testing.T) {
Targets: []model.LabelSet{{"__instance__": "1"}},
},
},
+ "mock1": {},
},
},
{
diff --git a/discovery/metrics_refresh.go b/discovery/metrics_refresh.go
index d621165ced..ef49e591a3 100644
--- a/discovery/metrics_refresh.go
+++ b/discovery/metrics_refresh.go
@@ -17,7 +17,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
-// Metric vectors for the "refresh" package.
+// RefreshMetricsVecs are metric vectors for the "refresh" package.
// We define them here in the "discovery" package in order to avoid a cyclic dependency between
// "discovery" and "refresh".
type RefreshMetricsVecs struct {
diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go
index 11445092ee..68f6fe3ccc 100644
--- a/discovery/moby/docker.go
+++ b/discovery/moby/docker.go
@@ -19,6 +19,7 @@ import (
"net"
"net/http"
"net/url"
+ "sort"
"strconv"
"time"
@@ -251,28 +252,26 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
}
if d.matchFirstNetwork && len(networks) > 1 {
- // Match user defined network
- if containerNetworkMode.IsUserDefined() {
- networkMode := string(containerNetworkMode)
- networks = map[string]*network.EndpointSettings{networkMode: networks[networkMode]}
- } else {
- // Get first network if container network mode has "none" value.
- // This case appears under certain condition:
- // 1. Container created with network set to "--net=none".
- // 2. Disconnect network "none".
- // 3. Reconnect network with user defined networks.
- var first string
- for k, n := range networks {
- if n != nil {
- first = k
- break
- }
+ // Sort networks by name and take first non-nil network.
+ keys := make([]string, 0, len(networks))
+ for k, n := range networks {
+ if n != nil {
+ keys = append(keys, k)
}
- networks = map[string]*network.EndpointSettings{first: networks[first]}
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ firstNetworkMode := keys[0]
+ firstNetwork := networks[firstNetworkMode]
+ networks = map[string]*network.EndpointSettings{firstNetworkMode: firstNetwork}
}
}
for _, n := range networks {
+ if n == nil {
+ continue
+ }
+
var added bool
for _, p := range c.Ports {
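
A standalone sketch of the selection rule introduced above: sort the non-nil candidate networks by name and keep only the first, so the chosen network is stable across refreshes. `Endpoint` is a stand-in for the Docker SDK's `network.EndpointSettings`:

```go
package main

import (
	"fmt"
	"sort"
)

type Endpoint struct{ IP string }

// firstNetwork returns the lexicographically first non-nil network, giving a
// deterministic choice when a container is attached to several networks.
func firstNetwork(networks map[string]*Endpoint) (string, *Endpoint) {
	keys := make([]string, 0, len(networks))
	for k, n := range networks {
		if n != nil {
			keys = append(keys, k)
		}
	}
	if len(keys) == 0 {
		return "", nil
	}
	sort.Strings(keys)
	return keys[0], networks[keys[0]]
}

func main() {
	name, ep := firstNetwork(map[string]*Endpoint{
		"dockersd_private1": {IP: "172.21.0.3"},
		"dockersd_private":  {IP: "172.20.0.3"},
		"none":              nil,
	})
	fmt.Println(name, ep.IP) // always dockersd_private 172.20.0.3
}
```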
diff --git a/discovery/moby/docker_test.go b/discovery/moby/docker_test.go
index c108ddf582..398393a15a 100644
--- a/discovery/moby/docker_test.go
+++ b/discovery/moby/docker_test.go
@@ -60,9 +60,9 @@ host: %s
tg := tgs[0]
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
- require.Len(t, tg.Targets, 6)
+ require.Len(t, tg.Targets, 8)
- for i, lbls := range []model.LabelSet{
+ expected := []model.LabelSet{
{
"__address__": "172.19.0.2:9100",
"__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca",
@@ -163,7 +163,43 @@ host: %s
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "9104",
},
- } {
+ {
+ "__address__": "172.20.0.3:3306",
+ "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f",
+ "__meta_docker_container_label_com_docker_compose_project": "dockersd",
+ "__meta_docker_container_label_com_docker_compose_service": "mysql",
+ "__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+ "__meta_docker_container_name": "/dockersd_multi_networks",
+ "__meta_docker_container_network_mode": "dockersd_private_none",
+ "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+ "__meta_docker_network_ingress": "false",
+ "__meta_docker_network_internal": "false",
+ "__meta_docker_network_ip": "172.20.0.3",
+ "__meta_docker_network_name": "dockersd_private",
+ "__meta_docker_network_scope": "local",
+ "__meta_docker_port_private": "3306",
+ },
+ {
+ "__address__": "172.20.0.3:33060",
+ "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f",
+ "__meta_docker_container_label_com_docker_compose_project": "dockersd",
+ "__meta_docker_container_label_com_docker_compose_service": "mysql",
+ "__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+ "__meta_docker_container_name": "/dockersd_multi_networks",
+ "__meta_docker_container_network_mode": "dockersd_private_none",
+ "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+ "__meta_docker_network_ingress": "false",
+ "__meta_docker_network_internal": "false",
+ "__meta_docker_network_ip": "172.20.0.3",
+ "__meta_docker_network_name": "dockersd_private",
+ "__meta_docker_network_scope": "local",
+ "__meta_docker_port_private": "33060",
+ },
+ }
+ sortFunc(expected)
+ sortFunc(tg.Targets)
+
+ for i, lbls := range expected {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
require.Equal(t, lbls, tg.Targets[i])
})
@@ -202,13 +238,8 @@ host: %s
tg := tgs[0]
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
- require.Len(t, tg.Targets, 9)
+ require.Len(t, tg.Targets, 13)
- sortFunc := func(labelSets []model.LabelSet) {
- sort.Slice(labelSets, func(i, j int) bool {
- return labelSets[i]["__address__"] < labelSets[j]["__address__"]
- })
- }
expected := []model.LabelSet{
{
"__address__": "172.19.0.2:9100",
@@ -359,6 +390,70 @@ host: %s
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "9104",
},
+ {
+ "__address__": "172.20.0.3:3306",
+ "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f",
+ "__meta_docker_container_label_com_docker_compose_project": "dockersd",
+ "__meta_docker_container_label_com_docker_compose_service": "mysql",
+ "__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+ "__meta_docker_container_name": "/dockersd_multi_networks",
+ "__meta_docker_container_network_mode": "dockersd_private_none",
+ "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+ "__meta_docker_network_ingress": "false",
+ "__meta_docker_network_internal": "false",
+ "__meta_docker_network_ip": "172.20.0.3",
+ "__meta_docker_network_name": "dockersd_private",
+ "__meta_docker_network_scope": "local",
+ "__meta_docker_port_private": "3306",
+ },
+ {
+ "__address__": "172.20.0.3:33060",
+ "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f",
+ "__meta_docker_container_label_com_docker_compose_project": "dockersd",
+ "__meta_docker_container_label_com_docker_compose_service": "mysql",
+ "__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+ "__meta_docker_container_name": "/dockersd_multi_networks",
+ "__meta_docker_container_network_mode": "dockersd_private_none",
+ "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+ "__meta_docker_network_ingress": "false",
+ "__meta_docker_network_internal": "false",
+ "__meta_docker_network_ip": "172.20.0.3",
+ "__meta_docker_network_name": "dockersd_private",
+ "__meta_docker_network_scope": "local",
+ "__meta_docker_port_private": "33060",
+ },
+ {
+ "__address__": "172.21.0.3:3306",
+ "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f",
+ "__meta_docker_container_label_com_docker_compose_project": "dockersd",
+ "__meta_docker_container_label_com_docker_compose_service": "mysql",
+ "__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+ "__meta_docker_container_name": "/dockersd_multi_networks",
+ "__meta_docker_container_network_mode": "dockersd_private_none",
+ "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
+ "__meta_docker_network_ingress": "false",
+ "__meta_docker_network_internal": "false",
+ "__meta_docker_network_ip": "172.21.0.3",
+ "__meta_docker_network_name": "dockersd_private1",
+ "__meta_docker_network_scope": "local",
+ "__meta_docker_port_private": "3306",
+ },
+ {
+ "__address__": "172.21.0.3:33060",
+ "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f",
+ "__meta_docker_container_label_com_docker_compose_project": "dockersd",
+ "__meta_docker_container_label_com_docker_compose_service": "mysql",
+ "__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+ "__meta_docker_container_name": "/dockersd_multi_networks",
+ "__meta_docker_container_network_mode": "dockersd_private_none",
+ "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
+ "__meta_docker_network_ingress": "false",
+ "__meta_docker_network_internal": "false",
+ "__meta_docker_network_ip": "172.21.0.3",
+ "__meta_docker_network_name": "dockersd_private1",
+ "__meta_docker_network_scope": "local",
+ "__meta_docker_port_private": "33060",
+ },
}
sortFunc(expected)
@@ -370,3 +465,9 @@ host: %s
})
}
}
+
+func sortFunc(labelSets []model.LabelSet) {
+ sort.Slice(labelSets, func(i, j int) bool {
+ return labelSets[i]["__address__"] < labelSets[j]["__address__"]
+ })
+}
diff --git a/discovery/moby/testdata/dockerprom/containers/json.json b/discovery/moby/testdata/dockerprom/containers/json.json
index ebfc56b6d5..33406bf9a4 100644
--- a/discovery/moby/testdata/dockerprom/containers/json.json
+++ b/discovery/moby/testdata/dockerprom/containers/json.json
@@ -228,5 +228,74 @@
"Networks": {}
},
"Mounts": []
+ },
+ {
+ "Id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f",
+ "Names": [
+ "/dockersd_multi_networks"
+ ],
+ "Image": "mysql:5.7.29",
+ "ImageID": "sha256:16ae2f4625ba63a250462bedeece422e741de9f0caf3b1d89fd5b257aca80cd1",
+ "Command": "mysqld",
+ "Created": 1616273136,
+ "Ports": [
+ {
+ "PrivatePort": 3306,
+ "Type": "tcp"
+ },
+ {
+ "PrivatePort": 33060,
+ "Type": "tcp"
+ }
+ ],
+ "Labels": {
+ "com.docker.compose.project": "dockersd",
+ "com.docker.compose.service": "mysql",
+ "com.docker.compose.version": "2.2.2"
+ },
+ "State": "running",
+ "Status": "Up 40 seconds",
+ "HostConfig": {
+ "NetworkMode": "dockersd_private_none"
+ },
+ "NetworkSettings": {
+ "Networks": {
+ "dockersd_private": {
+ "IPAMConfig": null,
+ "Links": null,
+ "Aliases": null,
+ "NetworkID": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+ "EndpointID": "972d6807997369605ace863af58de6cb90c787a5bf2ffc4105662d393ae539b7",
+ "Gateway": "172.20.0.1",
+ "IPAddress": "172.20.0.3",
+ "IPPrefixLen": 16,
+ "IPv6Gateway": "",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "MacAddress": "02:42:ac:14:00:02",
+ "DriverOpts": null
+ },
+ "dockersd_private1": {
+ "IPAMConfig": {},
+ "Links": null,
+ "Aliases": [
+ "mysql",
+ "mysql",
+ "f9ade4b83199"
+ ],
+ "NetworkID": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
+ "EndpointID": "91a98405344ee1cb7d977cafabe634837876651544b32da20a5e0155868e6f5f",
+ "Gateway": "172.21.0.1",
+ "IPAddress": "172.21.0.3",
+ "IPPrefixLen": 24,
+ "IPv6Gateway": "",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "MacAddress": "02:42:ac:15:00:02",
+ "DriverOpts": null
+ }
+ }
+ },
+ "Mounts": []
}
]
diff --git a/discovery/util.go b/discovery/util.go
index 83cc640dd9..4e2a088518 100644
--- a/discovery/util.go
+++ b/discovery/util.go
@@ -19,8 +19,8 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
-// A utility to be used by implementations of discovery.Discoverer
-// which need to manage the lifetime of their metrics.
+// MetricRegisterer is used by implementations of discovery.Discoverer that need
+// to manage the lifetime of their metrics.
type MetricRegisterer interface {
RegisterMetrics() error
UnregisterMetrics()
@@ -34,7 +34,7 @@ type metricRegistererImpl struct {
var _ MetricRegisterer = &metricRegistererImpl{}
-// Creates an instance of a MetricRegisterer.
+// NewMetricRegisterer creates an instance of a MetricRegisterer.
// Typically called inside the implementation of the NewDiscoverer() method.
func NewMetricRegisterer(reg prometheus.Registerer, metrics []prometheus.Collector) MetricRegisterer {
return &metricRegistererImpl{
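
A hedged sketch of the call pattern the comment describes: a discoverer builds its collectors, wraps them via `NewMetricRegisterer` inside its constructor, and drives `RegisterMetrics`/`UnregisterMetrics` from its lifecycle. `myDiscovery` and its metric are illustrative, not a real SD implementation:

```go
package example

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/discovery"
)

type myDiscovery struct {
	refreshes prometheus.Counter
	metricReg discovery.MetricRegisterer
}

func newMyDiscovery(reg prometheus.Registerer) (*myDiscovery, error) {
	d := &myDiscovery{
		refreshes: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "my_sd_refreshes_total",
			Help: "Number of refreshes performed by the example SD.",
		}),
	}
	// Wrap all collectors once; RegisterMetrics/UnregisterMetrics then manage
	// their lifetime as a unit.
	d.metricReg = discovery.NewMetricRegisterer(reg, []prometheus.Collector{d.refreshes})
	if err := d.metricReg.RegisterMetrics(); err != nil {
		return nil, err
	}
	return d, nil
}

func (d *myDiscovery) stop() { d.metricReg.UnregisterMetrics() }
```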
diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md
index 2faf65105e..7d9e5a3c80 100644
--- a/docs/command-line/prometheus.md
+++ b/docs/command-line/prometheus.md
@@ -15,11 +15,11 @@ The Prometheus monitoring server
| -h, --help | Show context-sensitive help (also try --help-long and --help-man). | |
| --version | Show application version. | |
| --config.file | Prometheus configuration file path. | `prometheus.yml` |
-| --web.listen-address | Address to listen on for UI, API, and telemetry. | `0.0.0.0:9090` |
+| --web.listen-address ... | Address to listen on for UI, API, and telemetry. Can be repeated. | `0.0.0.0:9090` |
| --auto-gomemlimit.ratio | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` |
| --web.config.file | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | |
| --web.read-timeout | Maximum duration before timing out read of the request, and closing idle connections. | `5m` |
-| --web.max-connections | Maximum number of simultaneous connections. | `512` |
+| --web.max-connections | Maximum number of simultaneous connections across all listeners. | `512` |
| --web.external-url | The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically. | |
| --web.route-prefix | Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url. | |
| --web.user-assets | Path to static asset directory, available at /user. | |
@@ -56,7 +56,8 @@ The Prometheus monitoring server
| --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
-| --enable-feature | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
+| --scrape.name-escaping-scheme | Method for escaping legacy invalid names when sending to a Prometheus version that does not support UTF-8. Can be one of "values", "underscores", or "dots". | `values` |
+| --enable-feature ... | Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, no-default-scrape-port, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` |
diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md
index 443cd3f0cb..6d74200e65 100644
--- a/docs/command-line/promtool.md
+++ b/docs/command-line/promtool.md
@@ -15,7 +15,7 @@ Tooling for the Prometheus monitoring system.
| -h, --help | Show context-sensitive help (also try --help-long and --help-man). |
| --version | Show application version. |
| --experimental | Enable experimental commands. |
-| --enable-feature | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. |
+| --enable-feature ... | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. |
@@ -281,7 +281,7 @@ Run series query.
| Flag | Description |
| --- | --- |
-| --match | Series selector. Can be specified multiple times. |
+| --match ... | Series selector. Can be specified multiple times. |
| --start | Start time (RFC3339 or Unix timestamp). |
| --end | End time (RFC3339 or Unix timestamp). |
@@ -309,7 +309,7 @@ Run labels query.
| --- | --- |
| --start | Start time (RFC3339 or Unix timestamp). |
| --end | End time (RFC3339 or Unix timestamp). |
-| --match | Series selector. Can be specified multiple times. |
+| --match ... | Series selector. Can be specified multiple times. |
@@ -338,7 +338,7 @@ Run queries against your Prometheus to analyze the usage pattern of certain metr
| --type | Type of metric: histogram. | |
| --duration | Time frame to analyze. | `1h` |
| --time | Query time (RFC3339 or Unix timestamp), defaults to now. | |
-| --match | Series selector. Can be specified multiple times. | |
+| --match ... | Series selector. Can be specified multiple times. | |
@@ -442,6 +442,15 @@ Unit testing.
+#### Flags
+
+| Flag | Description |
+| --- | --- |
+| --junit | File path to store JUnit XML test results. |
+
+
+
+
##### `promtool test rules`
Unit tests for rules.
@@ -452,7 +461,7 @@ Unit tests for rules.
| Flag | Description | Default |
| --- | --- | --- |
-| --run | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | |
+| --run ... | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | |
| --diff | [Experimental] Print colored differential output between expected & received output. | `false` |
@@ -569,7 +578,7 @@ Dump samples from a TSDB.
| --sandbox-dir-root | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` |
| --min-time | Minimum timestamp to dump. | `-9223372036854775808` |
| --max-time | Maximum timestamp to dump. | `9223372036854775807` |
-| --match | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
+| --match ... | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
@@ -596,7 +605,7 @@ Dump samples from a TSDB.
| --sandbox-dir-root | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` |
| --min-time | Minimum timestamp to dump. | `-9223372036854775808` |
| --max-time | Maximum timestamp to dump. | `9223372036854775807` |
-| --match | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
+| --match ... | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
@@ -632,6 +641,15 @@ Import samples from OpenMetrics input and produce TSDB blocks. Please refer to t
+###### Flags
+
+| Flag | Description |
+| --- | --- |
+| --label | Label to attach to metrics. Can be specified multiple times. Example --label=label_name=label_value |
+
+
+
+
###### Arguments
| Argument | Description | Default | Required |
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index 5aa57b3ba6..a42126cf22 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -70,7 +70,7 @@ global:
# How frequently to evaluate rules.
[ evaluation_interval: <duration> | default = 1m ]
-
+
# Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received.
# Metric availability delays are more likely to occur when Prometheus is running as a remote write target, but can also occur when there's anomalies with scraping.
[ rule_query_offset: <duration> | default = 0s ]
@@ -121,6 +121,11 @@ global:
# that will be kept in memory. 0 means no limit.
[ keep_dropped_targets: <int> | default = 0 ]
+ # Specifies the validation scheme for metric and label names. Either blank or
+ # "legacy" for letters, numbers, colons, and underscores; or "utf8" for full
+ # UTF-8 support.
+[ metric_name_validation_scheme: <string> | default = "legacy" ]
+
runtime:
# Configure the Go garbage collector GOGC parameter
# See: https://tip.golang.org/doc/gc-guide#GOGC
@@ -302,6 +307,17 @@ tls_config:
[ proxy_connect_header:
    [ <string>: [<secret>, ...] ] ]
+# Custom HTTP headers to be sent along with each request.
+# Headers that are set by Prometheus itself can't be overwritten.
+http_headers:
+ # Header name.
+  [ <string>:
+    # Header values.
+    [ values: [<string>, ...] ]
+    # Header values that are hidden on the configuration page.
+    [ secrets: [<secret>, ...] ]
+    # Files to read header values from.
+    [ files: [<string>, ...] ] ]
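
To make the shape concrete, here is a small sketch that parses the documented `http_headers` layout with `gopkg.in/yaml.v2`; the local `header` struct is illustrative, the real types live in prometheus/common's HTTP client config:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// header mirrors the documented per-header shape: plain values, secret
// values, or files to read values from.
type header struct {
	Values  []string `yaml:"values,omitempty"`
	Secrets []string `yaml:"secrets,omitempty"`
	Files   []string `yaml:"files,omitempty"`
}

func main() {
	raw := `
http_headers:
  X-Tenant:
    values: ["team-a"]
  Proxy-Authorization:
    secrets: ["s3cr3t"]
`
	var cfg struct {
		HTTPHeaders map[string]header `yaml:"http_headers"`
	}
	if err := yaml.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg.HTTPHeaders)
}
```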
# List of Azure service discovery configurations.
azure_sd_configs:
@@ -461,6 +477,11 @@ metric_relabel_configs:
# that will be kept in memory. 0 means no limit.
[ keep_dropped_targets: <int> | default = 0 ]
+# Specifies the validation scheme for metric and label names. Either blank or
+# "legacy" for letters, numbers, colons, and underscores; or "utf8" for full
+# UTF-8 support.
+[ metric_name_validation_scheme: <string> | default = "legacy" ]
+
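
A sketch of the two validation modes, assuming the legacy charset is the classic `[a-zA-Z_:][a-zA-Z0-9_:]*` pattern from the Prometheus data model and that utf8 mode merely requires a non-empty, valid UTF-8 name; the exact validation code inside Prometheus may differ:

```go
package main

import (
	"fmt"
	"regexp"
	"unicode/utf8"
)

// legacyName is the classic Prometheus metric/label name charset.
var legacyName = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)

func valid(name, scheme string) bool {
	if scheme == "utf8" {
		return name != "" && utf8.ValidString(name)
	}
	return legacyName.MatchString(name) // "" or "legacy"
}

func main() {
	fmt.Println(valid("http_requests_total", "legacy")) // true
	fmt.Println(valid("http.requests.total", "legacy")) // false: dots
	fmt.Println(valid("http.requests.total", "utf8"))   // true
}
```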
# Limit on total number of positive and negative buckets allowed in a single
# native histogram. The resolution of a histogram with more buckets will be
# reduced until the number of buckets is within the limit. If the limit cannot
@@ -947,7 +968,9 @@ tls_config:
# The host to use if the container is in host networking mode.
[ host_networking_host: <string> | default = "localhost" ]
-# Match the first network if the container has multiple networks defined, thus avoiding collecting duplicate targets.
+# Sort all non-nil networks in ascending order based on network name and
+# get the first network if the container has multiple networks defined,
+# thus avoiding collecting duplicate targets.
[ match_first_network: <boolean> | default = true ]
# Optional filters to limit the discovery process to a subset of available
@@ -3265,12 +3288,16 @@ Initially, aside from the configured per-target labels, a target's `job`
label is set to the `job_name` value of the respective scrape configuration.
The `__address__` label is set to the `<host>:<port>` address of the target.
After relabeling, the `instance` label is set to the value of `__address__` by default if
-it was not set during relabeling. The `__scheme__` and `__metrics_path__` labels
-are set to the scheme and metrics path of the target respectively. The `__param_<name>`
-label is set to the value of the first passed URL parameter called `<name>`.
+it was not set during relabeling.
+
+The `__scheme__` and `__metrics_path__` labels
+are set to the scheme and metrics path of the target respectively, as specified in `scrape_config`.
+
+The `__param_<name>`
+label is set to the value of the first passed URL parameter called `<name>`, as defined in `scrape_config`.
The `__scrape_interval__` and `__scrape_timeout__` labels are set to the target's
-interval and timeout.
+interval and timeout, as specified in `scrape_config`.
Additional labels prefixed with `__meta_` may be available during the
relabeling phase. They are set by the service discovery mechanism that provided
@@ -3401,8 +3428,8 @@ authorization:
# It is mutually exclusive with `credentials`.
[ credentials_file: <filename> ]
-# Optionally configures AWS's Signature Verification 4 signing process to
-# sign requests. Cannot be set at the same time as basic_auth, authorization, or oauth2.
+# Optionally configures AWS's Signature Verification 4 signing process to sign requests.
+# Cannot be set at the same time as basic_auth, authorization, oauth2, azuread or google_iam.
# To use the default credentials from the AWS SDK, use `sigv4: {}`.
sigv4:
# The AWS region. If blank, the region from the default credentials chain
@@ -3655,12 +3682,12 @@ sigv4:
[ role_arn: <string> ]
# Optional OAuth 2.0 configuration.
-# Cannot be used at the same time as basic_auth, authorization, sigv4, or azuread.
+# Cannot be used at the same time as basic_auth, authorization, sigv4, azuread or google_iam.
oauth2:
[ <oauth2> ]
# Optional AzureAD configuration.
-# Cannot be used at the same time as basic_auth, authorization, oauth2, or sigv4.
+# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or google_iam.
azuread:
# The Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'.
[ cloud: <string> | default = AzurePublic ]
@@ -3680,6 +3707,14 @@ azuread:
[ sdk:
    [ tenant_id: <string> ] ]
+# WARNING: Remote write is NOT SUPPORTED by Google Cloud. This configuration is reserved for future use.
+# Optional Google Cloud Monitoring configuration.
+# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or azuread.
+# To use the default credentials from the Google Cloud SDK, use `google_iam: {}`.
+google_iam:
+  # Service account key with monitoring write permissions.
+  credentials_file: <file_name>
+
# Configures the remote write request's TLS settings.
tls_config:
[ <tls_config> ]
diff --git a/docs/configuration/unit_testing_rules.md b/docs/configuration/unit_testing_rules.md
index 163fcb91f1..7fc676a251 100644
--- a/docs/configuration/unit_testing_rules.md
+++ b/docs/configuration/unit_testing_rules.md
@@ -92,7 +92,7 @@ series:
#
# Native histogram notation:
# Native histograms can be used instead of floating point numbers using the following notation:
-# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}
+# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}}
# Native histograms support the same expanding notation as floating point numbers, i.e. 'axn', 'a+bxn' and 'a-bxn'.
# All properties are optional and default to 0. The order is not important. The following properties are supported:
# - schema (int):
@@ -119,6 +119,8 @@ series:
# Observation counts in negative buckets. Each represents an absolute count.
# - n_offset (int):
# The starting index of the first entry in the negative buckets.
+#  - counter_reset_hint (one of 'unknown', 'reset', 'not_reset' or 'gauge'):
+#      The counter reset hint associated with this histogram. Defaults to 'unknown' if not set.
values: <string>
```
diff --git a/docs/feature_flags.md b/docs/feature_flags.md
index 24d70647fd..7b07a04d0e 100644
--- a/docs/feature_flags.md
+++ b/docs/feature_flags.md
@@ -200,8 +200,9 @@ won't work when you push OTLP metrics.
`--enable-feature=promql-experimental-functions`
-Enables PromQL functions that are considered experimental and whose name or
-semantics could change.
+Enables PromQL functions that are considered experimental. These functions
+might change their name, syntax, or semantics. They might also get removed
+entirely.
## Created Timestamps Zero Injection
@@ -234,3 +235,33 @@ metadata changes as WAL records on a per-series basis.
This must be used if
you are also using remote write 2.0 as it will only gather metadata from the WAL.
+
+## Delay compaction start time
+
+`--enable-feature=delayed-compaction`
+
+A random offset, up to `10%` of the chunk range, is added to the Head compaction start time. This assists Prometheus instances in avoiding simultaneous compactions and reduces the load on shared resources.
+
+Only auto Head compactions and the operations directly resulting from them are subject to this delay.
+
+In the event of multiple consecutive Head compactions being possible, only the first compaction experiences this delay.
+
+Note that during this delay, the Head continues its usual operations, which include serving and appending series.
+
+Despite the delay in compaction, the blocks produced are time-aligned in the same manner as they would be if the delay were not in place.
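
For intuition, a sketch of the jitter as described, under the assumption that the offset is drawn uniformly from [0, chunkRange/10); the real TSDB scheduling logic differs in detail:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// compactionDelay draws a random offset of up to 10% of the chunk range,
// spreading Head compaction start times across instances.
func compactionDelay(chunkRange time.Duration) time.Duration {
	return time.Duration(rand.Int63n(int64(chunkRange / 10)))
}

func main() {
	chunkRange := 2 * time.Hour // TSDB default head chunk range
	fmt.Println("delay this cycle:", compactionDelay(chunkRange))
}
```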
+
+## Delay __name__ label removal for PromQL engine
+
+`--enable-feature=promql-delayed-name-removal`
+
+When enabled, Prometheus will change the way in which the `__name__` label is removed from PromQL query results (for functions and expressions for which this is necessary). Specifically, it will delay the removal to the last step of the query evaluation, instead of every time an expression or function creating derived metrics is evaluated.
+
+This allows optionally preserving the `__name__` label via the `label_replace` and `label_join` functions, and helps prevent the "vector cannot contain metrics with the same labelset" error, which can happen when applying a regex-matcher to the `__name__` label.
+
+## UTF-8 Name Support
+
+`--enable-feature=utf8-names`
+
+When enabled, changes the metric and label name validation scheme inside Prometheus to allow the full UTF-8 character set.
+By itself, this flag does not enable the request of UTF-8 names via content negotiation.
+Users will also have to set `metric_name_validation_scheme` in scrape configs to enable the feature either on the global config or on a per-scrape config basis.
diff --git a/docs/querying/basics.md b/docs/querying/basics.md
index 1c72adb3e5..81ffb4e0f3 100644
--- a/docs/querying/basics.md
+++ b/docs/querying/basics.md
@@ -8,9 +8,15 @@ sort_rank: 1
Prometheus provides a functional query language called PromQL (Prometheus Query
Language) that lets the user select and aggregate time series data in real
-time. The result of an expression can either be shown as a graph, viewed as
-tabular data in Prometheus's expression browser, or consumed by external
-systems via the [HTTP API](api.md).
+time.
+
+When you send a query request to Prometheus, it can be an _instant query_, evaluated at one point in time,
+or a _range query_, evaluated at equally-spaced steps between a start and an end time. PromQL works exactly the same
+in both cases; a range query is just like an instant query run multiple times at different timestamps.
+
+In the Prometheus UI, the "Table" tab is for instant queries and the "Graph" tab is for range queries.
+
+Other programs can fetch the result of a PromQL expression via the [HTTP API](api.md).
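
For example, a short program that issues both query kinds against the HTTP API; the server address and time range are assumptions for the example:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	base := "http://localhost:9090"

	// Instant query: one evaluation timestamp (defaults to "now").
	instant := base + "/api/v1/query?" + url.Values{
		"query": {"up"},
	}.Encode()

	// Range query: the same expression evaluated at regular steps.
	rng := base + "/api/v1/query_range?" + url.Values{
		"query": {"up"},
		"start": {"2024-01-01T00:00:00Z"},
		"end":   {"2024-01-01T01:00:00Z"},
		"step":  {"60s"},
	}.Encode()

	for _, u := range []string{instant, rng} {
		resp, err := http.Get(u)
		if err != nil {
			panic(err)
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Println(u, "->", len(body), "bytes of JSON")
	}
}
```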
## Examples
@@ -35,7 +41,7 @@ vector is the only type which can be graphed.
_Notes about the experimental native histograms:_
* Ingesting native histograms has to be enabled via a [feature
- flag](../../feature_flags.md#native-histograms).
+ flag](../feature_flags.md#native-histograms).
* Once native histograms have been ingested into the TSDB (and even after
disabling the feature flag again), both instant vectors and range vectors may
now contain samples that aren't simple floating point numbers (float samples)
@@ -94,9 +100,7 @@ Examples:
## Time series selectors
-Time series selectors are responsible for selecting the times series and raw or inferred sample timestamps and values.
-
-Time series *selectors* are not to be confused with higher level concept of instant and range *queries* that can execute the time series *selectors*. A higher level instant query would evaluate the given selector at one point in time, however the range query would evaluate the selector at multiple different times in between a minimum and maximum timestamp at regular steps.
+These are the basic building blocks that instruct PromQL what data to fetch.
### Instant vector selectors
diff --git a/docs/querying/functions.md b/docs/querying/functions.md
index de65e693d0..e13628c5c5 100644
--- a/docs/querying/functions.md
+++ b/docs/querying/functions.md
@@ -617,9 +617,9 @@ Like `sort`, `sort_desc` only affects the results of instant queries, as range q
## `sort_by_label()`
-**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.**
+**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**
-`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by their label values and sample value in case of label values being equal, in ascending order.
+`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by the values of the given labels in ascending order. In case these label values are equal, elements are sorted by their full label sets.
Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering.
@@ -627,7 +627,7 @@ This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_so
## `sort_by_label_desc()`
-**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.**
+**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**
Same as `sort_by_label`, but sorts in descending order.
@@ -676,7 +676,7 @@ over time and return an instant vector with per-series aggregation results:
* `last_over_time(range-vector)`: the most recent point value in the specified interval.
* `present_over_time(range-vector)`: the value 1 for any series in the specified interval.
-If the [feature flag](../feature_flags/)
+If the [feature flag](../feature_flags.md#experimental-promql-functions)
`--enable-feature=promql-experimental-functions` is set, the following
additional functions are available:
diff --git a/documentation/examples/remote_storage/example_write_adapter/README.md b/documentation/examples/remote_storage/example_write_adapter/README.md
index 739cf3be36..968d2b25cb 100644
--- a/documentation/examples/remote_storage/example_write_adapter/README.md
+++ b/documentation/examples/remote_storage/example_write_adapter/README.md
@@ -19,7 +19,7 @@ remote_write:
protobuf_message: "io.prometheus.write.v2.Request"
```
-or for deprecated Remote Write 1.0 message:
+or for the Remote Write 1.0 message, which will eventually be deprecated:
```yaml
remote_write:
diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod
index 4c41a66061..e5e052469b 100644
--- a/documentation/examples/remote_storage/go.mod
+++ b/documentation/examples/remote_storage/go.mod
@@ -8,19 +8,19 @@ require (
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.11.5
- github.com/prometheus/client_golang v1.19.1
- github.com/prometheus/common v0.55.0
+ github.com/prometheus/client_golang v1.20.0
+ github.com/prometheus/common v0.57.0
github.com/prometheus/prometheus v0.53.1
github.com/stretchr/testify v1.9.0
)
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
- github.com/aws/aws-sdk-go v1.53.16 // indirect
+ github.com/aws/aws-sdk-go v1.55.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -35,8 +35,7 @@ require (
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.8 // indirect
- github.com/kr/text v0.2.0 // indirect
+ github.com/klauspost/compress v1.17.9 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -56,14 +55,14 @@ require (
go.opentelemetry.io/otel/trace v1.27.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/crypto v0.24.0 // indirect
- golang.org/x/net v0.26.0 // indirect
+ golang.org/x/crypto v0.25.0 // indirect
+ golang.org/x/net v0.27.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect
- golang.org/x/sys v0.21.0 // indirect
+ golang.org/x/sys v0.22.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
- google.golang.org/grpc v1.64.0 // indirect
+ google.golang.org/grpc v1.65.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum
index 9898d75d70..34c474ef89 100644
--- a/documentation/examples/remote_storage/go.sum
+++ b/documentation/examples/remote_storage/go.sum
@@ -1,9 +1,9 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
@@ -26,8 +26,8 @@ github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8V
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc=
-github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
+github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -37,9 +37,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
-github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -50,8 +49,6 @@ github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LO
github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -190,8 +187,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
-github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -256,8 +253,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI=
+github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -267,8 +264,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY=
+github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -279,8 +276,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc=
github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -326,8 +323,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
-golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
+golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -347,8 +344,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
-golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
+golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
@@ -376,11 +373,11 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
-golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
+golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
-golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
+golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
+golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -405,8 +402,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
-google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
-google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
+google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
+google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
diff --git a/go.mod b/go.mod
index 1ae650d3d7..a37b19e65b 100644
--- a/go.mod
+++ b/go.mod
@@ -13,15 +13,15 @@ require (
github.com/KimMachineGun/automemlimit v0.6.1
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30
- github.com/aws/aws-sdk-go v1.54.19
+ github.com/aws/aws-sdk-go v1.55.5
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
- github.com/digitalocean/godo v1.118.0
- github.com/docker/docker v27.0.3+incompatible
+ github.com/digitalocean/godo v1.119.0
+ github.com/docker/docker v27.1.1+incompatible
github.com/edsrzf/mmap-go v1.1.0
github.com/envoyproxy/go-control-plane v0.12.0
- github.com/envoyproxy/protoc-gen-validate v1.0.4
+ github.com/envoyproxy/protoc-gen-validate v1.1.0
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
github.com/fsnotify/fsnotify v1.7.0
github.com/go-kit/log v0.2.1
@@ -33,17 +33,17 @@ require (
github.com/google/go-cmp v0.6.0
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da
github.com/google/uuid v1.6.0
- github.com/gophercloud/gophercloud v1.13.0
+ github.com/gophercloud/gophercloud v1.14.0
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
github.com/grpc-ecosystem/grpc-gateway v1.16.0
- github.com/hashicorp/consul/api v1.29.2
+ github.com/hashicorp/consul/api v1.29.4
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3
- github.com/hetznercloud/hcloud-go/v2 v2.10.2
- github.com/ionos-cloud/sdk-go/v6 v6.1.11
+ github.com/hetznercloud/hcloud-go/v2 v2.13.1
+ github.com/ionos-cloud/sdk-go/v6 v6.2.1
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.9
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
- github.com/linode/linodego v1.37.0
+ github.com/linode/linodego v1.38.0
github.com/miekg/dns v1.1.61
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
@@ -52,9 +52,9 @@ require (
github.com/oklog/ulid v1.3.1
github.com/ovh/go-ovh v1.6.0
github.com/prometheus/alertmanager v0.27.0
- github.com/prometheus/client_golang v1.19.1
+ github.com/prometheus/client_golang v1.20.2
github.com/prometheus/client_model v0.6.1
- github.com/prometheus/common v0.55.0
+ github.com/prometheus/common v0.56.0
github.com/prometheus/common/assets v0.2.0
github.com/prometheus/common/sigv4 v0.1.0
github.com/prometheus/exporter-toolkit v0.11.0
@@ -75,15 +75,14 @@ require (
go.uber.org/automaxprocs v1.5.3
go.uber.org/goleak v1.3.0
go.uber.org/multierr v1.11.0
- golang.org/x/net v0.27.0
- golang.org/x/oauth2 v0.21.0
+ golang.org/x/oauth2 v0.22.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.22.0
golang.org/x/text v0.16.0
golang.org/x/time v0.5.0
golang.org/x/tools v0.23.0
- google.golang.org/api v0.188.0
- google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d
+ google.golang.org/api v0.190.0
+ google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f
google.golang.org/grpc v1.65.0
google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v2 v2.4.0
@@ -96,9 +95,9 @@ require (
)
require (
- cloud.google.com/go/auth v0.7.0 // indirect
- cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
- cloud.google.com/go/compute/metadata v0.4.0 // indirect
+ cloud.google.com/go/auth v0.7.3 // indirect
+ cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect
+ cloud.google.com/go/compute/metadata v0.5.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
@@ -140,9 +139,9 @@ require (
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
- github.com/google/s2a-go v0.1.7 // indirect
+ github.com/google/s2a-go v0.1.8 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
- github.com/googleapis/gax-go/v2 v2.12.5 // indirect
+ github.com/googleapis/gax-go/v2 v2.13.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect
@@ -191,8 +190,9 @@ require (
golang.org/x/crypto v0.25.0 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/mod v0.19.0 // indirect
+ golang.org/x/net v0.27.0 // indirect
golang.org/x/term v0.22.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gotest.tools/v3 v3.0.3 // indirect
diff --git a/go.sum b/go.sum
index ac5aaa9188..bdfe438062 100644
--- a/go.sum
+++ b/go.sum
@@ -12,18 +12,18 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go/auth v0.7.0 h1:kf/x9B3WTbBUHkC+1VS8wwwli9TzhSt0vSTVBmMR8Ts=
-cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw=
-cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
-cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
+cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY=
+cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA=
+cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=
+cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute/metadata v0.4.0 h1:vHzJCWaM4g8XIcm8kopr3XmDA4Gy/lblD3EhhSux05c=
-cloud.google.com/go/compute/metadata v0.4.0/go.mod h1:SIQh1Kkb4ZJ8zJ874fqVkslA29PRXuleyj6vOzlbK7M=
+cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI=
-github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
+github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
@@ -143,14 +143,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/digitalocean/godo v1.118.0 h1:lkzGFQmACrVCp7UqH1sAi4JK/PWwlc5aaxubgorKmC4=
-github.com/digitalocean/godo v1.118.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
+github.com/digitalocean/godo v1.119.0 h1:dmFNQwSIAcH3z+FVovHLkazKDC2uA8oOlGvg5+H4vRw=
+github.com/digitalocean/godo v1.119.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE=
-github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
+github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -171,8 +171,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI=
github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
-github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
+github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
@@ -322,8 +322,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g=
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
-github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
+github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
+github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -332,10 +332,10 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=
-github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
-github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdyt4fX9IeJ0=
-github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
+github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
+github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8=
+github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@@ -353,8 +353,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
-github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw=
-github.com/hashicorp/consul/api v1.29.2/go.mod h1:0YObcaLNDSbtlgzIRtmRXI1ZkeuK0trCBxwZQ4MYnIk=
+github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
+github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg=
github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
@@ -414,8 +414,8 @@ github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtx
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.10.2 h1:9gyTUPhfNbfbS40Spgij5mV5k37bOZgt8iHKCbfGs5I=
-github.com/hetznercloud/hcloud-go/v2 v2.10.2/go.mod h1:xQ+8KhIS62W0D78Dpi57jsufWh844gUw1az5OUvaeq8=
+github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ=
+github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -423,8 +423,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
-github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
-github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
+github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY=
+github.com/ionos-cloud/sdk-go/v6 v6.2.1/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI=
github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -472,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/linode/linodego v1.37.0 h1:B/2Spzv9jYXzKA+p+GD8fVCNJ7Wuw6P91ZDD9eCkkso=
-github.com/linode/linodego v1.37.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ=
+github.com/linode/linodego v1.38.0 h1:wP3oW9OhGc6vhze8NPf2knbwH4TzSbrjzuCd9okjbTY=
+github.com/linode/linodego v1.38.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -610,8 +610,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
+github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -627,8 +627,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.56.0 h1:UffReloqkBtvtQEYDg2s+uDPGRrJyC6vZWPGXf6OhPY=
+github.com/prometheus/common v0.56.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
@@ -867,8 +867,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
-golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
+golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1049,8 +1049,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.188.0 h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw=
-google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag=
+google.golang.org/api v0.190.0 h1:ASM+IhLY1zljNdLu19W1jTmU6A+gMk6M46Wlur61s+Q=
+google.golang.org/api v0.190.0/go.mod h1:QIr6I9iedBLnfqoD6L6Vze1UvS5Hzj5r2aUBOaZnLHo=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1087,10 +1087,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY=
-google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b h1:04+jVzTs2XBnOZcPsLnmrTGqltqJbZQ1Ey26hjYdQQ0=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk=
+google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
diff --git a/model/exemplar/exemplar.go b/model/exemplar/exemplar.go
index 08f55374ef..2c28b17257 100644
--- a/model/exemplar/exemplar.go
+++ b/model/exemplar/exemplar.go
@@ -15,7 +15,9 @@ package exemplar
import "github.com/prometheus/prometheus/model/labels"
-// The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters
+// ExemplarMaxLabelSetLength is defined by OpenMetrics: "The combined length of
+// the label names and values of an Exemplar's LabelSet MUST NOT exceed 128
+// UTF-8 characters."
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars
const ExemplarMaxLabelSetLength = 128
@@ -49,7 +51,7 @@ func (e Exemplar) Equals(e2 Exemplar) bool {
return e.Value == e2.Value
}
-// Sort first by timestamp, then value, then labels.
+// Compare orders exemplars first by timestamp, then by value, then by labels.
func Compare(a, b Exemplar) int {
if a.Ts < b.Ts {
return -1
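
A minimal sketch of the ordering the updated comment describes — timestamps first, then values, then labels — assuming the remaining branches mirror the timestamp check above and that `labels.Compare` serves as the final tie-break (illustration only, not part of the patch):

func compareExemplarsSketch(a, b Exemplar) int {
	switch {
	case a.Ts < b.Ts:
		return -1
	case a.Ts > b.Ts:
		return 1
	case a.Value < b.Value:
		return -1
	case a.Value > b.Value:
		return 1
	}
	// Labels decide the order only when timestamp and value tie.
	return labels.Compare(a.Labels, b.Labels)
}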
diff --git a/model/labels/labels.go b/model/labels/labels.go
index 01514abf38..f4de7496ce 100644
--- a/model/labels/labels.go
+++ b/model/labels/labels.go
@@ -38,10 +38,10 @@ func (ls Labels) Bytes(buf []byte) []byte {
b.WriteByte(labelSep)
for i, l := range ls {
if i > 0 {
- b.WriteByte(seps[0])
+ b.WriteByte(sep)
}
b.WriteString(l.Name)
- b.WriteByte(seps[0])
+ b.WriteByte(sep)
b.WriteString(l.Value)
}
return b.Bytes()
@@ -86,9 +86,9 @@ func (ls Labels) Hash() uint64 {
}
b = append(b, v.Name...)
- b = append(b, seps[0])
+ b = append(b, sep)
b = append(b, v.Value...)
- b = append(b, seps[0])
+ b = append(b, sep)
}
return xxhash.Sum64(b)
}
@@ -106,9 +106,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
i++
default:
b = append(b, ls[i].Name...)
- b = append(b, seps[0])
+ b = append(b, sep)
b = append(b, ls[i].Value...)
- b = append(b, seps[0])
+ b = append(b, sep)
i++
j++
}
@@ -130,9 +130,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
continue
}
b = append(b, ls[i].Name...)
- b = append(b, seps[0])
+ b = append(b, sep)
b = append(b, ls[i].Value...)
- b = append(b, seps[0])
+ b = append(b, sep)
}
return xxhash.Sum64(b), b
}
@@ -151,10 +151,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
i++
default:
if b.Len() > 1 {
- b.WriteByte(seps[0])
+ b.WriteByte(sep)
}
b.WriteString(ls[i].Name)
- b.WriteByte(seps[0])
+ b.WriteByte(sep)
b.WriteString(ls[i].Value)
i++
j++
@@ -177,10 +177,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte {
continue
}
if b.Len() > 1 {
- b.WriteByte(seps[0])
+ b.WriteByte(sep)
}
b.WriteString(ls[i].Name)
- b.WriteByte(seps[0])
+ b.WriteByte(sep)
b.WriteString(ls[i].Value)
}
return b.Bytes()
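
For orientation, a hedged sketch of the buffer layout these methods build — `labelSep` (0xfe) opens the encoding and `sep` (0xff) delimits names and values, so distinct label sets can never share an encoding (the constants come from labels_common.go below; `bytesLayoutSketch` is a hypothetical name):

// {a="1", b="2"} serializes as: 0xfe 'a' 0xff '1' 0xff 'b' 0xff '2'
func bytesLayoutSketch(ls Labels) []byte {
	b := []byte{labelSep} // 0xfe marks the start of the encoding
	for i, l := range ls {
		if i > 0 {
			b = append(b, sep) // 0xff between label pairs
		}
		b = append(b, l.Name...)
		b = append(b, sep) // 0xff between name and value
		b = append(b, l.Value...)
	}
	return b
}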
@@ -315,7 +315,8 @@ func Compare(a, b Labels) int {
return len(a) - len(b)
}
-// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed.
+// CopyFrom copies labels from b on top of whatever was in ls previously,
+// reusing memory or expanding if needed.
func (ls *Labels) CopyFrom(b Labels) {
(*ls) = append((*ls)[:0], b...)
}
@@ -422,7 +423,7 @@ type ScratchBuilder struct {
add Labels
}
-// Symbol-table is no-op, just for api parity with dedupelabels.
+// SymbolTable is a no-op, provided only for API parity with dedupelabels.
type SymbolTable struct{}
func NewSymbolTable() *SymbolTable { return nil }
@@ -458,7 +459,7 @@ func (b *ScratchBuilder) Add(name, value string) {
b.add = append(b.add, Label{Name: name, Value: value})
}
-// Add a name/value pair, using []byte instead of string.
+// UnsafeAddBytes adds a name/value pair, using []byte instead of string.
// The '-tags stringlabels' version of this function is unsafe, hence the name.
// This version is safe - it copies the strings immediately - but we keep the same name so everything compiles.
func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) {
@@ -475,14 +476,14 @@ func (b *ScratchBuilder) Assign(ls Labels) {
b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice.
}
-// Return the name/value pairs added so far as a Labels object.
+// Labels returns the name/value pairs added so far as a Labels object.
// Note: if you want them sorted, call Sort() first.
func (b *ScratchBuilder) Labels() Labels {
// Copy the slice, so the next use of ScratchBuilder doesn't overwrite.
return append([]Label{}, b.add...)
}
-// Write the newly-built Labels out to ls.
+// Overwrite writes the newly-built Labels out to ls.
// Callers must ensure that there are no other references to ls, or any strings fetched from it.
func (b *ScratchBuilder) Overwrite(ls *Labels) {
*ls = append((*ls)[:0], b.add...)
diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go
index 4bc94f84fe..d7bdc1e076 100644
--- a/model/labels/labels_common.go
+++ b/model/labels/labels_common.go
@@ -29,10 +29,11 @@ const (
BucketLabel = "le"
InstanceName = "instance"
- labelSep = '\xfe'
-	labelSep = '\xfe'
+	labelSep = '\xfe' // Used at the beginning of the `Bytes` return.
+ sep = '\xff' // Used between labels in `Bytes` and `Hash`.
)
-var seps = []byte{'\xff'}
+var seps = []byte{sep} // Used with Hash, which has no WriteByte method.
// Label is a key/value pair of strings.
type Label struct {
@@ -94,12 +95,23 @@ func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
// IsValid checks if the metric name or label names are valid.
-func (ls Labels) IsValid() bool {
+func (ls Labels) IsValid(validationScheme model.ValidationScheme) bool {
err := ls.Validate(func(l Label) error {
- if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) {
- return strconv.ErrSyntax
+ if l.Name == model.MetricNameLabel {
+ // If the default validation scheme has been overridden with legacy mode,
+ // we need to call the special legacy validation checker.
+ if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation && !model.IsValidLegacyMetricName(string(model.LabelValue(l.Value))) {
+ return strconv.ErrSyntax
+ }
+ if !model.IsValidMetricName(model.LabelValue(l.Value)) {
+ return strconv.ErrSyntax
+ }
}
- if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() {
+ if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation {
+ if !model.LabelName(l.Name).IsValidLegacy() || !model.LabelValue(l.Value).IsValid() {
+ return strconv.ErrSyntax
+ }
+ } else if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() {
return strconv.ErrSyntax
}
return nil
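
A short usage sketch of the new per-call validation scheme, assuming the global scheme is left at UTF-8 (this mirrors the test cases added further down):

// Enforce the legacy charset for a single check while the global
// scheme stays UTF-8; dotted names fail legacy validation.
model.NameValidationScheme = model.UTF8Validation
ls := labels.FromStrings("__name__", "http.requests", "job", "api")
fmt.Println(ls.IsValid(model.LegacyValidation)) // false
fmt.Println(ls.IsValid(model.UTF8Validation))   // true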
diff --git a/model/labels/labels_dedupelabels.go b/model/labels/labels_dedupelabels.go
index 0e5bb048be..da8a88cc15 100644
--- a/model/labels/labels_dedupelabels.go
+++ b/model/labels/labels_dedupelabels.go
@@ -146,13 +146,13 @@ func (ls Labels) Bytes(buf []byte) []byte {
b := bytes.NewBuffer(buf[:0])
for i := 0; i < len(ls.data); {
if i > 0 {
- b.WriteByte(seps[0])
+ b.WriteByte(sep)
}
var name, value string
name, i = decodeString(ls.syms, ls.data, i)
value, i = decodeString(ls.syms, ls.data, i)
b.WriteString(name)
- b.WriteByte(seps[0])
+ b.WriteByte(sep)
b.WriteString(value)
}
return b.Bytes()
@@ -201,9 +201,9 @@ func (ls Labels) Hash() uint64 {
}
b = append(b, name...)
- b = append(b, seps[0])
+ b = append(b, sep)
b = append(b, value...)
- b = append(b, seps[0])
+ b = append(b, sep)
pos = newPos
}
return xxhash.Sum64(b)
@@ -226,9 +226,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
}
if name == names[j] {
b = append(b, name...)
- b = append(b, seps[0])
+ b = append(b, sep)
b = append(b, value...)
- b = append(b, seps[0])
+ b = append(b, sep)
}
}
@@ -252,9 +252,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
continue
}
b = append(b, name...)
- b = append(b, seps[0])
+ b = append(b, sep)
b = append(b, value...)
- b = append(b, seps[0])
+ b = append(b, sep)
}
return xxhash.Sum64(b), b
}
@@ -275,10 +275,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
}
if lName == names[j] {
if b.Len() > 1 {
- b.WriteByte(seps[0])
+ b.WriteByte(sep)
}
b.WriteString(lName)
- b.WriteByte(seps[0])
+ b.WriteByte(sep)
b.WriteString(lValue)
}
pos = newPos
@@ -299,10 +299,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte {
}
if j == len(names) || lName != names[j] {
if b.Len() > 1 {
- b.WriteByte(seps[0])
+ b.WriteByte(sep)
}
b.WriteString(lName)
- b.WriteByte(seps[0])
+ b.WriteByte(sep)
b.WriteString(lValue)
}
pos = newPos
diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go
index bccceb61fe..c8bce51234 100644
--- a/model/labels/labels_stringlabels.go
+++ b/model/labels/labels_stringlabels.go
@@ -112,9 +112,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
}
if name == names[j] {
b = append(b, name...)
- b = append(b, seps[0])
+ b = append(b, sep)
b = append(b, value...)
- b = append(b, seps[0])
+ b = append(b, sep)
}
}
@@ -138,9 +138,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
continue
}
b = append(b, name...)
- b = append(b, seps[0])
+ b = append(b, sep)
b = append(b, value...)
- b = append(b, seps[0])
+ b = append(b, sep)
}
return xxhash.Sum64(b), b
}
diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go
index d8910cdc85..9208908311 100644
--- a/model/labels/labels_test.go
+++ b/model/labels/labels_test.go
@@ -21,6 +21,7 @@ import (
"strings"
"testing"
+ "github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
)
@@ -272,11 +273,86 @@ func TestLabels_IsValid(t *testing.T) {
},
} {
t.Run("", func(t *testing.T) {
- require.Equal(t, test.expected, test.input.IsValid())
+ require.Equal(t, test.expected, test.input.IsValid(model.LegacyValidation))
})
}
}
+func TestLabels_ValidationModes(t *testing.T) {
+ for _, test := range []struct {
+ input Labels
+ globalMode model.ValidationScheme
+ callMode model.ValidationScheme
+ expected bool
+ }{
+ {
+ input: FromStrings(
+ "__name__", "test.metric",
+ "hostname", "localhost",
+ "job", "check",
+ ),
+ globalMode: model.UTF8Validation,
+ callMode: model.UTF8Validation,
+ expected: true,
+ },
+ {
+ input: FromStrings(
+ "__name__", "test",
+ "\xc5 bad utf8", "localhost",
+ "job", "check",
+ ),
+ globalMode: model.UTF8Validation,
+ callMode: model.UTF8Validation,
+ expected: false,
+ },
+ {
+ // Setting the common model to legacy validation and then trying to check for UTF-8 on a
+ // per-call basis is not supported.
+ input: FromStrings(
+ "__name__", "test.utf8.metric",
+ "hostname", "localhost",
+ "job", "check",
+ ),
+ globalMode: model.LegacyValidation,
+ callMode: model.UTF8Validation,
+ expected: false,
+ },
+ {
+ input: FromStrings(
+ "__name__", "test",
+ "hostname", "localhost",
+ "job", "check",
+ ),
+ globalMode: model.LegacyValidation,
+ callMode: model.LegacyValidation,
+ expected: true,
+ },
+ {
+ input: FromStrings(
+ "__name__", "test.utf8.metric",
+ "hostname", "localhost",
+ "job", "check",
+ ),
+ globalMode: model.UTF8Validation,
+ callMode: model.LegacyValidation,
+ expected: false,
+ },
+ {
+ input: FromStrings(
+ "__name__", "test",
+ "host.name", "localhost",
+ "job", "check",
+ ),
+ globalMode: model.UTF8Validation,
+ callMode: model.LegacyValidation,
+ expected: false,
+ },
+ } {
+ model.NameValidationScheme = test.globalMode
+ require.Equal(t, test.expected, test.input.IsValid(test.callMode))
+ }
+}
+
func TestLabels_Equal(t *testing.T) {
labels := FromStrings(
"aaa", "111",
diff --git a/model/labels/sharding.go b/model/labels/sharding.go
index 5e3e89fbbb..8b3a369397 100644
--- a/model/labels/sharding.go
+++ b/model/labels/sharding.go
@@ -39,9 +39,9 @@ func StableHash(ls Labels) uint64 {
}
b = append(b, v.Name...)
- b = append(b, seps[0])
+ b = append(b, sep)
b = append(b, v.Value...)
- b = append(b, seps[0])
+ b = append(b, sep)
}
return xxhash.Sum64(b)
}
diff --git a/model/labels/sharding_dedupelabels.go b/model/labels/sharding_dedupelabels.go
index 5912724f9b..5bf41b05d6 100644
--- a/model/labels/sharding_dedupelabels.go
+++ b/model/labels/sharding_dedupelabels.go
@@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 {
}
b = append(b, name...)
- b = append(b, seps[0])
+ b = append(b, sep)
b = append(b, value...)
- b = append(b, seps[0])
+ b = append(b, sep)
pos = newPos
}
return xxhash.Sum64(b)
diff --git a/model/labels/sharding_stringlabels.go b/model/labels/sharding_stringlabels.go
index 3ad2027d8c..798f268eb9 100644
--- a/model/labels/sharding_stringlabels.go
+++ b/model/labels/sharding_stringlabels.go
@@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 {
}
b = append(b, v.Name...)
- b = append(b, seps[0])
+ b = append(b, sep)
b = append(b, v.Value...)
- b = append(b, seps[0])
+ b = append(b, sep)
}
if h != nil {
return h.Sum64()
diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go
index 4f33edda43..a880465969 100644
--- a/model/relabel/relabel.go
+++ b/model/relabel/relabel.go
@@ -213,6 +213,10 @@ func (re Regexp) IsZero() bool {
// String returns the original string used to compile the regular expression.
func (re Regexp) String() string {
+ if re.Regexp == nil {
+ return ""
+ }
+
str := re.Regexp.String()
// Trim the anchor `^(?:` prefix and `)$` suffix.
return str[4 : len(str)-2]
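
With the nil guard in place, the zero value stringifies safely — a quick sketch:

var re relabel.Regexp           // zero value: the embedded *regexp.Regexp is nil
fmt.Printf("%q\n", re.String()) // "" — previously a nil-pointer dereference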
diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go
index 0f11f7068d..fc9952134d 100644
--- a/model/relabel/relabel_test.go
+++ b/model/relabel/relabel_test.go
@@ -900,3 +900,16 @@ action: replace
})
}
}
+
+func TestRegexp_ShouldMarshalAndUnmarshalZeroValue(t *testing.T) {
+ var zero Regexp
+
+ marshalled, err := yaml.Marshal(&zero)
+ require.NoError(t, err)
+ require.Equal(t, "null\n", string(marshalled))
+
+ var unmarshalled Regexp
+ err = yaml.Unmarshal(marshalled, &unmarshalled)
+ require.NoError(t, err)
+ require.Nil(t, unmarshalled.Regexp)
+}
diff --git a/model/textparse/interface.go b/model/textparse/interface.go
index df01dbc34f..0b5d9281e4 100644
--- a/model/textparse/interface.go
+++ b/model/textparse/interface.go
@@ -106,8 +106,8 @@ const (
EntryInvalid Entry = -1
EntryType Entry = 0
EntryHelp Entry = 1
- EntrySeries Entry = 2 // A series with a simple float64 as value.
+ EntrySeries Entry = 2 // EntrySeries marks a series with a simple float64 as value.
EntryComment Entry = 3
EntryUnit Entry = 4
- EntryHistogram Entry = 5 // A series with a native histogram as a value.
+ EntryHistogram Entry = 5 // EntryHistogram marks a series with a native histogram as a value.
)
diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go
index b7ad1dd85c..5f0415d3ee 100644
--- a/model/textparse/openmetricsparse.go
+++ b/model/textparse/openmetricsparse.go
@@ -94,16 +94,46 @@ type OpenMetricsParser struct {
exemplarVal float64
exemplarTs int64
hasExemplarTs bool
+
+ skipCTSeries bool
}
-// NewOpenMetricsParser returns a new parser of the byte slice.
-func NewOpenMetricsParser(b []byte, st *labels.SymbolTable) Parser {
- return &OpenMetricsParser{
- l: &openMetricsLexer{b: b},
- builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
+type openMetricsParserOptions struct {
+ SkipCTSeries bool
+}
+
+type OpenMetricsOption func(*openMetricsParserOptions)
+
+// WithOMParserCTSeriesSkipped turns off exposing _created lines
+// as series, so they are used only for parsing the created timestamp
+// returned by the `CreatedTimestamp` method.
+//
+// Using this option is recommended so that _created lines serve no purpose
+// other than carrying the created timestamp, but it is off by default for
+// best-effort compatibility.
+func WithOMParserCTSeriesSkipped() OpenMetricsOption {
+ return func(o *openMetricsParserOptions) {
+ o.SkipCTSeries = true
}
}
+// NewOpenMetricsParser returns a new parser for the byte slice, with an option to skip CT series parsing.
+func NewOpenMetricsParser(b []byte, st *labels.SymbolTable, opts ...OpenMetricsOption) Parser {
+ options := &openMetricsParserOptions{}
+
+ for _, opt := range opts {
+ opt(options)
+ }
+
+ parser := &OpenMetricsParser{
+ l: &openMetricsLexer{b: b},
+ builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
+ skipCTSeries: options.SkipCTSeries,
+ }
+
+ return parser
+}
+
// Series returns the bytes of the series, the timestamp if set, and the value
// of the current sample.
func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) {
@@ -219,10 +249,90 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
return true
}
-// CreatedTimestamp returns nil as it's not implemented yet.
-// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980
+// CreatedTimestamp returns the created timestamp for the current Metric if one exists, or nil otherwise.
+// NOTE(Maniktherana): This might use additional CPU/memory resources, since handling _created series per the OpenMetrics 1.0 specification requires a deep copy of the parser for peeking ahead.
func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
- return nil
+ if !TypeRequiresCT(p.mtype) {
+ // Not a CT supported metric type, fast path.
+ return nil
+ }
+
+ var (
+ currLset labels.Labels
+ buf []byte
+ peekWithoutNameLsetHash uint64
+ )
+ p.Metric(&currLset)
+ currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile")
+	// Search for the _created line for the currFamilyLsetHash using an ephemeral parser until
+	// we see EOF or a new metric family. We have to do this as we don't know where (and whether)
+	// that CT line is.
+ // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
+ peek := deepCopy(p)
+ for {
+ eType, err := peek.Next()
+ if err != nil {
+			// This means peek will give an error later on too, so definitely no CT line was found.
+			// This might result in a partial scrape with a wrong/missing CT, but only a
+			// spec improvement would help.
+ // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
+ return nil
+ }
+ if eType != EntrySeries {
+			// Assume we hit a different family; no CT line found.
+ return nil
+ }
+
+ var peekedLset labels.Labels
+ peek.Metric(&peekedLset)
+ peekedName := peekedLset.Get(model.MetricNameLabel)
+ if !strings.HasSuffix(peekedName, "_created") {
+ // Not a CT line, search more.
+ continue
+ }
+
+		// We got a CT line here, but let's check whether this CT line is actually for our series (edge case).
+ peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile")
+ if peekWithoutNameLsetHash != currFamilyLsetHash {
+			// CT line for a different series; our series has no CT.
+ return nil
+ }
+ ct := int64(peek.val)
+ return &ct
+ }
+}
+
+// TypeRequiresCT returns true if the metric type requires a _created timestamp.
+func TypeRequiresCT(t model.MetricType) bool {
+ switch t {
+ case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram:
+ return true
+ default:
+ return false
+ }
+}
+
+// deepCopy creates a copy of a parser without re-using the slices' original memory addresses.
+func deepCopy(p *OpenMetricsParser) OpenMetricsParser {
+ newB := make([]byte, len(p.l.b))
+ copy(newB, p.l.b)
+
+ newLexer := &openMetricsLexer{
+ b: newB,
+ i: p.l.i,
+ start: p.l.start,
+ err: p.l.err,
+ state: p.l.state,
+ }
+
+ newParser := OpenMetricsParser{
+ l: newLexer,
+ builder: p.builder,
+ mtype: p.mtype,
+ val: p.val,
+ skipCTSeries: false,
+ }
+ return newParser
}
// nextToken returns the next token from the openMetricsLexer.
@@ -337,7 +447,13 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
}
p.series = p.l.b[p.start:p.l.i]
- return p.parseMetricSuffix(p.nextToken())
+ if err := p.parseSeriesEndOfLine(p.nextToken()); err != nil {
+ return EntryInvalid, err
+ }
+ if p.skipCTSeries && p.isCreatedSeries() {
+ return p.Next()
+ }
+ return EntrySeries, nil
case tMName:
p.offsets = append(p.offsets, p.start, p.l.i)
p.series = p.l.b[p.start:p.l.i]
@@ -351,8 +467,14 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
p.series = p.l.b[p.start:p.l.i]
t2 = p.nextToken()
}
- return p.parseMetricSuffix(t2)
+ if err := p.parseSeriesEndOfLine(t2); err != nil {
+ return EntryInvalid, err
+ }
+ if p.skipCTSeries && p.isCreatedSeries() {
+ return p.Next()
+ }
+ return EntrySeries, nil
default:
err = p.parseError("expected a valid start token", t)
}
@@ -467,51 +589,64 @@ func (p *OpenMetricsParser) parseLVals(offsets []int, isExemplar bool) ([]int, e
}
}
-// parseMetricSuffix parses the end of the line after the metric name and
-// labels. It starts parsing with the provided token.
-func (p *OpenMetricsParser) parseMetricSuffix(t token) (Entry, error) {
+// isCreatedSeries returns true if the current series is a _created series.
+func (p *OpenMetricsParser) isCreatedSeries() bool {
+ var newLbs labels.Labels
+ p.Metric(&newLbs)
+ name := newLbs.Get(model.MetricNameLabel)
+ if TypeRequiresCT(p.mtype) && strings.HasSuffix(name, "_created") {
+ return true
+ }
+ return false
+}
+
+// parseSeriesEndOfLine parses the end of a series line (value, optional
+// timestamp, comment, etc.) after the metric name and labels.
+// It starts parsing with the provided token.
+func (p *OpenMetricsParser) parseSeriesEndOfLine(t token) error {
if p.offsets[0] == -1 {
- return EntryInvalid, fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i])
+ return fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i])
}
var err error
p.val, err = p.getFloatValue(t, "metric")
if err != nil {
- return EntryInvalid, err
+ return err
}
p.hasTS = false
switch t2 := p.nextToken(); t2 {
case tEOF:
- return EntryInvalid, errors.New("data does not end with # EOF")
+ return errors.New("data does not end with # EOF")
case tLinebreak:
break
case tComment:
if err := p.parseComment(); err != nil {
- return EntryInvalid, err
+ return err
}
case tTimestamp:
p.hasTS = true
var ts float64
// A float is enough to hold what we need for millisecond resolution.
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
- return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
+ return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
}
if math.IsNaN(ts) || math.IsInf(ts, 0) {
- return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts)
+ return fmt.Errorf("invalid timestamp %f", ts)
}
p.ts = int64(ts * 1000)
switch t3 := p.nextToken(); t3 {
case tLinebreak:
case tComment:
if err := p.parseComment(); err != nil {
- return EntryInvalid, err
+ return err
}
default:
- return EntryInvalid, p.parseError("expected next entry after timestamp", t3)
+ return p.parseError("expected next entry after timestamp", t3)
}
}
- return EntrySeries, nil
+
+ return nil
}
func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) {
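A minimal sketch of how the new skip-CT mode is meant to be driven (assuming the textparse API as changed above; error handling trimmed). With WithOMParserCTSeriesSkipped set, Next never surfaces the _created series itself; CreatedTimestamp reports the value per series instead:

package main

import (
	"errors"
	"fmt"
	"io"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	input := []byte("# TYPE foo counter\nfoo_total 17.0\nfoo_created 1000\n# EOF\n")
	p := textparse.NewOpenMetricsParser(input, labels.NewSymbolTable(), textparse.WithOMParserCTSeriesSkipped())
	var lset labels.Labels
	for {
		et, err := p.Next()
		if errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			panic(err)
		}
		if et != textparse.EntrySeries {
			continue
		}
		p.Metric(&lset)
		// Prints: foo_total: created timestamp 1000
		if ct := p.CreatedTimestamp(); ct != nil {
			fmt.Printf("%s: created timestamp %d\n", lset.Get(labels.MetricName), *ct)
		}
	}
}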
diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go
index bc76a540d3..cadaabc99f 100644
--- a/model/textparse/openmetricsparse_test.go
+++ b/model/textparse/openmetricsparse_test.go
@@ -14,6 +14,7 @@
package textparse
import (
+ "errors"
"io"
"testing"
@@ -24,6 +25,8 @@ import (
"github.com/prometheus/prometheus/model/labels"
)
+func int64p(x int64) *int64 { return &x }
+
func TestOpenMetricsParse(t *testing.T) {
input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
@@ -63,15 +66,34 @@ ss{A="a"} 0
_metric_starting_with_underscore 1
testmetric{_label_starting_with_underscore="foo"} 1
testmetric{label="\"bar\""} 1
+# HELP foo Counter with and without labels to verify CT is parsed in both cases
# TYPE foo counter
-foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
+foo_total 17.0 1520879607.789 # {id="counter-test"} 5
+foo_created 1000
+foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5
+foo_created{a="b"} 1000
+# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines away
+# TYPE bar summary
+bar_count 17.0
+bar_sum 324789.3
+bar{quantile="0.95"} 123.7
+bar{quantile="0.99"} 150.0
+bar_created 1520430000
+# HELP baz Histogram with the same objective as the summary above
+# TYPE baz histogram
+baz_bucket{le="0.0"} 0
+baz_bucket{le="+Inf"} 17
+baz_count 17
+baz_sum 324789.3
+baz_created 1520430000
+# HELP fizz_created Gauge which shouldn't be parsed as CT
+# TYPE fizz_created gauge
+fizz_created 17.0`
input += "\n# HELP metric foo\x00bar"
input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
input += "\n# EOF\n"
- int64p := func(x int64) *int64 { return &x }
-
exp := []expectedParse{
{
m: "go_gc_duration_seconds",
@@ -216,6 +238,9 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1,
lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
+ }, {
+ m: "foo",
+ help: "Counter with and without labels to certify CT is parsed for both cases",
}, {
m: "foo",
typ: model.MetricTypeCounter,
@@ -225,6 +250,76 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
lset: labels.FromStrings("__name__", "foo_total"),
t: int64p(1520879607789),
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
+ ct: int64p(1000),
+ }, {
+ m: `foo_total{a="b"}`,
+ v: 17.0,
+ lset: labels.FromStrings("__name__", "foo_total", "a", "b"),
+ t: int64p(1520879607789),
+ e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
+ ct: int64p(1000),
+ }, {
+ m: "bar",
+ help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far",
+ }, {
+ m: "bar",
+ typ: model.MetricTypeSummary,
+ }, {
+ m: "bar_count",
+ v: 17.0,
+ lset: labels.FromStrings("__name__", "bar_count"),
+ ct: int64p(1520430000),
+ }, {
+ m: "bar_sum",
+ v: 324789.3,
+ lset: labels.FromStrings("__name__", "bar_sum"),
+ ct: int64p(1520430000),
+ }, {
+ m: `bar{quantile="0.95"}`,
+ v: 123.7,
+ lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"),
+ ct: int64p(1520430000),
+ }, {
+ m: `bar{quantile="0.99"}`,
+ v: 150.0,
+ lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"),
+ ct: int64p(1520430000),
+ }, {
+ m: "baz",
+ help: "Histogram with the same objective as above's summary",
+ }, {
+ m: "baz",
+ typ: model.MetricTypeHistogram,
+ }, {
+ m: `baz_bucket{le="0.0"}`,
+ v: 0,
+ lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"),
+ ct: int64p(1520430000),
+ }, {
+ m: `baz_bucket{le="+Inf"}`,
+ v: 17,
+ lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"),
+ ct: int64p(1520430000),
+ }, {
+ m: `baz_count`,
+ v: 17,
+ lset: labels.FromStrings("__name__", "baz_count"),
+ ct: int64p(1520430000),
+ }, {
+ m: `baz_sum`,
+ v: 324789.3,
+ lset: labels.FromStrings("__name__", "baz_sum"),
+ ct: int64p(1520430000),
+ }, {
+ m: "fizz_created",
+ help: "Gauge which shouldn't be parsed as CT",
+ }, {
+ m: "fizz_created",
+ typ: model.MetricTypeGauge,
+ }, {
+ m: `fizz_created`,
+ v: 17,
+ lset: labels.FromStrings("__name__", "fizz_created"),
}, {
m: "metric",
help: "foo\x00bar",
@@ -235,8 +330,8 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
},
}
- p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable())
- checkParseResults(t, p, exp)
+ p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
+ checkParseResultsWithCT(t, p, exp, true)
}
func TestUTF8OpenMetricsParse(t *testing.T) {
@@ -251,6 +346,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
# UNIT "go.gc_duration_seconds" seconds
{"go.gc_duration_seconds",quantile="0"} 4.9351e-05
{"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05
+{"go.gc_duration_seconds_created"} 12313
{"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05
{"http.status",q="0.9",a="b"} 8.3835e-05
{"http.status",q="0.9",a="b"} 8.3835e-05
@@ -274,10 +370,12 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
m: `{"go.gc_duration_seconds",quantile="0"}`,
v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"),
+ ct: int64p(12313),
}, {
m: `{"go.gc_duration_seconds",quantile="0.25"}`,
v: 7.424100000000001e-05,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"),
+ ct: int64p(12313),
}, {
m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`,
v: 8.3835e-05,
@@ -306,8 +404,8 @@ choices}`, "strange©™\n'quoted' \"name\"", "6"),
},
}
- p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable())
- checkParseResults(t, p, exp)
+ p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
+ checkParseResultsWithCT(t, p, exp, true)
}
func TestOpenMetricsParseErrors(t *testing.T) {
@@ -598,10 +696,6 @@ func TestOpenMetricsParseErrors(t *testing.T) {
input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 -Inf",
err: `invalid exemplar timestamp -Inf`,
},
- {
- input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 Inf",
- err: `invalid exemplar timestamp +Inf`,
- },
}
for i, c := range cases {
@@ -684,3 +778,217 @@ func TestOMNullByteHandling(t *testing.T) {
require.Equal(t, c.err, err.Error(), "test %d", i)
}
}
+
+// While not desirable, there are cases where CT fails to parse and
+// these tests show them.
+// TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
+func TestCTParseFailures(t *testing.T) {
+ input := `# HELP something Histogram with _created between the sum and the buckets
+# TYPE something histogram
+something_count 17
+something_sum 324789.3
+something_created 1520430001
+something_bucket{le="0.0"} 0
+something_bucket{le="+Inf"} 17
+# HELP thing Histogram with _created as the first line
+# TYPE thing histogram
+thing_created 1520430002
+thing_count 17
+thing_sum 324789.3
+thing_bucket{le="0.0"} 0
+thing_bucket{le="+Inf"} 17
+# HELP yum Summary with _created between sum and quantiles
+# TYPE yum summary
+yum_count 17.0
+yum_sum 324789.3
+yum_created 1520430003
+yum{quantile="0.95"} 123.7
+yum{quantile="0.99"} 150.0
+# HELP foobar Summary with _created as the first line
+# TYPE foobar summary
+foobar_created 1520430004
+foobar_count 17.0
+foobar_sum 324789.3
+foobar{quantile="0.95"} 123.7
+foobar{quantile="0.99"} 150.0`
+
+ input += "\n# EOF\n"
+
+ int64p := func(x int64) *int64 { return &x }
+
+ type expectCT struct {
+ m string
+ ct *int64
+ typ model.MetricType
+ help string
+ isErr bool
+ }
+
+ exp := []expectCT{
+ {
+ m: "something",
+ help: "Histogram with _created between buckets and summary",
+ isErr: false,
+ }, {
+ m: "something",
+ typ: model.MetricTypeHistogram,
+ isErr: false,
+ }, {
+ m: `something_count`,
+ ct: int64p(1520430001),
+ isErr: false,
+ }, {
+ m: `something_sum`,
+ ct: int64p(1520430001),
+ isErr: false,
+ }, {
+ m: `something_bucket{le="0.0"}`,
+ ct: int64p(1520430001),
+ isErr: true,
+ }, {
+ m: `something_bucket{le="+Inf"}`,
+ ct: int64p(1520430001),
+ isErr: true,
+ }, {
+ m: "thing",
+ help: "Histogram with _created as first line",
+ isErr: false,
+ }, {
+ m: "thing",
+ typ: model.MetricTypeHistogram,
+ isErr: false,
+ }, {
+ m: `thing_count`,
+ ct: int64p(1520430002),
+ isErr: true,
+ }, {
+ m: `thing_sum`,
+ ct: int64p(1520430002),
+ isErr: true,
+ }, {
+ m: `thing_bucket{le="0.0"}`,
+ ct: int64p(1520430002),
+ isErr: true,
+ }, {
+ m: `thing_bucket{le="+Inf"}`,
+ ct: int64p(1520430002),
+ isErr: true,
+ }, {
+ m: "yum",
+ help: "Summary with _created between summary and quantiles",
+ isErr: false,
+ }, {
+ m: "yum",
+ typ: model.MetricTypeSummary,
+ isErr: false,
+ }, {
+ m: "yum_count",
+ ct: int64p(1520430003),
+ isErr: false,
+ }, {
+ m: "yum_sum",
+ ct: int64p(1520430003),
+ isErr: false,
+ }, {
+ m: `yum{quantile="0.95"}`,
+ ct: int64p(1520430003),
+ isErr: true,
+ }, {
+ m: `yum{quantile="0.99"}`,
+ ct: int64p(1520430003),
+ isErr: true,
+ }, {
+ m: "foobar",
+ help: "Summary with _created as the first line",
+ isErr: false,
+ }, {
+ m: "foobar",
+ typ: model.MetricTypeSummary,
+ isErr: false,
+ }, {
+ m: "foobar_count",
+ ct: int64p(1520430004),
+ isErr: true,
+ }, {
+ m: "foobar_sum",
+ ct: int64p(1520430004),
+ isErr: true,
+ }, {
+ m: `foobar{quantile="0.95"}`,
+ ct: int64p(1520430004),
+ isErr: true,
+ }, {
+ m: `foobar{quantile="0.99"}`,
+ ct: int64p(1520430004),
+ isErr: true,
+ },
+ }
+
+ p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
+ i := 0
+
+ var res labels.Labels
+ for {
+ et, err := p.Next()
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ require.NoError(t, err)
+
+ switch et {
+ case EntrySeries:
+ p.Metric(&res)
+
+ if ct := p.CreatedTimestamp(); exp[i].isErr {
+ require.Nil(t, ct)
+ } else {
+ require.Equal(t, *exp[i].ct, *ct)
+ }
+ default:
+ i++
+ continue
+ }
+ i++
+ }
+}
+
+func TestDeepCopy(t *testing.T) {
+ input := []byte(`# HELP go_goroutines A gauge of goroutines.
+# TYPE go_goroutines gauge
+go_goroutines 33 123.123
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds
+go_gc_duration_seconds_created`)
+
+ st := labels.NewSymbolTable()
+ parser := NewOpenMetricsParser(input, st, WithOMParserCTSeriesSkipped()).(*OpenMetricsParser)
+
+ // Modify the original parser state
+ _, err := parser.Next()
+ require.NoError(t, err)
+ require.Equal(t, "go_goroutines", string(parser.l.b[parser.offsets[0]:parser.offsets[1]]))
+ require.True(t, parser.skipCTSeries)
+
+ // Create a deep copy of the parser
+ copyParser := deepCopy(parser)
+ etype, err := copyParser.Next()
+ require.NoError(t, err)
+ require.Equal(t, EntryType, etype)
+ require.True(t, parser.skipCTSeries)
+ require.False(t, copyParser.skipCTSeries)
+
+ // Modify the original parser further
+ parser.Next()
+ parser.Next()
+ parser.Next()
+ require.Equal(t, "go_gc_duration_seconds", string(parser.l.b[parser.offsets[0]:parser.offsets[1]]))
+ require.Equal(t, "summary", string(parser.mtype))
+ require.False(t, copyParser.skipCTSeries)
+ require.True(t, parser.skipCTSeries)
+
+ // Ensure the copy remains unchanged
+ copyParser.Next()
+ copyParser.Next()
+ require.Equal(t, "go_gc_duration_seconds", string(copyParser.l.b[copyParser.offsets[0]:copyParser.offsets[1]]))
+ require.False(t, copyParser.skipCTSeries)
+}
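For orientation, deepCopy above exists so CreatedTimestamp can probe ahead on a throwaway parser: the copy duplicates the lexer buffer (so the peek can never be observed through the original's slice) and clears skipCTSeries so the peeking copy still sees _created lines. The pattern in isolation (hypothetical peeker type, not the package's API):

package main

import "fmt"

// peeker is a hypothetical stand-in for the lexer state that gets copied.
type peeker struct {
	buf []byte
	pos int
}

// deepCopy duplicates the buffer so advances on the copy can never be
// observed through the original's slice.
func (p *peeker) deepCopy() *peeker {
	nb := make([]byte, len(p.buf))
	copy(nb, p.buf)
	return &peeker{buf: nb, pos: p.pos}
}

func main() {
	orig := &peeker{buf: []byte("foo_total 17\nfoo_created 1000\n")}
	peek := orig.deepCopy()
	peek.pos = len(peek.buf)        // scan ahead on the copy only
	fmt.Println(orig.pos, peek.pos) // 0 30: the original is untouched
}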
diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go
index 66986291d7..7971d23b7e 100644
--- a/model/textparse/promparse_test.go
+++ b/model/textparse/promparse_test.go
@@ -18,6 +18,7 @@ import (
"errors"
"io"
"os"
+ "strings"
"testing"
"github.com/klauspost/compress/gzip"
@@ -41,6 +42,7 @@ type expectedParse struct {
unit string
comment string
e *exemplar.Exemplar
+ ct *int64
}
func TestPromParse(t *testing.T) {
@@ -188,6 +190,10 @@ testmetric{label="\"bar\""} 1`
}
func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
+ checkParseResultsWithCT(t, p, exp, false)
+}
+
+func checkParseResultsWithCT(t *testing.T, p Parser, exp []expectedParse, ctLinesRemoved bool) {
i := 0
var res labels.Labels
@@ -205,6 +211,14 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
p.Metric(&res)
+ if ctLinesRemoved {
+ // Are CT series skipped?
+ _, typ := p.Type()
+ if TypeRequiresCT(typ) && strings.HasSuffix(res.Get(labels.MetricName), "_created") {
+ t.Fatalf("we exped created lines skipped")
+ }
+ }
+
require.Equal(t, exp[i].m, string(m))
require.Equal(t, exp[i].t, ts)
require.Equal(t, exp[i].v, v)
@@ -218,6 +232,11 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
require.True(t, found)
testutil.RequireEqual(t, *exp[i].e, e)
}
+ if ct := p.CreatedTimestamp(); ct != nil {
+ require.Equal(t, *exp[i].ct, *ct)
+ } else {
+ require.Nil(t, exp[i].ct)
+ }
case EntryType:
m, typ := p.Type()
@@ -475,8 +494,10 @@ const (
func BenchmarkParse(b *testing.B) {
for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{
- "prometheus": NewPromParser,
- "openmetrics": NewOpenMetricsParser,
+ "prometheus": NewPromParser,
+ "openmetrics": func(b []byte, st *labels.SymbolTable) Parser {
+ return NewOpenMetricsParser(b, st)
+ },
} {
for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
f, err := os.Open(fn)
diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go
index d44e9227da..92ba8a01c7 100644
--- a/model/textparse/protobufparse.go
+++ b/model/textparse/protobufparse.go
@@ -47,7 +47,7 @@ import (
// the re-arrangement work is actually causing problems (which has to be seen),
// that expectation needs to be changed.
type ProtobufParser struct {
- in []byte // The intput to parse.
+ in []byte // The input to parse.
inPos int // Position within the input.
metricPos int // Position within Metric slice.
// fieldPos is the position within a Summary or (legacy) Histogram. -2
@@ -71,7 +71,7 @@ type ProtobufParser struct {
mf *dto.MetricFamily
- // Wether to also parse a classic histogram that is also present as a
+ // Whether to also parse a classic histogram that is also present as a
// native histogram.
parseClassicHistograms bool
@@ -405,6 +405,7 @@ func (p *ProtobufParser) Next() (Entry, error) {
switch p.state {
case EntryInvalid:
p.metricPos = 0
+ p.exemplarPos = 0
p.fieldPos = -2
n, err := readDelimited(p.in[p.inPos:], p.mf)
p.inPos += n
@@ -481,6 +482,7 @@ func (p *ProtobufParser) Next() (Entry, error) {
p.metricPos++
p.fieldPos = -2
p.fieldsDone = false
+ p.exemplarPos = 0
// If this is a metric family containing native
// histograms, we have to switch back to native
// histograms after parsing a classic histogram.
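The two exemplarPos resets above fix a stateful-cursor bug: the position survived across metrics, so exemplars of a later metric in the same family could be silently skipped. The pattern reduced to its essentials (hypothetical names, not the parser's actual fields beyond the two shown):

package main

import "fmt"

type parserState struct {
	metricPos   int
	exemplarPos int
}

// nextMetric advances to the next metric; the fix in this hunk is the
// rewind of exemplarPos, without which the cursor keeps the offset it
// reached on the previous metric and later exemplars are skipped.
func (s *parserState) nextMetric() {
	s.metricPos++
	s.exemplarPos = 0
}

func main() {
	s := &parserState{exemplarPos: 3} // pretend we consumed 3 exemplars
	s.nextMetric()
	fmt.Println(s.metricPos, s.exemplarPos) // 1 0
}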
diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go
index af0b3e27b0..938c71078f 100644
--- a/model/textparse/protobufparse_test.go
+++ b/model/textparse/protobufparse_test.go
@@ -695,6 +695,70 @@ metric: <
timestamp_ms: 1234568
>
+`,
+
+ `name: "test_histogram_with_native_histogram_exemplars2"
+help: "Another histogram with native histogram exemplars."
+type: HISTOGRAM
+metric: <
+ histogram: <
+ sample_count: 175
+ sample_sum: 0.0008280461746287094
+ bucket: <
+ cumulative_count: 2
+ upper_bound: -0.0004899999999999998
+ >
+ bucket: <
+ cumulative_count: 4
+ upper_bound: -0.0003899999999999998
+ >
+ bucket: <
+ cumulative_count: 16
+ upper_bound: -0.0002899999999999998
+ >
+ schema: 3
+ zero_threshold: 2.938735877055719e-39
+ zero_count: 2
+ negative_span: <
+ offset: -162
+ length: 1
+ >
+ negative_span: <
+ offset: 23
+ length: 4
+ >
+ negative_delta: 1
+ negative_delta: 3
+ negative_delta: -2
+ negative_delta: -1
+ negative_delta: 1
+ positive_span: <
+ offset: -161
+ length: 1
+ >
+ positive_span: <
+ offset: 8
+ length: 3
+ >
+ positive_delta: 1
+ positive_delta: 2
+ positive_delta: -1
+ positive_delta: -1
+ exemplars: <
+ label: <
+ name: "dummyID"
+ value: "59780"
+ >
+ value: -0.00039
+ timestamp: <
+ seconds: 1625851155
+ nanos: 146848499
+ >
+ >
+ >
+ timestamp_ms: 1234568
+>
+
`,
}
@@ -1276,6 +1340,41 @@ func TestProtobufParse(t *testing.T) {
{Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156},
},
},
+ {
+ m: "test_histogram_with_native_histogram_exemplars2",
+ help: "Another histogram with native histogram exemplars.",
+ },
+ {
+ m: "test_histogram_with_native_histogram_exemplars2",
+ typ: model.MetricTypeHistogram,
+ },
+ {
+ m: "test_histogram_with_native_histogram_exemplars2",
+ t: 1234568,
+ shs: &histogram.Histogram{
+ Count: 175,
+ ZeroCount: 2,
+ Sum: 0.0008280461746287094,
+ ZeroThreshold: 2.938735877055719e-39,
+ Schema: 3,
+ PositiveSpans: []histogram.Span{
+ {Offset: -161, Length: 1},
+ {Offset: 8, Length: 3},
+ },
+ NegativeSpans: []histogram.Span{
+ {Offset: -162, Length: 1},
+ {Offset: 23, Length: 4},
+ },
+ PositiveBuckets: []int64{1, 2, -1, -1},
+ NegativeBuckets: []int64{1, 3, -2, -1, 1},
+ },
+ lset: labels.FromStrings(
+ "__name__", "test_histogram_with_native_histogram_exemplars2",
+ ),
+ e: []exemplar.Exemplar{
+ {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
+ },
+ },
},
},
{
@@ -1995,15 +2094,15 @@ func TestProtobufParse(t *testing.T) {
"__name__", "without_quantiles_sum",
),
},
- { // 78
+ { // 81
m: "empty_histogram",
help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.",
},
- { // 79
+ { // 82
m: "empty_histogram",
typ: model.MetricTypeHistogram,
},
- { // 80
+ { // 83
m: "empty_histogram",
shs: &histogram.Histogram{
CounterResetHint: histogram.UnknownCounterReset,
@@ -2014,15 +2113,15 @@ func TestProtobufParse(t *testing.T) {
"__name__", "empty_histogram",
),
},
- { // 81
+ { // 84
m: "test_counter_with_createdtimestamp",
help: "A counter with a created timestamp.",
},
- { // 82
+ { // 85
m: "test_counter_with_createdtimestamp",
typ: model.MetricTypeCounter,
},
- { // 83
+ { // 86
m: "test_counter_with_createdtimestamp",
v: 42,
ct: 1000,
@@ -2030,15 +2129,15 @@ func TestProtobufParse(t *testing.T) {
"__name__", "test_counter_with_createdtimestamp",
),
},
- { // 84
+ { // 87
m: "test_summary_with_createdtimestamp",
help: "A summary with a created timestamp.",
},
- { // 85
+ { // 88
m: "test_summary_with_createdtimestamp",
typ: model.MetricTypeSummary,
},
- { // 86
+ { // 89
m: "test_summary_with_createdtimestamp_count",
v: 42,
ct: 1000,
@@ -2046,7 +2145,7 @@ func TestProtobufParse(t *testing.T) {
"__name__", "test_summary_with_createdtimestamp_count",
),
},
- { // 87
+ { // 90
m: "test_summary_with_createdtimestamp_sum",
v: 1.234,
ct: 1000,
@@ -2054,15 +2153,15 @@ func TestProtobufParse(t *testing.T) {
"__name__", "test_summary_with_createdtimestamp_sum",
),
},
- { // 88
+ { // 91
m: "test_histogram_with_createdtimestamp",
help: "A histogram with a created timestamp.",
},
- { // 89
+ { // 92
m: "test_histogram_with_createdtimestamp",
typ: model.MetricTypeHistogram,
},
- { // 90
+ { // 93
m: "test_histogram_with_createdtimestamp",
ct: 1000,
shs: &histogram.Histogram{
@@ -2074,15 +2173,15 @@ func TestProtobufParse(t *testing.T) {
"__name__", "test_histogram_with_createdtimestamp",
),
},
- { // 91
+ { // 94
m: "test_gaugehistogram_with_createdtimestamp",
help: "A gauge histogram with a created timestamp.",
},
- { // 92
+ { // 95
m: "test_gaugehistogram_with_createdtimestamp",
typ: model.MetricTypeGaugeHistogram,
},
- { // 93
+ { // 96
m: "test_gaugehistogram_with_createdtimestamp",
ct: 1000,
shs: &histogram.Histogram{
@@ -2094,15 +2193,15 @@ func TestProtobufParse(t *testing.T) {
"__name__", "test_gaugehistogram_with_createdtimestamp",
),
},
- { // 94
+ { // 97
m: "test_histogram_with_native_histogram_exemplars",
help: "A histogram with native histogram exemplars.",
},
- { // 95
+ { // 98
m: "test_histogram_with_native_histogram_exemplars",
typ: model.MetricTypeHistogram,
},
- { // 96
+ { // 99
m: "test_histogram_with_native_histogram_exemplars",
t: 1234568,
shs: &histogram.Histogram{
@@ -2130,7 +2229,7 @@ func TestProtobufParse(t *testing.T) {
{Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156},
},
},
- { // 97
+ { // 100
m: "test_histogram_with_native_histogram_exemplars_count",
t: 1234568,
v: 175,
@@ -2138,7 +2237,7 @@ func TestProtobufParse(t *testing.T) {
"__name__", "test_histogram_with_native_histogram_exemplars_count",
),
},
- { // 98
+ { // 101
m: "test_histogram_with_native_histogram_exemplars_sum",
t: 1234568,
v: 0.0008280461746287094,
@@ -2146,7 +2245,7 @@ func TestProtobufParse(t *testing.T) {
"__name__", "test_histogram_with_native_histogram_exemplars_sum",
),
},
- { // 99
+ { // 102
m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0004899999999999998",
t: 1234568,
v: 2,
@@ -2155,7 +2254,7 @@ func TestProtobufParse(t *testing.T) {
"le", "-0.0004899999999999998",
),
},
- { // 100
+ { // 103
m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0003899999999999998",
t: 1234568,
v: 4,
@@ -2167,7 +2266,7 @@ func TestProtobufParse(t *testing.T) {
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
- { // 101
+ { // 104
m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0002899999999999998",
t: 1234568,
v: 16,
@@ -2179,7 +2278,7 @@ func TestProtobufParse(t *testing.T) {
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
},
},
- { // 102
+ { // 105
m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff+Inf",
t: 1234568,
v: 175,
@@ -2188,6 +2287,93 @@ func TestProtobufParse(t *testing.T) {
"le", "+Inf",
),
},
+ { // 106
+ m: "test_histogram_with_native_histogram_exemplars2",
+ help: "Another histogram with native histogram exemplars.",
+ },
+ { // 107
+ m: "test_histogram_with_native_histogram_exemplars2",
+ typ: model.MetricTypeHistogram,
+ },
+ { // 108
+ m: "test_histogram_with_native_histogram_exemplars2",
+ t: 1234568,
+ shs: &histogram.Histogram{
+ Count: 175,
+ ZeroCount: 2,
+ Sum: 0.0008280461746287094,
+ ZeroThreshold: 2.938735877055719e-39,
+ Schema: 3,
+ PositiveSpans: []histogram.Span{
+ {Offset: -161, Length: 1},
+ {Offset: 8, Length: 3},
+ },
+ NegativeSpans: []histogram.Span{
+ {Offset: -162, Length: 1},
+ {Offset: 23, Length: 4},
+ },
+ PositiveBuckets: []int64{1, 2, -1, -1},
+ NegativeBuckets: []int64{1, 3, -2, -1, 1},
+ },
+ lset: labels.FromStrings(
+ "__name__", "test_histogram_with_native_histogram_exemplars2",
+ ),
+ e: []exemplar.Exemplar{
+ {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
+ },
+ },
+ { // 109
+ m: "test_histogram_with_native_histogram_exemplars2_count",
+ t: 1234568,
+ v: 175,
+ lset: labels.FromStrings(
+ "__name__", "test_histogram_with_native_histogram_exemplars2_count",
+ ),
+ },
+ { // 110
+ m: "test_histogram_with_native_histogram_exemplars2_sum",
+ t: 1234568,
+ v: 0.0008280461746287094,
+ lset: labels.FromStrings(
+ "__name__", "test_histogram_with_native_histogram_exemplars2_sum",
+ ),
+ },
+ { // 111
+ m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0004899999999999998",
+ t: 1234568,
+ v: 2,
+ lset: labels.FromStrings(
+ "__name__", "test_histogram_with_native_histogram_exemplars2_bucket",
+ "le", "-0.0004899999999999998",
+ ),
+ },
+ { // 112
+ m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0003899999999999998",
+ t: 1234568,
+ v: 4,
+ lset: labels.FromStrings(
+ "__name__", "test_histogram_with_native_histogram_exemplars2_bucket",
+ "le", "-0.0003899999999999998",
+ ),
+ },
+ { // 113
+ m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0002899999999999998",
+ t: 1234568,
+ v: 16,
+ lset: labels.FromStrings(
+ "__name__", "test_histogram_with_native_histogram_exemplars2_bucket",
+ "le", "-0.0002899999999999998",
+ ),
+ },
+ { // 114
+ m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff+Inf",
+ t: 1234568,
+ v: 175,
+ lset: labels.FromStrings(
+ "__name__", "test_histogram_with_native_histogram_exemplars2_bucket",
+ "le", "+Inf",
+ ),
+ },
},
},
}
diff --git a/notifier/notifier.go b/notifier/notifier.go
index 68b0d4961e..218e4cb8c7 100644
--- a/notifier/notifier.go
+++ b/notifier/notifier.go
@@ -674,7 +674,6 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b
}()
// Any HTTP status 2xx is OK.
- //nolint:usestdlibvars
if resp.StatusCode/100 != 2 {
return fmt.Errorf("bad response status %s", resp.Status)
}
diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go
index 2cdaa9e06d..cf922a537c 100644
--- a/notifier/notifier_test.go
+++ b/notifier/notifier_test.go
@@ -711,7 +711,7 @@ func TestHangingNotifier(t *testing.T) {
)
var (
- sendTimeout = 10 * time.Millisecond
+ sendTimeout = 100 * time.Millisecond
sdUpdatert = sendTimeout / 2
done = make(chan struct{})
diff --git a/prompb/io/prometheus/write/v2/types.pb.go b/prompb/io/prometheus/write/v2/types.pb.go
index 54eaf2f12c..18f309ba0a 100644
--- a/prompb/io/prometheus/write/v2/types.pb.go
+++ b/prompb/io/prometheus/write/v2/types.pb.go
@@ -359,15 +359,10 @@ type Exemplar struct {
// value represents an exact example value. This can be useful when the exemplar
// is attached to a histogram, which only gives an estimated value through buckets.
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
- // timestamp represents an optional timestamp of the sample in ms.
+ // timestamp represents the timestamp of the exemplar in ms.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
// for conversion from/to time.Time to Prometheus timestamp.
- //
- // Note that the "optional" keyword is omitted due to
- // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
- // Zero value means value not set. If you need to use exactly zero value for
- // the timestamp, use 1 millisecond before or after.
Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
}
diff --git a/prompb/io/prometheus/write/v2/types.proto b/prompb/io/prometheus/write/v2/types.proto
index 44bc3f06b0..18e4a0c575 100644
--- a/prompb/io/prometheus/write/v2/types.proto
+++ b/prompb/io/prometheus/write/v2/types.proto
@@ -105,15 +105,10 @@ message Exemplar {
// value represents an exact example value. This can be useful when the exemplar
// is attached to a histogram, which only gives an estimated value through buckets.
double value = 2;
- // timestamp represents an optional timestamp of the sample in ms.
+ // timestamp represents the timestamp of the exemplar in ms.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
// for conversion from/to time.Time to Prometheus timestamp.
- //
- // Note that the "optional" keyword is omitted due to
- // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
- // Zero value means value not set. If you need to use exactly zero value for
- // the timestamp, use 1 millisecond before or after.
int64 timestamp = 3;
}
diff --git a/promql/bench_test.go b/promql/bench_test.go
index bd67280294..33523b2dbc 100644
--- a/promql/bench_test.go
+++ b/promql/bench_test.go
@@ -165,6 +165,9 @@ func rangeQueryCases() []benchCase {
{
expr: "sum(a_X)",
},
+ {
+ expr: "avg(a_X)",
+ },
{
expr: "sum without (l)(h_X)",
},
diff --git a/promql/engine.go b/promql/engine.go
index 25e67db633..b54ce2d6dc 100644
--- a/promql/engine.go
+++ b/promql/engine.go
@@ -313,6 +313,11 @@ type EngineOpts struct {
// EnablePerStepStats if true allows for per-step stats to be computed on request. Disabled otherwise.
EnablePerStepStats bool
+
+ // EnableDelayedNameRemoval delays the removal of the __name__ label to the last step of the query evaluation.
+ // This is useful in certain scenarios where the __name__ label must be preserved or where applying a
+ // regex-matcher to the __name__ label may otherwise lead to duplicate labelset errors.
+ EnableDelayedNameRemoval bool
}
// Engine handles the lifetime of queries from beginning to end.
@@ -330,6 +335,7 @@ type Engine struct {
enableAtModifier bool
enableNegativeOffset bool
enablePerStepStats bool
+ enableDelayedNameRemoval bool
}
// NewEngine returns a new engine.
@@ -420,6 +426,7 @@ func NewEngine(opts EngineOpts) *Engine {
enableAtModifier: opts.EnableAtModifier,
enableNegativeOffset: opts.EnableNegativeOffset,
enablePerStepStats: opts.EnablePerStepStats,
+ enableDelayedNameRemoval: opts.EnableDelayedNameRemoval,
}
}
@@ -573,7 +580,7 @@ func (ng *Engine) validateOpts(expr parser.Expr) error {
return validationErr
}
-// NewTestQuery: inject special behaviour into Query for testing.
+// NewTestQuery injects special behaviour into Query for testing.
func (ng *Engine) NewTestQuery(f func(context.Context) error) Query {
qry := &query{
q: "test statement",
@@ -712,6 +719,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
lookbackDelta: s.LookbackDelta,
samplesStats: query.sampleStats,
noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
+ enableDelayedNameRemoval: ng.enableDelayedNameRemoval,
}
query.sampleStats.InitStepTracking(start, start, 1)
@@ -743,9 +751,9 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
// Point might have a different timestamp, force it to the evaluation
// timestamp as that is when we ran the evaluation.
if len(s.Histograms) > 0 {
- vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start}
+ vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start, DropName: s.DropName}
} else {
- vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start}
+ vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start, DropName: s.DropName}
}
}
return vector, warnings, nil
@@ -770,6 +778,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
lookbackDelta: s.LookbackDelta,
samplesStats: query.sampleStats,
noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
+ enableDelayedNameRemoval: ng.enableDelayedNameRemoval,
}
query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval)
val, warnings, err := evaluator.Eval(s.Expr)
@@ -1032,6 +1041,7 @@ type evaluator struct {
lookbackDelta time.Duration
samplesStats *stats.QuerySamples
noStepSubqueryIntervalFn func(rangeMillis int64) int64
+ enableDelayedNameRemoval bool
}
// errorf causes a panic with the input formatted into an error.
@@ -1057,7 +1067,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp
buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)]
- level.Error(ev.logger).Log("msg", "runtime panic in parser", "expr", expr.String(), "err", e, "stacktrace", string(buf))
+ level.Error(ev.logger).Log("msg", "runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf))
*errp = fmt.Errorf("unexpected error: %w", err)
case errWithWarnings:
*errp = err.err
@@ -1073,6 +1083,9 @@ func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws annotations.Anno
defer ev.recover(expr, &ws, &err)
v, ws = ev.eval(expr)
+ if ev.enableDelayedNameRemoval {
+ ev.cleanupMetricLabels(v)
+ }
return v, ws, nil
}
@@ -1101,6 +1114,9 @@ type EvalNodeHelper struct {
rightSigs map[string]Sample
matchedSigs map[string]map[uint64]struct{}
resultMetric map[string]labels.Labels
+
+ // Additional options for the evaluation.
+ enableDelayedNameRemoval bool
}
func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) {
@@ -1150,7 +1166,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
biggestLen = len(matrixes[i])
}
}
- enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen)}
+ enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen), enableDelayedNameRemoval: ev.enableDelayedNameRemoval}
type seriesAndTimestamp struct {
Series
ts int64
@@ -1196,12 +1212,12 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
for si, series := range matrixes[i] {
switch {
case len(series.Floats) > 0 && series.Floats[0].T == ts:
- vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts})
+ vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts, DropName: series.DropName})
// Move input vectors forward so we don't have to re-scan the same
// past points at the next step.
matrixes[i][si].Floats = series.Floats[1:]
case len(series.Histograms) > 0 && series.Histograms[0].T == ts:
- vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts})
+ vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts, DropName: series.DropName})
matrixes[i][si].Histograms = series.Histograms[1:]
default:
continue
@@ -1240,15 +1256,15 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
// If this could be an instant query, shortcut so as not to change sort order.
if ev.endTimestamp == ev.startTimestamp {
- if result.ContainsSameLabelset() {
+ if !ev.enableDelayedNameRemoval && result.ContainsSameLabelset() {
ev.errorf("vector cannot contain metrics with the same labelset")
}
mat := make(Matrix, len(result))
for i, s := range result {
if s.H == nil {
- mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}}
+ mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}, DropName: s.DropName}
} else {
- mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}}
+ mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}, DropName: s.DropName}
}
}
ev.currentSamples = originalNumSamples + mat.TotalSamples()
@@ -1266,7 +1282,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
}
ss.ts = ts
} else {
- ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts}
+ ss = seriesAndTimestamp{Series{Metric: sample.Metric, DropName: sample.DropName}, ts}
}
addToSeries(&ss.Series, enh.Ts, sample.F, sample.H, numSteps)
seriess[h] = ss
@@ -1302,7 +1318,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
var warnings annotations.Annotations
- enh := &EvalNodeHelper{}
+ enh := &EvalNodeHelper{enableDelayedNameRemoval: ev.enableDelayedNameRemoval}
tempNumSamples := ev.currentSamples
// Create a mapping from input series to output groups.
@@ -1611,10 +1627,17 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
var prevSS *Series
inMatrix := make(Matrix, 1)
inArgs[matrixArgIndex] = inMatrix
- enh := &EvalNodeHelper{Out: make(Vector, 0, 1)}
+ enh := &EvalNodeHelper{Out: make(Vector, 0, 1), enableDelayedNameRemoval: ev.enableDelayedNameRemoval}
// Process all the calls for one time series at a time.
it := storage.NewBuffer(selRange)
var chkIter chunkenc.Iterator
+
+ // The last_over_time function acts like offset; thus, it
+ // should keep the metric name. For all the other range
+ // vector functions, the only change needed is to drop the
+ // metric name in the output.
+ dropName := e.Func.Name != "last_over_time"
+
for i, s := range selVS.Series {
if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
ev.error(err)
@@ -1629,15 +1652,12 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
chkIter = s.Iterator(chkIter)
it.Reset(chkIter)
metric := selVS.Series[i].Labels()
- // The last_over_time function acts like offset; thus, it
- // should keep the metric name. For all the other range
- // vector functions, the only change needed is to drop the
- // metric name in the output.
- if e.Func.Name != "last_over_time" {
+ if !ev.enableDelayedNameRemoval && dropName {
metric = metric.DropMetricName()
}
ss := Series{
- Metric: metric,
+ Metric: metric,
+ DropName: dropName,
}
inMatrix[0].Metric = selVS.Series[i].Labels()
for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval {
@@ -1752,16 +1772,16 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
return Matrix{
Series{
- Metric: createLabelsForAbsentFunction(e.Args[0]),
- Floats: newp,
+ Metric: createLabelsForAbsentFunction(e.Args[0]),
+ Floats: newp,
+ DropName: dropName,
},
}, warnings
}
- if mat.ContainsSameLabelset() {
+ if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() {
ev.errorf("vector cannot contain metrics with the same labelset")
}
-
return mat, warnings
case *parser.ParenExpr:
@@ -1772,12 +1792,15 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
mat := val.(Matrix)
if e.Op == parser.SUB {
for i := range mat {
- mat[i].Metric = mat[i].Metric.DropMetricName()
+ if !ev.enableDelayedNameRemoval {
+ mat[i].Metric = mat[i].Metric.DropMetricName()
+ }
+ mat[i].DropName = true
for j := range mat[i].Floats {
mat[i].Floats[j].F = -mat[i].Floats[j].F
}
}
- if mat.ContainsSameLabelset() {
+ if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() {
ev.errorf("vector cannot contain metrics with the same labelset")
}
}
@@ -1913,6 +1936,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
lookbackDelta: ev.lookbackDelta,
samplesStats: ev.samplesStats.NewChild(),
noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn,
+ enableDelayedNameRemoval: ev.enableDelayedNameRemoval,
}
if e.Step != 0 {
@@ -1957,6 +1981,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
lookbackDelta: ev.lookbackDelta,
samplesStats: ev.samplesStats.NewChild(),
noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn,
+ enableDelayedNameRemoval: ev.enableDelayedNameRemoval,
}
res, ws := newEv.eval(e.Expr)
ev.currentSamples = newEv.currentSamples
@@ -2356,6 +2381,11 @@ loop:
} else {
histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}})
}
+ if histograms[n].H == nil {
+ // Make sure to pass non-nil H to AtFloatHistogram so that it does a deep-copy.
+ // Not an issue in the loop above since that uses an intermediate buffer.
+ histograms[n].H = &histogram.FloatHistogram{}
+ }
histograms[n].T, histograms[n].H = it.AtFloatHistogram(histograms[n].H)
if value.IsStaleNaN(histograms[n].H.Sum) {
histograms = histograms[:n]
@@ -2548,7 +2578,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
continue
}
metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh)
- if returnBool {
+ if !ev.enableDelayedNameRemoval && returnBool {
metric = metric.DropMetricName()
}
insertedSigs, exists := matchedSigs[sig]
@@ -2573,9 +2603,10 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
}
enh.Out = append(enh.Out, Sample{
- Metric: metric,
- F: floatValue,
- H: histogramValue,
+ Metric: metric,
+ F: floatValue,
+ H: histogramValue,
+ DropName: returnBool,
})
}
return enh.Out, lastErr
@@ -2675,7 +2706,10 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
lhsSample.F = float
lhsSample.H = histogram
if shouldDropMetricName(op) || returnBool {
- lhsSample.Metric = lhsSample.Metric.DropMetricName()
+ if !ev.enableDelayedNameRemoval {
+ lhsSample.Metric = lhsSample.Metric.DropMetricName()
+ }
+ lhsSample.DropName = true
}
enh.Out = append(enh.Out, lhsSample)
}
@@ -2773,15 +2807,20 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram
}
type groupedAggregation struct {
- seen bool // Was this output groups seen in the input at this timestamp.
- hasFloat bool // Has at least 1 float64 sample aggregated.
- hasHistogram bool // Has at least 1 histogram sample aggregated.
- floatValue float64
- histogramValue *histogram.FloatHistogram
- floatMean float64 // Mean, or "compensating value" for Kahan summation.
- groupCount int
- groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group
- heap vectorByValueHeap
+ floatValue float64
+ histogramValue *histogram.FloatHistogram
+ floatMean float64
+ floatKahanC float64 // "Compensating value" for Kahan summation.
+ groupCount float64
+ heap vectorByValueHeap
+
+ // All bools together for better packing within the struct.
+ seen bool // Was this output group seen in the input at this timestamp.
+ hasFloat bool // Has at least 1 float64 sample aggregated.
+ hasHistogram bool // Has at least 1 histogram sample aggregated.
+ incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets, or incompatible custom buckets.
+ groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group.
+ incrementalMean bool // True after reverting to incremental calculation of the mean value.
}
// aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix.
@@ -2805,15 +2844,14 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// Initialize this group if it's the first time we've seen it.
if !group.seen {
*group = groupedAggregation{
- seen: true,
- floatValue: f,
- groupCount: 1,
+ seen: true,
+ floatValue: f,
+ floatMean: f,
+ incompatibleHistograms: false,
+ groupCount: 1,
}
switch op {
- case parser.AVG:
- group.floatMean = f
- fallthrough
- case parser.SUM:
+ case parser.AVG, parser.SUM:
if h == nil {
group.hasFloat = true
} else {
@@ -2821,7 +2859,6 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
group.hasHistogram = true
}
case parser.STDVAR, parser.STDDEV:
- group.floatMean = f
group.floatValue = 0
case parser.QUANTILE:
group.heap = make(vectorByValueHeap, 1)
@@ -2832,6 +2869,10 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
continue
}
+ if group.incompatibleHistograms {
+ continue
+ }
+
switch op {
case parser.SUM:
if h != nil {
@@ -2840,6 +2881,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
_, err := group.histogramValue.Add(h)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+ group.incompatibleHistograms = true
}
}
// Otherwise the aggregation contained floats
@@ -2847,7 +2889,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// point in copying the histogram in that case.
} else {
group.hasFloat = true
- group.floatValue, group.floatMean = kahanSumInc(f, group.floatValue, group.floatMean)
+ group.floatValue, group.floatKahanC = kahanSumInc(f, group.floatValue, group.floatKahanC)
}
case parser.AVG:
@@ -2855,15 +2897,19 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
if h != nil {
group.hasHistogram = true
if group.histogramValue != nil {
- left := h.Copy().Div(float64(group.groupCount))
- right := group.histogramValue.Copy().Div(float64(group.groupCount))
+ left := h.Copy().Div(group.groupCount)
+ right := group.histogramValue.Copy().Div(group.groupCount)
toAdd, err := left.Sub(right)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+ group.incompatibleHistograms = true
+ continue
}
_, err = group.histogramValue.Add(toAdd)
if err != nil {
handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+ group.incompatibleHistograms = true
+ continue
}
}
// Otherwise the aggregation contained floats
@@ -2871,6 +2917,22 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// point in copying the histogram in that case.
} else {
group.hasFloat = true
+ if !group.incrementalMean {
+ newV, newC := kahanSumInc(f, group.floatValue, group.floatKahanC)
+ if !math.IsInf(newV, 0) {
+ // The sum doesn't overflow, so we propagate it to the
+ // group struct and continue with the regular
+ // calculation of the mean value.
+ group.floatValue, group.floatKahanC = newV, newC
+ break
+ }
+ // If we are here, we know that the sum _would_ overflow. So
+ // instead of continuing to sum up, we revert to incremental
+ // calculation of the mean value from here on.
+ group.incrementalMean = true
+ group.floatMean = group.floatValue / (group.groupCount - 1)
+ group.floatKahanC /= group.groupCount - 1
+ }
if math.IsInf(group.floatMean, 0) {
if math.IsInf(f, 0) && (group.floatMean > 0) == (f > 0) {
// The `floatMean` and `s.F` values are `Inf` of the same sign. They
@@ -2888,8 +2950,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
break
}
}
- // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows.
- group.floatMean += f/float64(group.groupCount) - group.floatMean/float64(group.groupCount)
+ currentMean := group.floatMean + group.floatKahanC
+ group.floatMean, group.floatKahanC = kahanSumInc(
+ // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows.
+ f/group.groupCount-currentMean/group.groupCount,
+ group.floatMean,
+ group.floatKahanC,
+ )
}
case parser.GROUP:
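As an aside, the strategy the comments above describe can be sketched compactly (an illustration of the idea, not the engine code; group bookkeeping, histograms, and annotations omitted):

package main

import (
	"fmt"
	"math"
)

// kahanSumInc performs one step of Kahan compensated summation, mirroring
// the helper used by the aggregation code.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	switch {
	case math.IsInf(t, 0):
		return t, 0 // overflow: let the caller notice
	case math.Abs(sum) >= math.Abs(inc):
		return t, c + ((sum - t) + inc)
	default:
		return t, c + ((inc - t) + sum)
	}
}

// avg sums with Kahan compensation while the sum fits in a float64 and
// falls back to an incremental mean once the sum would overflow.
func avg(xs []float64) float64 {
	var sum, c, mean float64
	incremental := false
	for i, x := range xs {
		n := float64(i + 1)
		if !incremental {
			if newSum, newC := kahanSumInc(x, sum, c); !math.IsInf(newSum, 0) {
				sum, c = newSum, newC
				continue
			}
			// The sum would overflow: revert to the mean of the first i
			// values and switch to incremental calculation from here on.
			incremental = true
			mean = sum/(n-1) + c/(n-1)
		}
		// Divide each term by n before subtracting to avoid overflow.
		mean += x/n - mean/n
	}
	if incremental {
		return mean
	}
	return (sum + c) / float64(len(xs))
}

func main() {
	// The naive sum of these values overflows, but the mean (~3.33e307) is
	// representable and is recovered by the incremental path.
	fmt.Println(avg([]float64{1e308, 1e308, -1e308}))
}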
@@ -2912,7 +2979,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
if h == nil { // Ignore native histograms.
group.groupCount++
delta := f - group.floatMean
- group.floatMean += delta / float64(group.groupCount)
+ group.floatMean += delta / group.groupCount
group.floatValue += delta * (f - group.floatMean)
}
@@ -2938,20 +3005,25 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange()))
continue
}
- if aggr.hasHistogram {
+ switch {
+ case aggr.incompatibleHistograms:
+ continue
+ case aggr.hasHistogram:
aggr.histogramValue = aggr.histogramValue.Compact(0)
- } else {
- aggr.floatValue = aggr.floatMean
+ case aggr.incrementalMean:
+ aggr.floatValue = aggr.floatMean + aggr.floatKahanC
+ default:
+ aggr.floatValue = (aggr.floatValue + aggr.floatKahanC) / aggr.groupCount
}
case parser.COUNT:
- aggr.floatValue = float64(aggr.groupCount)
+ aggr.floatValue = aggr.groupCount
case parser.STDVAR:
- aggr.floatValue /= float64(aggr.groupCount)
+ aggr.floatValue /= aggr.groupCount
case parser.STDDEV:
- aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount))
+ aggr.floatValue = math.Sqrt(aggr.floatValue / aggr.groupCount)
case parser.QUANTILE:
aggr.floatValue = quantile(q, aggr.heap)
@@ -2962,10 +3034,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange()))
continue
}
- if aggr.hasHistogram {
+ switch {
+ case aggr.incompatibleHistograms:
+ continue
+ case aggr.hasHistogram:
aggr.histogramValue.Compact(0)
- } else {
- aggr.floatValue += aggr.floatMean // Add Kahan summation compensating term.
+ default:
+ aggr.floatValue += aggr.floatKahanC
}
default:
// For other aggregations, we already have the right value.
@@ -2973,6 +3048,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
ss := &outputMatrix[ri]
addToSeries(ss, enh.Ts, aggr.floatValue, aggr.histogramValue, numSteps)
+ ss.DropName = inputMatrix[ri].DropName
}
return annos
@@ -2999,7 +3075,7 @@ seriesLoop:
if !ok {
continue
}
- s = Sample{Metric: inputMatrix[si].Metric, F: f}
+ s = Sample{Metric: inputMatrix[si].Metric, F: f, DropName: inputMatrix[si].DropName}
group := &groups[seriesToResult[si]]
// Initialize this group if it's the first time we've seen it.
@@ -3083,16 +3159,16 @@ seriesLoop:
mat = make(Matrix, 0, len(groups))
}
- add := func(lbls labels.Labels, f float64) {
+ add := func(lbls labels.Labels, f float64, dropName bool) {
// If this could be an instant query, add directly to the matrix so the result is in consistent order.
if ev.endTimestamp == ev.startTimestamp {
- mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}})
+ mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}, DropName: dropName})
} else {
// Otherwise the results are added into seriess elements.
hash := lbls.Hash()
ss, ok := seriess[hash]
if !ok {
- ss = Series{Metric: lbls}
+ ss = Series{Metric: lbls, DropName: dropName}
}
addToSeries(&ss, enh.Ts, f, nil, numSteps)
seriess[hash] = ss
@@ -3109,7 +3185,7 @@ seriesLoop:
sort.Sort(sort.Reverse(aggr.heap))
}
for _, v := range aggr.heap {
- add(v.Metric, v.F)
+ add(v.Metric, v.F, v.DropName)
}
case parser.BOTTOMK:
@@ -3118,12 +3194,12 @@ seriesLoop:
sort.Sort(sort.Reverse((*vectorByReverseValueHeap)(&aggr.heap)))
}
for _, v := range aggr.heap {
- add(v.Metric, v.F)
+ add(v.Metric, v.F, v.DropName)
}
case parser.LIMITK, parser.LIMIT_RATIO:
for _, v := range aggr.heap {
- add(v.Metric, v.F)
+ add(v.Metric, v.F, v.DropName)
}
}
}
@@ -3131,7 +3207,7 @@ seriesLoop:
return mat, annos
}
-// aggregationK evaluates count_values on vec.
+// aggregationCountValues evaluates count_values on vec.
// Outputs as many series per group as there are values in the input.
func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []string, valueLabel string, vec Vector, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
type groupCount struct {
@@ -3175,6 +3251,30 @@ func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []
return enh.Out, nil
}
+func (ev *evaluator) cleanupMetricLabels(v parser.Value) {
+ if v.Type() == parser.ValueTypeMatrix {
+ mat := v.(Matrix)
+ for i := range mat {
+ if mat[i].DropName {
+ mat[i].Metric = mat[i].Metric.DropMetricName()
+ }
+ }
+ if mat.ContainsSameLabelset() {
+ ev.errorf("vector cannot contain metrics with the same labelset")
+ }
+ } else if v.Type() == parser.ValueTypeVector {
+ vec := v.(Vector)
+ for i := range vec {
+ if vec[i].DropName {
+ vec[i].Metric = vec[i].Metric.DropMetricName()
+ }
+ }
+ if vec.ContainsSameLabelset() {
+ ev.errorf("vector cannot contain metrics with the same labelset")
+ }
+ }
+}
+
func addToSeries(ss *Series, ts int64, f float64, h *histogram.FloatHistogram, numSteps int) {
if h == nil {
if ss.Floats == nil {
@@ -3485,14 +3585,14 @@ func makeInt64Pointer(val int64) *int64 {
return valp
}
-// Add RatioSampler interface to allow unit-testing (previously: Randomizer).
+// RatioSampler allows unit-testing (previously: Randomizer).
type RatioSampler interface {
// Return this sample "offset" between [0.0, 1.0]
sampleOffset(ts int64, sample *Sample) float64
AddRatioSample(r float64, sample *Sample) bool
}
-// Use Hash(labels.String()) / maxUint64 as a "deterministic"
+// HashRatioSampler uses Hash(labels.String()) / maxUint64 as a "deterministic"
// value in [0.0, 1.0].
type HashRatioSampler struct{}
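A minimal sketch of opting into the new behaviour when constructing an engine (field values here are placeholders, not recommendations):

package main

import (
	"time"

	"github.com/prometheus/prometheus/promql"
)

func main() {
	eng := promql.NewEngine(promql.EngineOpts{
		MaxSamples: 50_000_000,
		Timeout:    2 * time.Minute,
		// Defer dropping __name__ to the final step of evaluation: series
		// are tagged with DropName during evaluation and cleaned up once
		// in Eval, avoiding duplicate-labelset errors mid-query.
		EnableDelayedNameRemoval: true,
	})
	_ = eng
}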
diff --git a/promql/engine_test.go b/promql/engine_test.go
index 523c0613df..923d1264d6 100644
--- a/promql/engine_test.go
+++ b/promql/engine_test.go
@@ -17,7 +17,6 @@ import (
"context"
"errors"
"fmt"
- "math"
"os"
"sort"
"strconv"
@@ -26,7 +25,6 @@ import (
"time"
"github.com/stretchr/testify/require"
- "go.uber.org/goleak"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
@@ -51,7 +49,7 @@ const (
func TestMain(m *testing.M) {
// Enable experimental functions testing
parser.EnableExperimentalFunctions = true
- goleak.VerifyTestMain(m)
+ testutil.TolerantVerifyLeak(m)
}
func TestQueryConcurrency(t *testing.T) {
@@ -1715,7 +1713,8 @@ load 1ms
{F: 3600, T: 6 * 60 * 1000},
{F: 3600, T: 7 * 60 * 1000},
},
- Metric: labels.EmptyLabels(),
+ Metric: labels.EmptyLabels(),
+ DropName: true,
},
},
},
@@ -1931,20 +1930,24 @@ func TestSubquerySelector(t *testing.T) {
nil,
promql.Matrix{
promql.Series{
- Floats: []promql.FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}},
- Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"),
+ Floats: []promql.FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}},
+ Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"),
+ DropName: true,
},
promql.Series{
- Floats: []promql.FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}},
- Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"),
+ Floats: []promql.FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}},
+ Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"),
+ DropName: true,
},
promql.Series{
- Floats: []promql.FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}},
- Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"),
+ Floats: []promql.FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}},
+ Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"),
+ DropName: true,
},
promql.Series{
- Floats: []promql.FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}},
- Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"),
+ Floats: []promql.FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}},
+ Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"),
+ DropName: true,
},
},
nil,
@@ -3098,217 +3101,6 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
}
}
-func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
- // TODO(codesome): Integrate histograms into the PromQL testing framework
- // and write more tests there.
- cases := []struct {
- histograms []histogram.Histogram
- expected histogram.FloatHistogram
- expectedAvg histogram.FloatHistogram
- }{
- {
- histograms: []histogram.Histogram{
- {
- CounterResetHint: histogram.GaugeType,
- Schema: 0,
- Count: 25,
- Sum: 1234.5,
- ZeroThreshold: 0.001,
- ZeroCount: 4,
- PositiveSpans: []histogram.Span{
- {Offset: 0, Length: 2},
- {Offset: 1, Length: 2},
- },
- PositiveBuckets: []int64{1, 1, -1, 0},
- NegativeSpans: []histogram.Span{
- {Offset: 0, Length: 2},
- {Offset: 2, Length: 2},
- },
- NegativeBuckets: []int64{2, 2, -3, 8},
- },
- {
- CounterResetHint: histogram.GaugeType,
- Schema: 0,
- Count: 41,
- Sum: 2345.6,
- ZeroThreshold: 0.001,
- ZeroCount: 5,
- PositiveSpans: []histogram.Span{
- {Offset: 0, Length: 4},
- {Offset: 0, Length: 0},
- {Offset: 0, Length: 3},
- },
- PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
- NegativeSpans: []histogram.Span{
- {Offset: 1, Length: 4},
- {Offset: 2, Length: 0},
- {Offset: 2, Length: 3},
- },
- NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
- },
- {
- CounterResetHint: histogram.GaugeType,
- Schema: 0,
- Count: 41,
- Sum: 1111.1,
- ZeroThreshold: 0.001,
- ZeroCount: 5,
- PositiveSpans: []histogram.Span{
- {Offset: 0, Length: 4},
- {Offset: 0, Length: 0},
- {Offset: 0, Length: 3},
- },
- PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
- NegativeSpans: []histogram.Span{
- {Offset: 1, Length: 4},
- {Offset: 2, Length: 0},
- {Offset: 2, Length: 3},
- },
- NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
- },
- {
- CounterResetHint: histogram.GaugeType,
- Schema: 1, // Everything is 0 just to make the count 4 so avg has nicer numbers.
- },
- },
- expected: histogram.FloatHistogram{
- CounterResetHint: histogram.GaugeType,
- Schema: 0,
- ZeroThreshold: 0.001,
- ZeroCount: 14,
- Count: 107,
- Sum: 4691.2,
- PositiveSpans: []histogram.Span{
- {Offset: 0, Length: 7},
- },
- PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2},
- NegativeSpans: []histogram.Span{
- {Offset: 0, Length: 6},
- {Offset: 3, Length: 3},
- },
- NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4},
- },
- expectedAvg: histogram.FloatHistogram{
- CounterResetHint: histogram.GaugeType,
- Schema: 0,
- ZeroThreshold: 0.001,
- ZeroCount: 3.5,
- Count: 26.75,
- Sum: 1172.8,
- PositiveSpans: []histogram.Span{
- {Offset: 0, Length: 7},
- },
- PositiveBuckets: []float64{0.75, 2, 0.5, 1.25, 0.75, 0.5, 0.5},
- NegativeSpans: []histogram.Span{
- {Offset: 0, Length: 6},
- {Offset: 3, Length: 3},
- },
- NegativeBuckets: []float64{0.5, 1.5, 2, 1, 3.75, 2.25, 2.5, 2.5, 1},
- },
- },
- }
-
- idx0 := int64(0)
- for _, c := range cases {
- for _, floatHisto := range []bool{true, false} {
- t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
- storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
-
- seriesName := "sparse_histogram_series"
- seriesNameOverTime := "sparse_histogram_series_over_time"
-
- engine := newTestEngine()
-
- ts := idx0 * int64(10*time.Minute/time.Millisecond)
- app := storage.Appender(context.Background())
- _, err := app.Append(0, labels.FromStrings("__name__", "float_series", "idx", "0"), ts, 42)
- require.NoError(t, err)
- for idx1, h := range c.histograms {
- lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1))
- // Since we mutate h later, we need to create a copy here.
- var err error
- if floatHisto {
- _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil))
- } else {
- _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
- }
- require.NoError(t, err)
-
- lbls = labels.FromStrings("__name__", seriesNameOverTime)
- newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond)
- // Since we mutate h later, we need to create a copy here.
- if floatHisto {
- _, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat(nil))
- } else {
- _, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil)
- }
- require.NoError(t, err)
- }
- require.NoError(t, app.Commit())
-
- queryAndCheck := func(queryString string, ts int64, exp promql.Vector) {
- qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
- require.NoError(t, err)
-
- res := qry.Exec(context.Background())
- require.NoError(t, res.Err)
- require.Empty(t, res.Warnings)
-
- vector, err := res.Vector()
- require.NoError(t, err)
-
- testutil.RequireEqual(t, exp, vector)
- }
- queryAndCheckAnnotations := func(queryString string, ts int64, expWarnings annotations.Annotations) {
- qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
- require.NoError(t, err)
-
- res := qry.Exec(context.Background())
- require.NoError(t, res.Err)
- require.Equal(t, expWarnings, res.Warnings)
- }
-
- // sum().
- queryString := fmt.Sprintf("sum(%s)", seriesName)
- queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
-
- queryString = `sum({idx="0"})`
- var annos annotations.Annotations
- annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(posrange.PositionRange{Start: 4, End: 13}))
- queryAndCheckAnnotations(queryString, ts, annos)
-
- // + operator.
- queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName)
- for idx := 1; idx < len(c.histograms); idx++ {
- queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx)
- }
- queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
-
- // count().
- queryString = fmt.Sprintf("count(%s)", seriesName)
- queryAndCheck(queryString, ts, []promql.Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}})
-
- // avg().
- queryString = fmt.Sprintf("avg(%s)", seriesName)
- queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})
-
- offset := int64(len(c.histograms) - 1)
- newTs := ts + offset*int64(time.Minute/time.Millisecond)
-
- // sum_over_time().
- queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset)
- queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels()}})
-
- // avg_over_time().
- queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset)
- queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})
- })
- idx0++
- }
- }
-}
-
func TestNativeHistogram_SubOperator(t *testing.T) {
// TODO(codesome): Integrate histograms into the PromQL testing framework
// and write more tests there.
@@ -3544,171 +3336,6 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
}
}
-func TestNativeHistogram_MulDivOperator(t *testing.T) {
- // TODO(codesome): Integrate histograms into the PromQL testing framework
- // and write more tests there.
- originalHistogram := histogram.Histogram{
- Schema: 0,
- Count: 21,
- Sum: 33,
- ZeroThreshold: 0.001,
- ZeroCount: 3,
- PositiveSpans: []histogram.Span{
- {Offset: 0, Length: 3},
- },
- PositiveBuckets: []int64{3, 0, 0},
- NegativeSpans: []histogram.Span{
- {Offset: 0, Length: 3},
- },
- NegativeBuckets: []int64{3, 0, 0},
- }
-
- cases := []struct {
- scalar float64
- histogram histogram.Histogram
- expectedMul histogram.FloatHistogram
- expectedDiv histogram.FloatHistogram
- }{
- {
- scalar: 3,
- histogram: originalHistogram,
- expectedMul: histogram.FloatHistogram{
- Schema: 0,
- Count: 63,
- Sum: 99,
- ZeroThreshold: 0.001,
- ZeroCount: 9,
- PositiveSpans: []histogram.Span{
- {Offset: 0, Length: 3},
- },
- PositiveBuckets: []float64{9, 9, 9},
- NegativeSpans: []histogram.Span{
- {Offset: 0, Length: 3},
- },
- NegativeBuckets: []float64{9, 9, 9},
- },
- expectedDiv: histogram.FloatHistogram{
- Schema: 0,
- Count: 7,
- Sum: 11,
- ZeroThreshold: 0.001,
- ZeroCount: 1,
- PositiveSpans: []histogram.Span{
- {Offset: 0, Length: 3},
- },
- PositiveBuckets: []float64{1, 1, 1},
- NegativeSpans: []histogram.Span{
- {Offset: 0, Length: 3},
- },
- NegativeBuckets: []float64{1, 1, 1},
- },
- },
- {
- scalar: 0,
- histogram: originalHistogram,
- expectedMul: histogram.FloatHistogram{
- Schema: 0,
- Count: 0,
- Sum: 0,
- ZeroThreshold: 0.001,
- ZeroCount: 0,
- PositiveSpans: []histogram.Span{
- {Offset: 0, Length: 3},
- },
- PositiveBuckets: []float64{0, 0, 0},
- NegativeSpans: []histogram.Span{
- {Offset: 0, Length: 3},
- },
- NegativeBuckets: []float64{0, 0, 0},
- },
- expectedDiv: histogram.FloatHistogram{
- Schema: 0,
- Count: math.Inf(1),
- Sum: math.Inf(1),
- ZeroThreshold: 0.001,
- ZeroCount: math.Inf(1),
- PositiveSpans: []histogram.Span{
- {Offset: 0, Length: 3},
- },
- PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)},
- NegativeSpans: []histogram.Span{
- {Offset: 0, Length: 3},
- },
- NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)},
- },
- },
- }
-
- idx0 := int64(0)
- for _, c := range cases {
- for _, floatHisto := range []bool{true, false} {
- t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
- storage := teststorage.New(t)
- t.Cleanup(func() { storage.Close() })
-
- seriesName := "sparse_histogram_series"
- floatSeriesName := "float_series"
-
- engine := newTestEngine()
-
- ts := idx0 * int64(10*time.Minute/time.Millisecond)
- app := storage.Appender(context.Background())
- h := c.histogram
- lbls := labels.FromStrings("__name__", seriesName)
- // Since we mutate h later, we need to create a copy here.
- var err error
- if floatHisto {
- _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil))
- } else {
- _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
- }
- require.NoError(t, err)
- _, err = app.Append(0, labels.FromStrings("__name__", floatSeriesName), ts, c.scalar)
- require.NoError(t, err)
- require.NoError(t, app.Commit())
-
- queryAndCheck := func(queryString string, exp promql.Vector) {
- qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
- require.NoError(t, err)
-
- res := qry.Exec(context.Background())
- require.NoError(t, res.Err)
-
- vector, err := res.Vector()
- require.NoError(t, err)
-
- testutil.RequireEqual(t, exp, vector)
- }
-
- // histogram * scalar.
- queryString := fmt.Sprintf(`%s * %f`, seriesName, c.scalar)
- queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
- // scalar * histogram.
- queryString = fmt.Sprintf(`%f * %s`, c.scalar, seriesName)
- queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
- // histogram * float.
- queryString = fmt.Sprintf(`%s * %s`, seriesName, floatSeriesName)
- queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
- // float * histogram.
- queryString = fmt.Sprintf(`%s * %s`, floatSeriesName, seriesName)
- queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
- // histogram / scalar.
- queryString = fmt.Sprintf(`%s / %f`, seriesName, c.scalar)
- queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
-
- // histogram / float.
- queryString = fmt.Sprintf(`%s / %s`, seriesName, floatSeriesName)
- queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
- })
- idx0++
- }
- }
-}
-
func TestQueryLookbackDelta(t *testing.T) {
var (
load = `load 5m
@@ -3798,3 +3425,62 @@ func makeInt64Pointer(val int64) *int64 {
*valp = val
return valp
}
+
+func TestHistogramCopyFromIteratorRegression(t *testing.T) {
+ // Loading the following histograms creates two chunks because there's a
+ // counter reset. Not only is the counter lower in the last histogram,
+ // but buckets are also missing.
+ // This in turn means that chunk iterators will have different spans.
+ load := `load 1m
+histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum:1 count:1 buckets:[1]}}
+`
+ storage := promqltest.LoadedStorage(t, load)
+ t.Cleanup(func() { storage.Close() })
+ engine := promqltest.NewTestEngine(false, 0, promqltest.DefaultMaxSamplesPerQuery)
+
+ verify := func(t *testing.T, qry promql.Query, expected []histogram.FloatHistogram) {
+ res := qry.Exec(context.Background())
+ require.NoError(t, res.Err)
+
+ m, ok := res.Value.(promql.Matrix)
+ require.True(t, ok)
+
+ require.Len(t, m, 1)
+ series := m[0]
+
+ require.Empty(t, series.Floats)
+ require.Len(t, series.Histograms, len(expected))
+ for i, e := range expected {
+ series.Histograms[i].H.CounterResetHint = histogram.UnknownCounterReset // Don't care.
+ require.Equal(t, &e, series.Histograms[i].H)
+ }
+ }
+
+ qry, err := engine.NewRangeQuery(context.Background(), storage, nil, "increase(histogram[60s])", time.Unix(0, 0), time.Unix(0, 0).Add(1*time.Minute), time.Minute)
+ require.NoError(t, err)
+ verify(t, qry, []histogram.FloatHistogram{
+ {
+ Count: 2,
+ Sum: 2, // Increase from 4 to 6 is 2.
+ PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, // Two buckets changed between the first and second histogram.
+ PositiveBuckets: []float64{1, 1}, // Increase from 2 to 3 is 1 in both buckets.
+ },
+ })
+
+ qry, err = engine.NewInstantQuery(context.Background(), storage, nil, "histogram[60s]", time.Unix(0, 0).Add(2*time.Minute))
+ require.NoError(t, err)
+ verify(t, qry, []histogram.FloatHistogram{
+ {
+ Count: 6,
+ Sum: 6,
+ PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
+ PositiveBuckets: []float64{3, 3},
+ },
+ {
+ Count: 1,
+ Sum: 1,
+ PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
+ PositiveBuckets: []float64{1},
+ },
+ })
+}
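
For context on the two-chunk split this regression test depends on: the TSDB cuts a new chunk whenever a native histogram looks like a counter reset, i.e. its count drops or buckets disappear. Below is a minimal sketch of that detection using the exported DetectReset helper on FloatHistogram; the appender applies a more involved version of this check internally, and the literal values merely mirror the load string above.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// Mirrors the loaded series: {{sum:6 count:6 buckets:[3 3]}}
	// followed by {{sum:1 count:1 buckets:[1]}}.
	prev := &histogram.FloatHistogram{
		Schema: 0, Count: 6, Sum: 6,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{3, 3},
	}
	curr := &histogram.FloatHistogram{
		Schema: 0, Count: 1, Sum: 1,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{1},
	}
	// The count dropped and a bucket disappeared, so this reads as a
	// counter reset and the appender cuts a new chunk at this sample.
	fmt.Println(curr.DetectReset(prev)) // true
}
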
diff --git a/promql/functions.go b/promql/functions.go
index dcc2cd7590..9fa7fbe190 100644
--- a/promql/functions.go
+++ b/promql/functions.go
@@ -97,9 +97,10 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
lastT = samples.Histograms[numSamplesMinusOne].T
var newAnnos annotations.Annotations
resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange())
+ annos.Merge(newAnnos)
if resultHistogram == nil {
// The histograms are not compatible with each other.
- return enh.Out, annos.Merge(newAnnos)
+ return enh.Out, annos
}
case len(samples.Floats) > 1:
numSamplesMinusOne = len(samples.Floats) - 1
@@ -178,17 +179,29 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
// Otherwise, it returns the calculated histogram and an empty annotation.
func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) {
prev := points[0].H
+ usingCustomBuckets := prev.UsesCustomBuckets()
last := points[len(points)-1].H
if last == nil {
return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos))
}
+
minSchema := prev.Schema
if last.Schema < minSchema {
minSchema = last.Schema
}
+ if last.UsesCustomBuckets() != usingCustomBuckets {
+ return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+ }
+
var annos annotations.Annotations
+ // The loop below checks for gauge-type histograms, but it does not run on
+ // the first and last point, so check those two points here.
+ if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) {
+ annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos))
+ }
+
// First iteration to find out two things:
// - What's the smallest relevant schema?
// - Are all data points histograms?
@@ -208,6 +221,9 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
if curr.Schema < minSchema {
minSchema = curr.Schema
}
+ if curr.UsesCustomBuckets() != usingCustomBuckets {
+ return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+ }
}
h := last.CopyToSchema(minSchema)
@@ -241,7 +257,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
}
h.CounterResetHint = histogram.GaugeType
- return h.Compact(0), nil
+ return h.Compact(0), annos
}
// === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
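
The new UsesCustomBuckets guard rejects rate() windows that mix exponential and custom-bucket (NHCB) histograms, since there is no common schema to subtract them in. A standalone sketch of the same check follows; mixedSchemaFamilies is a hypothetical helper, not part of this diff, and histogramRate answers the mixed case with a MixedExponentialCustomHistograms warning instead of a bogus rate.

package sketch

import "github.com/prometheus/prometheus/promql"

// mixedSchemaFamilies reports whether a window of histogram samples mixes
// exponential and custom-bucket histograms, mirroring the guard above.
func mixedSchemaFamilies(points []promql.HPoint) bool {
	usesCustom := points[0].H.UsesCustomBuckets()
	for _, p := range points[1:] {
		if p.H.UsesCustomBuckets() != usesCustom {
			return true
		}
	}
	return false
}
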
@@ -390,17 +406,22 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
// === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
- // In case the labels are the same, NaN should sort to the bottom, so take
- // ascending sort with NaN first and reverse it.
- var anno annotations.Annotations
- vals[0], anno = funcSort(vals, args, enh)
- labels := stringSliceFromArgs(args[1:])
+ // First, sort by the full label set. This ensures a consistent ordering in case sorting by the
+ // labels provided as arguments is not conclusive.
slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
- // Iterate over each given label
+ return labels.Compare(a.Metric, b.Metric)
+ })
+
+ labels := stringSliceFromArgs(args[1:])
+ // Next, sort by the labels provided as arguments.
+ slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
+ // Iterate over each given label.
for _, label := range labels {
lv1 := a.Metric.Get(label)
lv2 := b.Metric.Get(label)
+ // If multiple samples share the same values for the given labels, the
+ // sort performed in the first step acts as a tie-breaker.
if lv1 == lv2 {
continue
}
@@ -415,22 +436,27 @@ func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNode
return 0
})
- return vals[0].(Vector), anno
+ return vals[0].(Vector), nil
}
// === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
- // In case the labels are the same, NaN should sort to the bottom, so take
- // ascending sort with NaN first and reverse it.
- var anno annotations.Annotations
- vals[0], anno = funcSortDesc(vals, args, enh)
- labels := stringSliceFromArgs(args[1:])
+ // First, sort by the full label set. This ensures a consistent ordering in case sorting by the
+ // labels provided as arguments is not conclusive.
slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
- // Iterate over each given label
+ return labels.Compare(b.Metric, a.Metric)
+ })
+
+ labels := stringSliceFromArgs(args[1:])
+ // Next, sort by the labels provided as arguments.
+ slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
+ // Iterate over each given label.
for _, label := range labels {
lv1 := a.Metric.Get(label)
lv2 := b.Metric.Get(label)
+ // If multiple samples share the same values for the given labels, the
+ // sort performed in the first step acts as a tie-breaker.
if lv1 == lv2 {
continue
}
@@ -445,21 +471,25 @@ func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *Eval
return 0
})
- return vals[0].(Vector), anno
+ return vals[0].(Vector), nil
}
// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) ===
func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector)
- min := vals[1].(Vector)[0].F
- max := vals[2].(Vector)[0].F
- if max < min {
+ minVal := vals[1].(Vector)[0].F
+ maxVal := vals[2].(Vector)[0].F
+ if maxVal < minVal {
return enh.Out, nil
}
for _, el := range vec {
+ if !enh.enableDelayedNameRemoval {
+ el.Metric = el.Metric.DropMetricName()
+ }
enh.Out = append(enh.Out, Sample{
- Metric: el.Metric.DropMetricName(),
- F: math.Max(min, math.Min(max, el.F)),
+ Metric: el.Metric,
+ F: math.Max(minVal, math.Min(maxVal, el.F)),
+ DropName: true,
})
}
return enh.Out, nil
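
The rewritten sort_by_label functions work in two passes: a deterministic pre-sort over the full label set, then a sort on the requested labels only, so equal label values fall back to the pre-sort order. A self-contained sketch of the idea, using labels.Labels directly rather than promql.Sample; the sketch uses slices.SortStableFunc in the second pass so that ties provably keep the first pass's order.

package main

import (
	"fmt"
	"slices"
	"strings"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	series := []labels.Labels{
		labels.FromStrings("instance", "a", "job", "node"),
		labels.FromStrings("instance", "a", "job", "api"),
		labels.FromStrings("instance", "b", "job", "api"),
	}
	// Pass 1: deterministic baseline order over the full label set.
	slices.SortFunc(series, labels.Compare)
	// Pass 2: order by the requested label only; equal values keep the
	// baseline order because this pass is stable.
	slices.SortStableFunc(series, func(a, b labels.Labels) int {
		return strings.Compare(a.Get("instance"), b.Get("instance"))
	})
	for _, s := range series {
		fmt.Println(s)
	}
}
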
@@ -468,11 +498,15 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
// === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) ===
func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector)
- max := vals[1].(Vector)[0].F
+ maxVal := vals[1].(Vector)[0].F
for _, el := range vec {
+ if !enh.enableDelayedNameRemoval {
+ el.Metric = el.Metric.DropMetricName()
+ }
enh.Out = append(enh.Out, Sample{
- Metric: el.Metric.DropMetricName(),
- F: math.Min(max, el.F),
+ Metric: el.Metric,
+ F: math.Min(maxVal, el.F),
+ DropName: true,
})
}
return enh.Out, nil
@@ -481,11 +515,15 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
// === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) ===
func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector)
- min := vals[1].(Vector)[0].F
+ minVal := vals[1].(Vector)[0].F
for _, el := range vec {
+ if !enh.enableDelayedNameRemoval {
+ el.Metric = el.Metric.DropMetricName()
+ }
enh.Out = append(enh.Out, Sample{
- Metric: el.Metric.DropMetricName(),
- F: math.Max(min, el.F),
+ Metric: el.Metric,
+ F: math.Max(minVal, el.F),
+ DropName: true,
})
}
return enh.Out, nil
@@ -506,8 +544,9 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
for _, el := range vec {
f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse
enh.Out = append(enh.Out, Sample{
- Metric: el.Metric.DropMetricName(),
- F: f,
+ Metric: el.Metric,
+ F: f,
+ DropName: true,
})
}
return enh.Out, nil
@@ -573,9 +612,28 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
return vec, nil
}
return aggrOverTime(vals, enh, func(s Series) float64 {
- var mean, count, c float64
+ var (
+ sum, mean, count, kahanC float64
+ incrementalMean bool
+ )
for _, f := range s.Floats {
count++
+ if !incrementalMean {
+ newSum, newC := kahanSumInc(f.F, sum, kahanC)
+ // Perform the regular mean calculation as long as
+ // the sum doesn't overflow, and in any case for the
+ // first iteration (even if we start with ±Inf), to
+ // avoid division-by-zero problems below.
+ if count == 1 || !math.IsInf(newSum, 0) {
+ sum, kahanC = newSum, newC
+ continue
+ }
+ // Handle overflow by reverting to incremental calculation of the mean value.
+ incrementalMean = true
+ mean = sum / (count - 1)
+ kahanC /= count - 1
+ }
if math.IsInf(mean, 0) {
if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) {
// The `mean` and `f.F` values are `Inf` of the same sign. They
@@ -593,13 +651,13 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
continue
}
}
- mean, c = kahanSumInc(f.F/count-mean/count, mean, c)
+ correctedMean := mean + kahanC
+ mean, kahanC = kahanSumInc(f.F/count-correctedMean/count, mean, kahanC)
}
-
- if math.IsInf(mean, 0) {
- return mean
+ if incrementalMean {
+ return mean + kahanC
}
- return mean + c
+ return (sum + kahanC) / count
}), nil
}
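
The reworked avg_over_time keeps a Kahan-compensated plain sum until it would overflow to ±Inf, and only then switches to an incremental mean for the remaining samples. Here is a self-contained sketch of the same control flow over a plain []float64, with kahanSumInc mirroring the helper used in this file.

package main

import (
	"fmt"
	"math"
)

// kahanSumInc adds inc to sum, carrying a compensation term c for the
// low-order bits lost to rounding (Neumaier variant).
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	switch {
	case math.IsInf(t, 0):
		c = 0
	case math.Abs(sum) >= math.Abs(inc):
		c += (sum - t) + inc
	default:
		c += (inc - t) + sum
	}
	return t, c
}

func main() {
	vals := []float64{1e308, 1e308, -1e308} // a plain sum overflows to +Inf
	var sum, c, mean float64
	var count float64
	incremental := false
	for _, v := range vals {
		count++
		if !incremental {
			newSum, newC := kahanSumInc(v, sum, c)
			if count == 1 || !math.IsInf(newSum, 0) {
				sum, c = newSum, newC
				continue
			}
			// The sum overflowed: switch to incrementally updating the mean.
			incremental = true
			mean = sum / (count - 1)
			c /= count - 1
		}
		mean, c = kahanSumInc(v/count-(mean+c)/count, mean, c)
	}
	if incremental {
		fmt.Println(mean + c) // ≈3.33e307, the true mean
	} else {
		fmt.Println((sum + c) / count)
	}
}
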
@@ -665,13 +723,13 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
return enh.Out, nil
}
return aggrOverTime(vals, enh, func(s Series) float64 {
- max := s.Floats[0].F
+ maxVal := s.Floats[0].F
for _, f := range s.Floats {
- if f.F > max || math.IsNaN(max) {
- max = f.F
+ if f.F > maxVal || math.IsNaN(maxVal) {
+ maxVal = f.F
}
}
- return max
+ return maxVal
}), nil
}
@@ -685,13 +743,13 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
return enh.Out, nil
}
return aggrOverTime(vals, enh, func(s Series) float64 {
- min := s.Floats[0].F
+ minVal := s.Floats[0].F
for _, f := range s.Floats {
- if f.F < min || math.IsNaN(min) {
- min = f.F
+ if f.F < minVal || math.IsNaN(minVal) {
+ minVal = f.F
}
}
- return min
+ return minVal
}), nil
}
@@ -837,9 +895,13 @@ func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *Eval
func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector {
for _, el := range vals[0].(Vector) {
if el.H == nil { // Process only float samples.
+ if !enh.enableDelayedNameRemoval {
+ el.Metric = el.Metric.DropMetricName()
+ }
enh.Out = append(enh.Out, Sample{
- Metric: el.Metric.DropMetricName(),
- F: f(el.F),
+ Metric: el.Metric,
+ F: f(el.F),
+ DropName: true,
})
}
}
@@ -983,9 +1045,13 @@ func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper)
func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector)
for _, el := range vec {
+ if !enh.enableDelayedNameRemoval {
+ el.Metric = el.Metric.DropMetricName()
+ }
enh.Out = append(enh.Out, Sample{
- Metric: el.Metric.DropMetricName(),
- F: float64(el.T) / 1000,
+ Metric: el.Metric,
+ F: float64(el.T) / 1000,
+ DropName: true,
})
}
return enh.Out, nil
@@ -1092,9 +1158,13 @@ func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalN
if sample.H == nil {
continue
}
+ if !enh.enableDelayedNameRemoval {
+ sample.Metric = sample.Metric.DropMetricName()
+ }
enh.Out = append(enh.Out, Sample{
- Metric: sample.Metric.DropMetricName(),
- F: sample.H.Count,
+ Metric: sample.Metric,
+ F: sample.H.Count,
+ DropName: true,
})
}
return enh.Out, nil
@@ -1109,9 +1179,13 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod
if sample.H == nil {
continue
}
+ if !enh.enableDelayedNameRemoval {
+ sample.Metric = sample.Metric.DropMetricName()
+ }
enh.Out = append(enh.Out, Sample{
- Metric: sample.Metric.DropMetricName(),
- F: sample.H.Sum,
+ Metric: sample.Metric,
+ F: sample.H.Sum,
+ DropName: true,
})
}
return enh.Out, nil
@@ -1126,9 +1200,13 @@ func funcHistogramAvg(vals []parser.Value, args parser.Expressions, enh *EvalNod
if sample.H == nil {
continue
}
+ if !enh.enableDelayedNameRemoval {
+ sample.Metric = sample.Metric.DropMetricName()
+ }
enh.Out = append(enh.Out, Sample{
- Metric: sample.Metric.DropMetricName(),
- F: sample.H.Sum / sample.H.Count,
+ Metric: sample.Metric,
+ F: sample.H.Sum / sample.H.Count,
+ DropName: true,
})
}
return enh.Out, nil
@@ -1165,9 +1243,13 @@ func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *Eval
}
variance += cVariance
variance /= sample.H.Count
+ if !enh.enableDelayedNameRemoval {
+ sample.Metric = sample.Metric.DropMetricName()
+ }
enh.Out = append(enh.Out, Sample{
- Metric: sample.Metric.DropMetricName(),
- F: math.Sqrt(variance),
+ Metric: sample.Metric,
+ F: math.Sqrt(variance),
+ DropName: true,
})
}
return enh.Out, nil
@@ -1204,9 +1286,13 @@ func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *Eval
}
variance += cVariance
variance /= sample.H.Count
+ if !enh.enableDelayedNameRemoval {
+ sample.Metric = sample.Metric.DropMetricName()
+ }
enh.Out = append(enh.Out, Sample{
- Metric: sample.Metric.DropMetricName(),
- F: variance,
+ Metric: sample.Metric,
+ F: variance,
+ DropName: true,
})
}
return enh.Out, nil
@@ -1223,9 +1309,13 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev
if sample.H == nil {
continue
}
+ if !enh.enableDelayedNameRemoval {
+ sample.Metric = sample.Metric.DropMetricName()
+ }
enh.Out = append(enh.Out, Sample{
- Metric: sample.Metric.DropMetricName(),
- F: histogramFraction(lower, upper, sample.H),
+ Metric: sample.Metric,
+ F: histogramFraction(lower, upper, sample.H),
+ DropName: true,
})
}
return enh.Out, nil
@@ -1293,9 +1383,13 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
continue
}
+ if !enh.enableDelayedNameRemoval {
+ sample.Metric = sample.Metric.DropMetricName()
+ }
enh.Out = append(enh.Out, Sample{
- Metric: sample.Metric.DropMetricName(),
- F: histogramQuantile(q, sample.H),
+ Metric: sample.Metric,
+ F: histogramQuantile(q, sample.H),
+ DropName: true,
})
}
@@ -1397,6 +1491,11 @@ func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, an
lb.Reset(el.Metric)
lb.Set(dst, string(res))
matrix[i].Metric = lb.Labels()
+ if dst == model.MetricNameLabel {
+ matrix[i].DropName = false
+ } else {
+ matrix[i].DropName = el.DropName
+ }
}
}
if matrix.ContainsSameLabelset() {
@@ -1451,6 +1550,12 @@ func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annot
lb.Reset(el.Metric)
lb.Set(dst, strval)
matrix[i].Metric = lb.Labels()
+
+ if dst == model.MetricNameLabel {
+ matrix[i].DropName = false
+ } else {
+ matrix[i].DropName = el.DropName
+ }
}
return matrix, ws
@@ -1473,9 +1578,13 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo
for _, el := range vals[0].(Vector) {
t := time.Unix(int64(el.F), 0).UTC()
+ if !enh.enableDelayedNameRemoval {
+ el.Metric = el.Metric.DropMetricName()
+ }
enh.Out = append(enh.Out, Sample{
- Metric: el.Metric.DropMetricName(),
- F: f(t),
+ Metric: el.Metric,
+ F: f(t),
+ DropName: true,
})
}
return enh.Out
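
All of the functions.go changes above follow one pattern: instead of rebuilding the label set with DropMetricName for every output sample, a function marks the sample with DropName: true, and the engine strips __name__ once at the end of evaluation; label_replace and label_join clear the flag again when they write __name__. A condensed illustration of the deferred pass, using a hypothetical local stand-in type rather than promql.Sample so the sketch stays self-contained.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// sample stands in for promql.Sample with the DropName field from this diff.
type sample struct {
	metric   labels.Labels
	dropName bool
}

func main() {
	out := []sample{
		// A function such as clamp() flagged this sample instead of
		// rebuilding its label set on the spot.
		{metric: labels.FromStrings("__name__", "http_requests_total", "job", "api"), dropName: true},
		// label_replace() wrote __name__, which resets the flag.
		{metric: labels.FromStrings("__name__", "renamed", "job", "api"), dropName: false},
	}
	// The engine strips the metric name in one deferred pass, rather than
	// calling DropMetricName per sample inside every function.
	for i := range out {
		if out[i].dropName {
			out[i].metric = out[i].metric.DropMetricName()
		}
	}
	for _, s := range out {
		fmt.Println(s.metric)
	}
}
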
diff --git a/promql/fuzz.go b/promql/fuzz.go
index 3fd50b9496..5f08e6a72c 100644
--- a/promql/fuzz.go
+++ b/promql/fuzz.go
@@ -68,6 +68,10 @@ func fuzzParseMetricWithContentType(in []byte, contentType string) int {
panic(warning)
}
+ if contentType == "application/openmetrics-text" {
+ p = textparse.NewOpenMetricsParser(in, symbolTable)
+ }
+
var err error
for {
_, err = p.Next()
diff --git a/promql/histogram_stats_iterator.go b/promql/histogram_stats_iterator.go
index dfafea5f8c..459d5924ae 100644
--- a/promql/histogram_stats_iterator.go
+++ b/promql/histogram_stats_iterator.go
@@ -48,7 +48,6 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi
var t int64
t, f.currentH = f.Iterator.AtHistogram(f.currentH)
if value.IsStaleNaN(f.currentH.Sum) {
- f.setLastH(f.currentH)
h = &histogram.Histogram{Sum: f.currentH.Sum}
return t, h
}
@@ -63,9 +62,13 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi
return t, h
}
- h.CounterResetHint = f.getResetHint(f.currentH)
- h.Count = f.currentH.Count
- h.Sum = f.currentH.Sum
+ returnValue := histogram.Histogram{
+ CounterResetHint: f.getResetHint(f.currentH),
+ Count: f.currentH.Count,
+ Sum: f.currentH.Sum,
+ }
+ returnValue.CopyTo(h)
+
f.setLastH(f.currentH)
return t, h
}
@@ -77,7 +80,6 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
var t int64
t, f.currentFH = f.Iterator.AtFloatHistogram(f.currentFH)
if value.IsStaleNaN(f.currentFH.Sum) {
- f.setLastFH(f.currentFH)
return t, &histogram.FloatHistogram{Sum: f.currentFH.Sum}
}
@@ -91,9 +93,13 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
return t, fh
}
- fh.CounterResetHint = f.getFloatResetHint(f.currentFH.CounterResetHint)
- fh.Count = f.currentFH.Count
- fh.Sum = f.currentFH.Sum
+ returnValue := histogram.FloatHistogram{
+ CounterResetHint: f.getFloatResetHint(f.currentFH.CounterResetHint),
+ Count: f.currentFH.Count,
+ Sum: f.currentFH.Sum,
+ }
+ returnValue.CopyTo(fh)
+
f.setLastFH(f.currentFH)
return t, fh
}
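
The iterator fix replaces field-wise assignment into a possibly reused histogram with CopyTo from a freshly constructed stats-only value, so spans and buckets left over from a previous call can no longer leak into the result. A minimal demonstration of the reuse hazard, using only the public histogram API:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// A histogram being reused across iterator calls, still carrying buckets.
	reused := &histogram.Histogram{
		Count: 10, Sum: 100,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []int64{10},
	}

	// Old behavior: assigning only Count and Sum leaves the buckets behind.
	reused.Count, reused.Sum = 5, 50
	fmt.Println(len(reused.PositiveBuckets)) // 1 — stale bucket survives

	// New behavior: copying from a fresh stats-only value resets everything.
	(&histogram.Histogram{Count: 5, Sum: 50}).CopyTo(reused)
	fmt.Println(len(reused.PositiveBuckets)) // 0
}
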
diff --git a/promql/histogram_stats_iterator_test.go b/promql/histogram_stats_iterator_test.go
index b71a9d6029..7a2953d3e2 100644
--- a/promql/histogram_stats_iterator_test.go
+++ b/promql/histogram_stats_iterator_test.go
@@ -14,62 +14,132 @@
package promql
import (
+ "fmt"
+ "math"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
)
func TestHistogramStatsDecoding(t *testing.T) {
- histograms := []*histogram.Histogram{
- tsdbutil.GenerateTestHistogram(0),
- tsdbutil.GenerateTestHistogram(1),
- tsdbutil.GenerateTestHistogram(2),
- tsdbutil.GenerateTestHistogram(2),
- }
- histograms[0].CounterResetHint = histogram.NotCounterReset
- histograms[1].CounterResetHint = histogram.UnknownCounterReset
- histograms[2].CounterResetHint = histogram.CounterReset
- histograms[3].CounterResetHint = histogram.UnknownCounterReset
-
- expectedHints := []histogram.CounterResetHint{
- histogram.NotCounterReset,
- histogram.NotCounterReset,
- histogram.CounterReset,
- histogram.NotCounterReset,
+ cases := []struct {
+ name string
+ histograms []*histogram.Histogram
+ expectedHints []histogram.CounterResetHint
+ }{
+ {
+ name: "unknown counter reset triggers detection",
+ histograms: []*histogram.Histogram{
+ tsdbutil.GenerateTestHistogramWithHint(0, histogram.NotCounterReset),
+ tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
+ tsdbutil.GenerateTestHistogramWithHint(2, histogram.CounterReset),
+ tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset),
+ },
+ expectedHints: []histogram.CounterResetHint{
+ histogram.NotCounterReset,
+ histogram.NotCounterReset,
+ histogram.CounterReset,
+ histogram.NotCounterReset,
+ },
+ },
+ {
+ name: "stale sample before unknown reset hint",
+ histograms: []*histogram.Histogram{
+ tsdbutil.GenerateTestHistogramWithHint(0, histogram.NotCounterReset),
+ tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
+ {Sum: math.Float64frombits(value.StaleNaN)},
+ tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
+ },
+ expectedHints: []histogram.CounterResetHint{
+ histogram.NotCounterReset,
+ histogram.NotCounterReset,
+ histogram.UnknownCounterReset,
+ histogram.NotCounterReset,
+ },
+ },
+ {
+ name: "unknown counter reset at the beginning",
+ histograms: []*histogram.Histogram{
+ tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
+ },
+ expectedHints: []histogram.CounterResetHint{
+ histogram.NotCounterReset,
+ },
+ },
+ {
+ name: "detect real counter reset",
+ histograms: []*histogram.Histogram{
+ tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset),
+ tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
+ },
+ expectedHints: []histogram.CounterResetHint{
+ histogram.NotCounterReset,
+ histogram.CounterReset,
+ },
+ },
+ {
+ name: "detect real counter reset after stale NaN",
+ histograms: []*histogram.Histogram{
+ tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset),
+ {Sum: math.Float64frombits(value.StaleNaN)},
+ tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
+ },
+ expectedHints: []histogram.CounterResetHint{
+ histogram.NotCounterReset,
+ histogram.UnknownCounterReset,
+ histogram.CounterReset,
+ },
+ },
}
- t.Run("histogram_stats", func(t *testing.T) {
- decodedStats := make([]*histogram.Histogram, 0)
- statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil))
- for statsIterator.Next() != chunkenc.ValNone {
- _, h := statsIterator.AtHistogram(nil)
- decodedStats = append(decodedStats, h)
- }
- for i := 0; i < len(histograms); i++ {
- require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint)
- require.Equal(t, histograms[i].Count, decodedStats[i].Count)
- require.Equal(t, histograms[i].Sum, decodedStats[i].Sum)
- }
- })
- t.Run("float_histogram_stats", func(t *testing.T) {
- decodedStats := make([]*histogram.FloatHistogram, 0)
- statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil))
- for statsIterator.Next() != chunkenc.ValNone {
- _, h := statsIterator.AtFloatHistogram(nil)
- decodedStats = append(decodedStats, h)
- }
- for i := 0; i < len(histograms); i++ {
- fh := histograms[i].ToFloat(nil)
- require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint)
- require.Equal(t, fh.Count, decodedStats[i].Count)
- require.Equal(t, fh.Sum, decodedStats[i].Sum)
- }
- })
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Run("histogram_stats", func(t *testing.T) {
+ decodedStats := make([]*histogram.Histogram, 0)
+ statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil))
+ for statsIterator.Next() != chunkenc.ValNone {
+ _, h := statsIterator.AtHistogram(nil)
+ decodedStats = append(decodedStats, h)
+ }
+ for i := 0; i < len(tc.histograms); i++ {
+ require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint, fmt.Sprintf("mismatch in counter reset hint for histogram %d", i))
+ h := tc.histograms[i]
+ if value.IsStaleNaN(h.Sum) {
+ require.True(t, value.IsStaleNaN(decodedStats[i].Sum))
+ require.Equal(t, uint64(0), decodedStats[i].Count)
+ } else {
+ require.Equal(t, tc.histograms[i].Count, decodedStats[i].Count)
+ require.Equal(t, tc.histograms[i].Sum, decodedStats[i].Sum)
+ }
+ }
+ })
+ t.Run("float_histogram_stats", func(t *testing.T) {
+ decodedStats := make([]*histogram.FloatHistogram, 0)
+ statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil))
+ for statsIterator.Next() != chunkenc.ValNone {
+ _, h := statsIterator.AtFloatHistogram(nil)
+ decodedStats = append(decodedStats, h)
+ }
+ for i := 0; i < len(tc.histograms); i++ {
+ require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint)
+ fh := tc.histograms[i].ToFloat(nil)
+ if value.IsStaleNaN(fh.Sum) {
+ require.True(t, value.IsStaleNaN(decodedStats[i].Sum))
+ require.Equal(t, float64(0), decodedStats[i].Count)
+ } else {
+ require.Equal(t, fh.Count, decodedStats[i].Count)
+ require.Equal(t, fh.Sum, decodedStats[i].Sum)
+ }
+ }
+ })
+ })
+ }
}
type histogramSeries struct {
diff --git a/promql/parser/ast.go b/promql/parser/ast.go
index 830e8a2c5e..162d7817ab 100644
--- a/promql/parser/ast.go
+++ b/promql/parser/ast.go
@@ -352,8 +352,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) {
// f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f
// for all the non-nil children of node, recursively.
func Inspect(node Node, f inspector) {
- //nolint: errcheck
- Walk(f, node, nil)
+ Walk(f, node, nil) //nolint:errcheck
}
// Children returns a list of all child nodes of a syntax tree node.
@@ -419,7 +418,7 @@ func mergeRanges(first, last Node) posrange.PositionRange {
}
}
-// Item implements the Node interface.
+// PositionRange implements the Node interface.
// This makes it possible to call mergeRanges on them.
func (i *Item) PositionRange() posrange.PositionRange {
return posrange.PositionRange{
diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y
index b99e67424f..da24be0c44 100644
--- a/promql/parser/generated_parser.y
+++ b/promql/parser/generated_parser.y
@@ -23,6 +23,8 @@ import (
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/promql/parser/posrange"
+
+ "github.com/prometheus/common/model"
)
%}
@@ -84,6 +86,7 @@ NEGATIVE_BUCKETS_DESC
ZERO_BUCKET_DESC
ZERO_BUCKET_WIDTH_DESC
CUSTOM_VALUES_DESC
+COUNTER_RESET_HINT_DESC
%token histogramDescEnd
// Operators.
@@ -149,6 +152,14 @@ START
END
%token preprocessorEnd
+// Counter reset hints.
+%token counterResetHintsStart
+%token <item>
+UNKNOWN_COUNTER_RESET
+COUNTER_RESET
+NOT_COUNTER_RESET
+GAUGE_TYPE
+%token counterResetHintsEnd
// Start symbols for the generated parser.
%token startSymbolsStart
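
These tokens back a new counter_reset_hint field in the test framework's histogram literal syntax. Presumably a series description like the one below becomes parseable; the spelling of the hint values ("gauge" for GAUGE_TYPE, and so on) is an assumption here, not taken from this diff.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Hypothetical hint value spelling, assumed to map to the tokens above.
	lbls, vals, err := parser.ParseSeriesDesc(
		`some_metric {{schema:0 sum:5 count:4 buckets:[1 2 1] counter_reset_hint:gauge}}`)
	if err != nil {
		panic(err)
	}
	fmt.Println(lbls, vals)
}
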
@@ -163,7 +174,7 @@ START_METRIC_SELECTOR
// Type definitions for grammar rules.
%type <matchers> label_match_list
%type <matcher> label_matcher
-%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier
+%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier counter_reset_hint
%type <labels> label_set metric
%type <lblList> label_set_list
%type <label> label_set_item