Merge branch 'main' into mantine-ui

Signed-off-by: Julius Volz <julius.volz@gmail.com>
Julius Volz 2024-09-04 20:58:19 +02:00
commit 490eef6509
181 changed files with 9993 additions and 3069 deletions

.github/stale.yml

@@ -1,56 +0,0 @@
# Configuration for probot-stale - https://github.com/probot/stale
# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 60
# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: false
# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
onlyLabels: []
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
- keepalive
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: false
# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: false
# Label to use when marking as stale
staleLabel: stale
# Comment to post when marking as stale. Set to `false` to disable
markComment: false
# Comment to post when removing the stale label.
# unmarkComment: >
# Your comment here.
# Comment to post when closing a stale Issue or Pull Request.
# closeComment: >
# Your comment here.
# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 30
# Limit to only `issues` or `pulls`
only: pulls
# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
# pulls:
# daysUntilStale: 30
# markComment: >
# This pull request has been automatically marked as stale because it has not had
# recent activity. It will be closed if no further activity occurs. Thank you
# for your contributions.
# issues:
# exemptLabels:
# - confirmed


@@ -13,7 +13,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-   - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
+   - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0
      with:
        github_token: ${{ secrets.GITHUB_TOKEN }}
    - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1


@@ -13,7 +13,7 @@ jobs:
    if: github.repository_owner == 'prometheus'
    steps:
    - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-   - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
+   - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0
      with:
        github_token: ${{ secrets.GITHUB_TOKEN }}
    - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1


@@ -14,7 +14,7 @@ jobs:
    image: quay.io/prometheus/golang-builder:1.22-base
    steps:
    - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-   - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+   - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
    - uses: ./.github/promci/actions/setup_environment
    - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
    - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
@@ -28,7 +28,7 @@ jobs:
    image: quay.io/prometheus/golang-builder:1.22-base
    steps:
    - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-   - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+   - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
    - uses: ./.github/promci/actions/setup_environment
    - run: go test --tags=dedupelabels ./...
    - run: GOARCH=386 go test ./cmd/prometheus
@@ -58,7 +58,7 @@ jobs:
    steps:
    - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-   - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+   - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
    - uses: ./.github/promci/actions/setup_environment
      with:
        enable_go: false
@@ -75,7 +75,7 @@ jobs:
    runs-on: windows-latest
    steps:
    - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-   - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
+   - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
      with:
        go-version: 1.22.x
    - run: |
@@ -115,7 +115,7 @@ jobs:
      thread: [ 0, 1, 2 ]
    steps:
    - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-   - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+   - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
    - uses: ./.github/promci/actions/build
      with:
        promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
@@ -138,7 +138,7 @@ jobs:
    # should also be updated.
    steps:
    - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-   - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+   - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
    - uses: ./.github/promci/actions/build
      with:
        parallelism: 12
@@ -162,7 +162,7 @@ jobs:
    - name: Checkout repository
      uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
    - name: Install Go
-     uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
+     uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
      with:
        cache: false
        go-version: 1.22.x
@@ -175,18 +175,18 @@ jobs:
    - name: Checkout repository
      uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
    - name: Install Go
-     uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
+     uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
      with:
        go-version: 1.22.x
    - name: Install snmp_exporter/generator dependencies
      run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
      if: github.repository == 'prometheus/snmp_exporter'
    - name: Lint
-     uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
+     uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
      with:
        args: --verbose
        # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-       version: v1.59.1
+       version: v1.60.2
  fuzzing:
    uses: ./.github/workflows/fuzzing.yml
    if: github.event_name == 'pull_request'
@@ -200,7 +200,7 @@ jobs:
    if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
    steps:
    - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-   - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+   - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
    - uses: ./.github/promci/actions/publish_main
      with:
        docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -214,7 +214,7 @@ jobs:
    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
    steps:
    - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-   - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+   - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
    - uses: ./.github/promci/actions/publish_release
      with:
        docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -229,9 +229,9 @@ jobs:
    steps:
    - name: Checkout
      uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-   - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+   - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
    - name: Install nodejs
-     uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2
+     uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3
      with:
        node-version-file: "web/ui/.nvmrc"
        registry-url: "https://registry.npmjs.org"


@@ -27,12 +27,12 @@ jobs:
      uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
    - name: Initialize CodeQL
-     uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
+     uses: github/codeql-action/init@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
      with:
        languages: ${{ matrix.language }}
    - name: Autobuild
-     uses: github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
+     uses: github/codeql-action/autobuild@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
    - name: Perform CodeQL Analysis
-     uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
+     uses: github/codeql-action/analyze@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6


@@ -21,7 +21,7 @@ jobs:
        fuzz-seconds: 600
        dry-run: false
    - name: Upload Crash
-     uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+     uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
      if: failure() && steps.build.outcome == 'success'
      with:
        name: artifacts


@@ -26,7 +26,7 @@ jobs:
        persist-credentials: false
    - name: "Run analysis"
-     uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # tag=v2.3.3
+     uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # tag=v2.4.0
      with:
        results_file: results.sarif
        results_format: sarif
@@ -37,7 +37,7 @@ jobs:
    # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
    # format to the repository Actions tab.
    - name: "Upload artifact"
-     uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3
+     uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # tag=v4.3.4
      with:
        name: SARIF file
        path: results.sarif
@@ -45,6 +45,6 @@ jobs:
    # Upload the results to GitHub's code scanning dashboard.
    - name: "Upload to code-scanning"
-     uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # tag=v3.25.11
+     uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # tag=v3.26.6
      with:
        sarif_file: results.sarif

.github/workflows/stale.yml (new file)

@@ -0,0 +1,31 @@
name: Stale Check
on:
workflow_dispatch: {}
schedule:
- cron: '16 22 * * *'
permissions:
issues: write
pull-requests: write
jobs:
stale:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
runs-on: ubuntu-latest
steps:
- uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
# opt out of defaults to avoid marking issues as stale and closing them
# https://github.com/actions/stale#days-before-close
# https://github.com/actions/stale#days-before-stale
days-before-stale: -1
days-before-close: -1
# Setting it to empty string to skip comments.
# https://github.com/actions/stale#stale-pr-message
# https://github.com/actions/stale#stale-issue-message
stale-pr-message: ''
stale-issue-message: ''
operations-per-run: 30
# override days-before-stale, for only marking the pull requests as stale
days-before-pr-stale: 60
stale-pr-label: stale
exempt-pr-labels: keepalive
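
Net effect of this configuration: with the global `days-before-stale: -1` and `days-before-close: -1` opt-outs in place, only the PR-specific override `days-before-pr-stale: 60` is active, so the workflow marks pull requests (but never issues) as `stale` after 60 days of inactivity, skips anything labelled `keepalive`, and never auto-closes, mirroring the probot-stale configuration deleted above.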


@@ -25,15 +25,34 @@ linters:
    - loggercheck
  issues:
+   max-issues-per-linter: 0
    max-same-issues: 0
+   # The default exclusions are too aggressive. For one, they
+   # essentially disable any linting on doc comments. We disable
+   # default exclusions here and add exclusions fitting our codebase
+   # further down.
+   exclude-use-default: false
    exclude-files:
      # Skip autogenerated files.
      - ^.*\.(pb|y)\.go$
    exclude-dirs:
-     # Copied it from a different source
+     # Copied it from a different source.
      - storage/remote/otlptranslator/prometheusremotewrite
      - storage/remote/otlptranslator/prometheus
    exclude-rules:
+     - linters:
+         - errcheck
+       # Taken from the default exclusions (that are otherwise disabled above).
+       text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
+     - linters:
+         - govet
+       # We use many Seek methods that do not follow the usual pattern.
+       text: "stdmethods: method Seek.* should have signature Seek"
+     - linters:
+         - revive
+       # We have stopped at some point to write doc comments on exported symbols.
+       # TODO(beorn7): Maybe we should enforce this again? There are ~500 offenders right now.
+       text: exported (.+) should have comment( \(or a comment on this block\))? or be unexported
      - linters:
          - gocritic
        text: "appendAssign"
@@ -94,15 +113,14 @@ linters-settings:
    errorf: false
  revive:
    # By default, revive will enable only the linting rules that are named in the configuration file.
-   # So, it's needed to explicitly set in configuration all required rules.
-   # The following configuration enables all the rules from the defaults.toml
-   # https://github.com/mgechev/revive/blob/master/defaults.toml
+   # So, it's needed to explicitly enable all required rules here.
    rules:
      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
      - name: blank-imports
+     - name: comment-spacings
      - name: context-as-argument
        arguments:
-         # allow functions with test or bench signatures
+         # Allow functions with test or bench signatures.
          - allowTypesBefore: "*testing.T,testing.TB"
      - name: context-keys-type
      - name: dot-imports
@@ -118,6 +136,8 @@ linters-settings:
      - name: increment-decrement
      - name: indent-error-flow
      - name: package-comments
+       # TODO(beorn7): Currently, we have a lot of missing package doc comments. Maybe we should have them.
+       disabled: true
      - name: range
      - name: receiver-naming
      - name: redefines-builtin-id


@@ -3,8 +3,59 @@
  ## unreleased

  * [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200
- * [FEATURE] Remote-Write: Add sender and receiver support for [Remote Write 2.0-rc.2](https://prometheus.io/docs/specs/remote_write_spec_2_0/) specification #14395 #14427 #14444
- * [ENHANCEMENT] Remote-Write: 1.x messages against Remote Write 2.x Receivers will have now correct values for `prometheus_storage_<samples|histograms|exemplar>_failed_total` in case of partial errors #14444
+ * [ENHANCEMENT] OTLP receiver: Warn when encountering exponential histograms with zero count and non-zero sum. #14706
+ * [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042
+
+ ## 2.54.1 / 2024-08-27
+
+ * [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps. #14685
+ * [BUGFIX] Docker SD: fix crash in `match_first_network` mode when container is reconnected to a new network. #14654
+ * [BUGFIX] PromQL: fix experimental native histograms getting corrupted due to vector selector bug in range queries. #14538
+ * [BUGFIX] PromQL: fix experimental native histogram counter reset detection on stale samples. #14514
+ * [BUGFIX] PromQL: fix native histograms getting corrupted due to vector selector bug in range queries. #14605
+
+ ## 2.54.0 / 2024-08-09
+
+ Release 2.54 brings a release candidate of a major new version of [Remote Write: 2.0](https://prometheus.io/docs/specs/remote_write_spec_2_0/).
+ This is experimental at this time and may still change.
+ Remote-write v2 is enabled by default, but can be disabled via feature-flag `web.remote-write-receiver.accepted-protobuf-messages`.
+
+ * [CHANGE] Remote-Write: `highest_timestamp_in_seconds` and `queue_highest_sent_timestamp_seconds` metrics now initialized to 0. #14437
+ * [CHANGE] API: Split warnings from info annotations in API response. #14327
+ * [FEATURE] Remote-Write: Version 2.0 experimental, plus metadata in WAL via feature flag `metadata-wal-records` (defaults on). #14395,#14427,#14444
+ * [FEATURE] PromQL: add limitk() and limit_ratio() aggregation operators. #12503
+ * [ENHANCEMENT] PromQL: Accept underscores in literal numbers, e.g. 1_000_000 for 1 million. #12821
+ * [ENHANCEMENT] PromQL: float literal numbers and durations are now interchangeable (experimental). Example: `time() - my_timestamp > 10m`. #9138
+ * [ENHANCEMENT] PromQL: use Kahan summation for sum(). #14074,#14362
+ * [ENHANCEMENT] PromQL (experimental native histograms): Optimize `histogram_count` and `histogram_sum` functions. #14097
+ * [ENHANCEMENT] TSDB: Better support for out-of-order experimental native histogram samples. #14438
+ * [ENHANCEMENT] TSDB: Optimise seek within index. #14393
+ * [ENHANCEMENT] TSDB: Optimise deletion of stale series. #14307
+ * [ENHANCEMENT] TSDB: Reduce locking to optimise adding and removing series. #13286,#14286
+ * [ENHANCEMENT] TSDB: Small optimisation: streamline special handling for out-of-order data. #14396,#14584
+ * [ENHANCEMENT] Regexps: Optimize patterns with multiple prefixes. #13843,#14368
+ * [ENHANCEMENT] Regexps: Optimize patterns containing multiple literal strings. #14173
+ * [ENHANCEMENT] AWS SD: expose Primary IPv6 addresses as __meta_ec2_primary_ipv6_addresses. #14156
+ * [ENHANCEMENT] Docker SD: add MatchFirstNetwork for containers with multiple networks. #10490
+ * [ENHANCEMENT] OpenStack SD: Use `flavor.original_name` if available. #14312
+ * [ENHANCEMENT] UI (experimental native histograms): more accurate representation. #13680,#14430
+ * [ENHANCEMENT] Agent: `out_of_order_time_window` config option now applies to agent. #14094
+ * [ENHANCEMENT] Notifier: Send any outstanding Alertmanager notifications when shutting down. #14290
+ * [ENHANCEMENT] Rules: Add label-matcher support to Rules API. #10194
+ * [ENHANCEMENT] HTTP API: Add url to message logged on error while sending response. #14209
+ * [BUGFIX] TSDB: Exclude OOO chunks mapped after compaction starts (introduced by #14396). #14584
+ * [BUGFIX] CLI: escape `|` characters when generating docs. #14420
+ * [BUGFIX] PromQL (experimental native histograms): Fix some binary operators between native histogram values. #14454
+ * [BUGFIX] TSDB: LabelNames API could fail during compaction. #14279
+ * [BUGFIX] TSDB: Fix rare issue where pending OOO read can be left dangling if creating querier fails. #14341
+ * [BUGFIX] TSDB: fix check for context cancellation in LabelNamesFor. #14302
+ * [BUGFIX] Rules: Fix rare panic on reload. #14366
+ * [BUGFIX] Config: In YAML marshalling, do not output a regexp field if it was never set. #14004
+ * [BUGFIX] Remote-Write: reject samples with future timestamps. #14304
+ * [BUGFIX] Remote-Write: Fix data corruption in remote write if max_sample_age is applied. #14078
+ * [BUGFIX] Notifier: Fix Alertmanager discovery not updating under heavy load. #14174
+ * [BUGFIX] Regexes: some Unicode characters were not matched by case-insensitive comparison. #14170,#14299
+ * [BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515
+
  ## 2.53.1 / 2024-07-10


@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
  SKIP_GOLANGCI_LINT :=
  GOLANGCI_LINT :=
  GOLANGCI_LINT_OPTS ?=
- GOLANGCI_LINT_VERSION ?= v1.59.1
+ GOLANGCI_LINT_VERSION ?= v1.60.2
  # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
  # windows isn't included here because of the path separator being different.
  ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))


@@ -1 +1 @@
- 2.53.1
+ 2.54.1


@@ -152,6 +152,7 @@ type flagConfig struct {
  queryConcurrency int
  queryMaxSamples int
  RemoteFlushDeadline model.Duration
+ nameEscapingScheme string

  featureList []string
  memlimitRatio float64
@@ -168,6 +169,8 @@ type flagConfig struct {
  corsRegexString string

  promlogConfig promlog.Config
+
+ promqlEnableDelayedNameRemoval bool
  }

  // setFeatureListOptions sets the corresponding options from the featureList.
@@ -234,6 +237,15 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
  config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
  config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
  level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
+ case "delayed-compaction":
+   c.tsdb.EnableDelayedCompaction = true
+   level.Info(logger).Log("msg", "Experimental delayed compaction is enabled.")
+ case "promql-delayed-name-removal":
+   c.promqlEnableDelayedNameRemoval = true
+   level.Info(logger).Log("msg", "Experimental PromQL delayed name removal enabled.")
+ case "utf8-names":
+   model.NameValidationScheme = model.UTF8Validation
+   level.Info(logger).Log("msg", "Experimental UTF-8 support enabled")
  case "":
  continue
  case "promql-at-modifier", "promql-negative-offset":
@@ -293,8 +305,8 @@ func main() {
  a.Flag("config.file", "Prometheus configuration file path.").
  Default("prometheus.yml").StringVar(&cfg.configFile)

- a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry.").
-   Default("0.0.0.0:9090").StringVar(&cfg.web.ListenAddress)
+ a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry. Can be repeated.").
+   Default("0.0.0.0:9090").StringsVar(&cfg.web.ListenAddresses)

  a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory").
  Default("0.9").FloatVar(&cfg.memlimitRatio)
@@ -308,7 +320,7 @@ func main() {
  "Maximum duration before timing out read of the request, and closing idle connections.").
  Default("5m").SetValue(&cfg.webTimeout)

- a.Flag("web.max-connections", "Maximum number of simultaneous connections.").
+ a.Flag("web.max-connections", "Maximum number of simultaneous connections across all listeners.").
  Default("512").IntVar(&cfg.web.MaxConnections)

  a.Flag("web.external-url",
@@ -384,6 +396,9 @@ func main() {
  serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "[DEPRECATED] This flag has no effect. Overlapping blocks are enabled by default now.").
  Default("true").Hidden().BoolVar(&b)

+ serverOnlyFlag(a, "storage.tsdb.allow-overlapping-compaction", "Allow compaction of overlapping blocks. If set to false, TSDB stops vertical compaction and leaves overlapping blocks there. The use case is to let another component handle the compaction of overlapping blocks.").
+   Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction)
+
  serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL.").
  Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)
@@ -478,7 +493,9 @@ func main() {
  a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
  Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)

- a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, new-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+ a.Flag("scrape.name-escaping-scheme", `Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots".`).Default(scrape.DefaultNameEscapingScheme.String()).StringVar(&cfg.nameEscapingScheme)
+
+ a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, new-ui, no-default-scrape-port, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
  Default("").StringsVar(&cfg.featureList)

  promlogflag.AddFlags(a, &cfg.promlogConfig)
@@ -506,6 +523,15 @@ func main() {
  os.Exit(1)
  }

+ if cfg.nameEscapingScheme != "" {
+   scheme, err := model.ToEscapingScheme(cfg.nameEscapingScheme)
+   if err != nil {
+     fmt.Fprintf(os.Stderr, `Invalid name escaping scheme: %q; Needs to be one of "values", "underscores", or "dots"`, cfg.nameEscapingScheme)
+     os.Exit(1)
+   }
+   model.NameEscapingScheme = scheme
+ }
+
  if agentMode && len(serverOnlyFlags) > 0 {
  fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags)
  os.Exit(3)
@@ -526,7 +552,7 @@ func main() {
  localStoragePath = cfg.agentStoragePath
  }

- cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddress)
+ cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddresses[0])
  if err != nil {
  fmt.Fprintln(os.Stderr, fmt.Errorf("parse external URL %q: %w", cfg.prometheusURL, err))
  os.Exit(2)
@@ -784,6 +810,7 @@ func main() {
  EnableAtModifier: true,
  EnableNegativeOffset: true,
  EnablePerStepStats: cfg.enablePerStepStats,
+ EnableDelayedNameRemoval: cfg.promqlEnableDelayedNameRemoval,
  }

  queryEngine = promql.NewEngine(opts)
@@ -972,15 +999,21 @@ func main() {
  })
  }

- listener, err := webHandler.Listener()
+ listeners, err := webHandler.Listeners()
  if err != nil {
-   level.Error(logger).Log("msg", "Unable to start web listener", "err", err)
+   level.Error(logger).Log("msg", "Unable to start web listeners", "err", err)
+   if err := queryEngine.Close(); err != nil {
+     level.Warn(logger).Log("msg", "Closing query engine failed", "err", err)
+   }
  os.Exit(1)
  }

  err = toolkit_web.Validate(*webConfig)
  if err != nil {
  level.Error(logger).Log("msg", "Unable to validate web configuration file", "err", err)
+   if err := queryEngine.Close(); err != nil {
+     level.Warn(logger).Log("msg", "Closing query engine failed", "err", err)
+   }
  os.Exit(1)
  }
@@ -1002,6 +1035,9 @@ func main() {
  case <-cancel:
  reloadReady.Close()
  }
+ if err := queryEngine.Close(); err != nil {
+   level.Warn(logger).Log("msg", "Closing query engine failed", "err", err)
+ }
  return nil
  },
  func(err error) {
@@ -1269,7 +1305,7 @@ func main() {
  // Web handler.
  g.Add(
  func() error {
-   if err := webHandler.Run(ctxWeb, listener, *webConfig); err != nil {
+   if err := webHandler.Run(ctxWeb, listeners, *webConfig); err != nil {
  return fmt.Errorf("error starting web server: %w", err)
  }
  return nil
@@ -1718,6 +1754,8 @@ type tsdbOptions struct {
  MaxExemplars int64
  EnableMemorySnapshotOnShutdown bool
  EnableNativeHistograms bool
+ EnableDelayedCompaction bool
+ EnableOverlappingCompaction bool
  }

  func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
@@ -1738,7 +1776,8 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
  EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown,
  EnableNativeHistograms: opts.EnableNativeHistograms,
  OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow,
- EnableOverlappingCompaction: true,
+ EnableDelayedCompaction: opts.EnableDelayedCompaction,
+ EnableOverlappingCompaction: opts.EnableOverlappingCompaction,
  }
  }


@@ -85,7 +85,7 @@ func getCompatibleBlockDuration(maxBlockDuration int64) int64 {
  return blockDuration
  }

- func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool) (returnErr error) {
+ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool, customLabels map[string]string) (returnErr error) {
  blockDuration := getCompatibleBlockDuration(maxBlockDuration)
  mint = blockDuration * (mint / blockDuration)
@@ -102,6 +102,8 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
  nextSampleTs int64 = math.MaxInt64
  )

+ lb := labels.NewBuilder(labels.EmptyLabels())
+
  for t := mint; t <= maxt; t += blockDuration {
  tsUpper := t + blockDuration
  if nextSampleTs != math.MaxInt64 && nextSampleTs >= tsUpper {
@@ -162,7 +164,13 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
  l := labels.Labels{}
  p.Metric(&l)

- if _, err := app.Append(0, l, *ts, v); err != nil {
+ lb.Reset(l)
+ for name, value := range customLabels {
+   lb.Set(name, value)
+ }
+ lbls := lb.Labels()
+
+ if _, err := app.Append(0, lbls, *ts, v); err != nil {
  return fmt.Errorf("add sample: %w", err)
  }
@@ -221,13 +229,13 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
  return nil
  }

- func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) {
+ func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration, customLabels map[string]string) (err error) {
  p := textparse.NewOpenMetricsParser(input, nil) // Don't need a SymbolTable to get max and min timestamps.
  maxt, mint, err := getMinAndMaxTimestamps(p)
  if err != nil {
  return fmt.Errorf("getting min and max timestamp: %w", err)
  }
- if err = createBlocks(input, mint, maxt, int64(maxBlockDuration/time.Millisecond), maxSamplesInAppender, outputDir, humanReadable, quiet); err != nil {
+ if err = createBlocks(input, mint, maxt, int64(maxBlockDuration/time.Millisecond), maxSamplesInAppender, outputDir, humanReadable, quiet, customLabels); err != nil {
  return fmt.Errorf("block creation: %w", err)
  }
  return nil
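
The labels.Builder sequence above (Reset, then Set per custom label, then Labels) is what makes the injected labels win over labels parsed from the OpenMetrics input, since Set overwrites an existing name. A minimal standalone sketch of that behaviour, using the same prometheus/model/labels API as the diff (the label values are made up for illustration):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// Labels as parsed from one OpenMetrics sample.
	parsed := labels.FromStrings("__name__", "http_requests_total", "code", "200")

	// One builder is reused across all samples, as createBlocks does.
	lb := labels.NewBuilder(labels.EmptyLabels())
	lb.Reset(parsed)

	// Custom labels passed on the command line; Set overwrites on
	// name collision, so a custom "code" would replace the parsed one.
	for name, value := range map[string]string{"cluster_id": "123"} {
		lb.Set(name, value)
	}

	// Prints: {__name__="http_requests_total", cluster_id="123", code="200"}
	fmt.Println(lb.Labels())
}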


@@ -92,6 +92,7 @@ func TestBackfill(t *testing.T) {
  Description string
  MaxSamplesInAppender int
  MaxBlockDuration time.Duration
+ Labels map[string]string
  Expected struct {
  MinTime int64
  MaxTime int64
@@ -636,6 +637,49 @@ http_requests_total{code="400"} 1024 7199
  },
  },
  },
+ {
+   ToParse: `# HELP http_requests_total The total number of HTTP requests.
+ # TYPE http_requests_total counter
+ http_requests_total{code="200"} 1 1624463088.000
+ http_requests_total{code="200"} 2 1629503088.000
+ http_requests_total{code="200"} 3 1629863088.000
+ # EOF
+ `,
+   IsOk: true,
+   Description: "Sample with external labels.",
+   MaxSamplesInAppender: 5000,
+   MaxBlockDuration: 2048 * time.Hour,
+   Labels: map[string]string{"cluster_id": "123", "org_id": "999"},
+   Expected: struct {
+     MinTime int64
+     MaxTime int64
+     NumBlocks int
+     BlockDuration int64
+     Samples []backfillSample
+   }{
+     MinTime: 1624463088000,
+     MaxTime: 1629863088000,
+     NumBlocks: 2,
+     BlockDuration: int64(1458 * time.Hour / time.Millisecond),
+     Samples: []backfillSample{
+       {
+         Timestamp: 1624463088000,
+         Value: 1,
+         Labels: labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"),
+       },
+       {
+         Timestamp: 1629503088000,
+         Value: 2,
+         Labels: labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"),
+       },
+       {
+         Timestamp: 1629863088000,
+         Value: 3,
+         Labels: labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"),
+       },
+     },
+   },
+ },
  {
  ToParse: `# HELP rpc_duration_seconds A summary of the RPC duration in seconds.
  # TYPE rpc_duration_seconds summary
@@ -689,7 +733,7 @@ after_eof 1 2
  outputDir := t.TempDir()

- err := backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration)
+ err := backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration, test.Labels)

  if !test.IsOk {
  require.Error(t, err, test.Description)


@@ -253,6 +253,7 @@ func main() {
  importQuiet := importCmd.Flag("quiet", "Do not print created blocks.").Short('q').Bool()
  maxBlockDuration := importCmd.Flag("max-block-duration", "Maximum duration created blocks may span. Anything less than 2h is ignored.").Hidden().PlaceHolder("<duration>").Duration()
  openMetricsImportCmd := importCmd.Command("openmetrics", "Import samples from OpenMetrics input and produce TSDB blocks. Please refer to the storage docs for more details.")
+ openMetricsLabels := openMetricsImportCmd.Flag("label", "Label to attach to metrics. Can be specified multiple times. Example --label=label_name=label_value").StringMap()
  importFilePath := openMetricsImportCmd.Arg("input file", "OpenMetrics file to read samples from.").Required().String()
  importDBPath := openMetricsImportCmd.Arg("output directory", "Output directory for generated blocks.").Default(defaultDBPath).String()
  importRulesCmd := importCmd.Command("rules", "Create blocks of data for new recording rules.")
@@ -408,7 +409,7 @@ func main() {
  os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
  // TODO(aSquare14): Work on adding support for custom block size.
  case openMetricsImportCmd.FullCommand():
- os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))
+ os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration, *openMetricsLabels))
  case importRulesCmd.FullCommand():
  os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...)))
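
A hedged usage sketch (assuming this hangs off the usual `promtool tsdb create-blocks-from` command tree): `promtool tsdb create-blocks-from openmetrics --label=cluster_id=123 --label=org_id=999 input.om ./data` would stamp `cluster_id="123"` and `org_id="999"` onto every imported series, which is exactly the scenario the new backfill test case above asserts.
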
@@ -471,7 +472,7 @@ func (ls lintConfig) lintDuplicateRules() bool {
  return ls.all || ls.duplicateRules
  }

- // Check server status - healthy & ready.
+ // CheckServerStatus - healthy & ready.
  func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error {
  if serverURL.Scheme == "" {
  serverURL.Scheme = "http"


@@ -31,7 +31,7 @@ import (
  "github.com/prometheus/prometheus/util/fmtutil"
  )

- // Push metrics to a prometheus remote write (for testing purpose only).
+ // PushMetrics to a prometheus remote write (for testing purpose only).
  func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int {
  addressURL, err := url.Parse(url.String())
  if err != nil {


@@ -823,7 +823,7 @@ func checkErr(err error) int {
  return 0
  }

- func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int {
+ func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration, customLabels map[string]string) int {
  inputFile, err := fileutil.OpenMmapFile(path)
  if err != nil {
  return checkErr(err)
@@ -834,7 +834,7 @@ func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxB
  return checkErr(fmt.Errorf("create output dir: %w", err))
  }

- return checkErr(backfill(5000, inputFile.Bytes(), outputDir, humanReadable, quiet, maxBlockDuration))
+ return checkErr(backfill(5000, inputFile.Bytes(), outputDir, humanReadable, quiet, maxBlockDuration, customLabels))
  }

  func displayHistogram(dataType string, datas []int, total int) {
@@ -866,16 +866,16 @@ func displayHistogram(dataType string, datas []int, total int) {
  fmt.Println()
  }

- func generateBucket(min, max int) (start, end, step int) {
+ func generateBucket(minVal, maxVal int) (start, end, step int) {
-   s := (max - min) / 10
+   s := (maxVal - minVal) / 10
    step = 10
    for step < s && step <= 10000 {
      step *= 10
    }
-   start = min - min%step
+   start = minVal - minVal%step
-   end = max - max%step + step
+   end = maxVal - maxVal%step + step
    return
  }
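
A quick self-contained check of what the renamed helper computes (the function body is copied verbatim from the diff; only the main wrapper is added for illustration):

package main

import "fmt"

// generateBucket picks a power-of-ten step that yields roughly ten
// buckets, then rounds start down and end up to that step.
func generateBucket(minVal, maxVal int) (start, end, step int) {
	s := (maxVal - minVal) / 10
	step = 10
	for step < s && step <= 10000 {
		step *= 10
	}
	start = minVal - minVal%step
	end = maxVal - maxVal%step + step
	return
}

func main() {
	// s = (141-12)/10 = 12, so step grows once, to 100;
	// start = 12 - 12%100 = 0; end = 141 - 141%100 + 100 = 200.
	fmt.Println(generateBucket(12, 141)) // Output: 0 200 100
}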


@@ -20,6 +20,7 @@ import (
  "math"
  "os"
  "runtime"
+ "slices"
  "strings"
  "testing"
  "time"
@@ -152,12 +153,18 @@ func TestTSDBDump(t *testing.T) {
  expectedMetrics, err := os.ReadFile(tt.expectedDump)
  require.NoError(t, err)
  expectedMetrics = normalizeNewLine(expectedMetrics)
- // even though in case of one matcher samples are not sorted, the order in the cases above should stay the same.
- require.Equal(t, string(expectedMetrics), dumpedMetrics)
+ // Sort both, because Prometheus does not guarantee the output order.
+ require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))
  })
  }
  }

+ func sortLines(buf string) string {
+   lines := strings.Split(buf, "\n")
+   slices.Sort(lines)
+   return strings.Join(lines, "\n")
+ }
+
  func TestTSDBDumpOpenMetrics(t *testing.T) {
  storage := promqltest.LoadedStorage(t, `
  load 1m
@@ -169,7 +176,7 @@ func TestTSDBDumpOpenMetrics(t *testing.T) {
  require.NoError(t, err)
  expectedMetrics = normalizeNewLine(expectedMetrics)
  dumpedMetrics := getDumpedSamples(t, storage.Dir(), math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)
- require.Equal(t, string(expectedMetrics), dumpedMetrics)
+ require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))
  }

  func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) {
@@ -179,7 +186,7 @@ func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) {
  dbDir := t.TempDir()

  // Import samples from OM format
- err = backfill(5000, initialMetrics, dbDir, false, false, 2*time.Hour)
+ err = backfill(5000, initialMetrics, dbDir, false, false, 2*time.Hour, map[string]string{})
  require.NoError(t, err)
  db, err := tsdb.Open(dbDir, nil, nil, tsdb.DefaultOptions(), nil)
  require.NoError(t, err)


@@ -67,6 +67,11 @@ var (
  }
  )

+ const (
+   LegacyValidationConfig = "legacy"
+   UTF8ValidationConfig   = "utf8"
+ )
+
  // Load parses the YAML input s into a Config.
  func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) {
  cfg := &Config{}
@@ -216,6 +221,7 @@ var (
  // DefaultRemoteReadConfig is the default remote read configuration.
  DefaultRemoteReadConfig = RemoteReadConfig{
  RemoteTimeout: model.Duration(1 * time.Minute),
+ ChunkedReadLimit: DefaultChunkedReadLimit,
  HTTPClientConfig: config.DefaultHTTPClientConfig,
  FilterExternalLabels: true,
  }
@@ -446,6 +452,8 @@ type GlobalConfig struct {
  // Keep no more than this many dropped targets per job.
  // 0 means no limit.
  KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
+ // Allow UTF8 Metric and Label Names.
+ MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"`
  }

  // ScrapeProtocol represents supported protocol for scraping metrics.
@@ -471,6 +479,7 @@ var (
  PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4"
  OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1"
  OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0"
+ UTF8NamesHeader string = model.EscapingKey + "=" + model.AllowUTF8

  ScrapeProtocolsHeaders = map[ScrapeProtocol]string{
  PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
@@ -656,6 +665,8 @@ type ScrapeConfig struct {
  // Keep no more than this many dropped targets per job.
  // 0 means no limit.
  KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
+ // Allow UTF8 Metric and Label Names.
+ MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"`

  // We cannot do proper Go type embedding below as the parser will then parse
  // values arbitrarily into the overflow maps of further-down types.
@@ -762,6 +773,19 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
  return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName)
  }

+ switch globalConfig.MetricNameValidationScheme {
+ case "", LegacyValidationConfig:
+ case UTF8ValidationConfig:
+   if model.NameValidationScheme != model.UTF8Validation {
+     return fmt.Errorf("utf8 name validation requested but feature not enabled via --enable-feature=utf8-names")
+   }
+ default:
+   return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme)
+ }
+ if c.MetricNameValidationScheme == "" {
+   c.MetricNameValidationScheme = globalConfig.MetricNameValidationScheme
+ }
+
  return nil
  }
@@ -1090,8 +1114,9 @@ func (m RemoteWriteProtoMsgs) String() string {
  }

  var (
- // RemoteWriteProtoMsgV1 represents the deprecated `prometheus.WriteRequest` protobuf
- // message introduced in the https://prometheus.io/docs/specs/remote_write_spec/.
+ // RemoteWriteProtoMsgV1 represents the `prometheus.WriteRequest` protobuf
+ // message introduced in the https://prometheus.io/docs/specs/remote_write_spec/,
+ // which will eventually be deprecated.
  //
  // NOTE: This string is used for both HTTP header values and config value, so don't change
  // this reference.
@@ -1255,10 +1280,17 @@ type MetadataConfig struct {
  MaxSamplesPerSend int `yaml:"max_samples_per_send,omitempty"`
  }

+ const (
+   // DefaultChunkedReadLimit is the default value for the maximum size of the protobuf frame client allows.
+   // 50MB is the default. This is equivalent to ~100k full XOR chunks and average labelset.
+   DefaultChunkedReadLimit = 5e+7
+ )
+
  // RemoteReadConfig is the configuration for reading from remote storage.
  type RemoteReadConfig struct {
  URL *config.URL `yaml:"url"`
  RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"`
+ ChunkedReadLimit uint64 `yaml:"chunked_read_limit,omitempty"`
  Headers map[string]string `yaml:"headers,omitempty"`
  ReadRecent bool `yaml:"read_recent,omitempty"`
  Name string `yaml:"name,omitempty"`
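
With the new field, a remote_read entry can cap the maximum protobuf frame size of a chunked read response, e.g. `chunked_read_limit: 100000000` (an arbitrary value for illustration); when omitted, it falls back to DefaultChunkedReadLimit, i.e. 5e7 bytes (~50MB), as wired into DefaultRemoteReadConfig above.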


@@ -16,6 +16,7 @@ package config
  import (
  "crypto/tls"
  "encoding/json"
+ "fmt"
  "net/url"
  "os"
  "path/filepath"
@@ -166,6 +167,7 @@ var expectedConf = &Config{
  {
  URL: mustParseURL("http://remote1/read"),
  RemoteTimeout: model.Duration(1 * time.Minute),
+ ChunkedReadLimit: DefaultChunkedReadLimit,
  ReadRecent: true,
  Name: "default",
  HTTPClientConfig: config.HTTPClientConfig{
@@ -177,6 +179,7 @@ var expectedConf = &Config{
  {
  URL: mustParseURL("http://remote3/read"),
  RemoteTimeout: model.Duration(1 * time.Minute),
+ ChunkedReadLimit: DefaultChunkedReadLimit,
  ReadRecent: false,
  Name: "read_special",
  RequiredMatchers: model.LabelSet{"job": "special"},
@@ -2300,3 +2303,52 @@ func TestScrapeConfigDisableCompression(t *testing.T) {
  require.False(t, got.ScrapeConfigs[0].EnableCompression)
  }

+ func TestScrapeConfigNameValidationSettings(t *testing.T) {
+   model.NameValidationScheme = model.UTF8Validation
+   defer func() {
+     model.NameValidationScheme = model.LegacyValidation
+   }()
+
+   tests := []struct {
+     name         string
+     inputFile    string
+     expectScheme string
+   }{
+     {
+       name:         "blank config implies default",
+       inputFile:    "scrape_config_default_validation_mode",
+       expectScheme: "",
+     },
+     {
+       name:         "global setting implies local settings",
+       inputFile:    "scrape_config_global_validation_mode",
+       expectScheme: "utf8",
+     },
+     {
+       name:         "local setting",
+       inputFile:    "scrape_config_local_validation_mode",
+       expectScheme: "utf8",
+     },
+     {
+       name:         "local setting overrides global setting",
+       inputFile:    "scrape_config_local_global_validation_mode",
+       expectScheme: "legacy",
+     },
+   }
+
+   for _, tc := range tests {
+     t.Run(tc.name, func(t *testing.T) {
+       want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, log.NewNopLogger())
+       require.NoError(t, err)
+       out, err := yaml.Marshal(want)
+       require.NoError(t, err)
+       got := &Config{}
+       require.NoError(t, yaml.UnmarshalStrict(out, got))
+       require.Equal(t, tc.expectScheme, got.ScrapeConfigs[0].MetricNameValidationScheme)
+     })
+   }
+ }

View file

@ -0,0 +1,2 @@
scrape_configs:
- job_name: prometheus

View file

@ -0,0 +1,4 @@
global:
metric_name_validation_scheme: utf8
scrape_configs:
- job_name: prometheus

View file

@ -0,0 +1,5 @@
global:
metric_name_validation_scheme: utf8
scrape_configs:
- job_name: prometheus
metric_name_validation_scheme: legacy

View file

@ -0,0 +1,3 @@
scrape_configs:
- job_name: prometheus
metric_name_validation_scheme: utf8

View file

@ -13,7 +13,7 @@
package discovery package discovery
// Create a dummy metrics struct, because this SD doesn't have any metrics. // NoopDiscovererMetrics creates a dummy metrics struct, because this SD doesn't have any metrics.
type NoopDiscovererMetrics struct{} type NoopDiscovererMetrics struct{}
var _ DiscovererMetrics = (*NoopDiscovererMetrics)(nil) var _ DiscovererMetrics = (*NoopDiscovererMetrics)(nil)

View file

@ -39,7 +39,7 @@ type Discoverer interface {
Run(ctx context.Context, up chan<- []*targetgroup.Group) Run(ctx context.Context, up chan<- []*targetgroup.Group)
} }
// Internal metrics of service discovery mechanisms. // DiscovererMetrics are internal metrics of service discovery mechanisms.
type DiscovererMetrics interface { type DiscovererMetrics interface {
Register() error Register() error
Unregister() Unregister()
@ -56,7 +56,7 @@ type DiscovererOptions struct {
HTTPClientOptions []config.HTTPClientOption HTTPClientOptions []config.HTTPClientOption
} }
// Metrics used by the "refresh" package. // RefreshMetrics are used by the "refresh" package.
// We define them here in the "discovery" package in order to avoid a cyclic dependency between // We define them here in the "discovery" package in order to avoid a cyclic dependency between
// "discovery" and "refresh". // "discovery" and "refresh".
type RefreshMetrics struct { type RefreshMetrics struct {
@ -64,17 +64,18 @@ type RefreshMetrics struct {
Duration prometheus.Observer Duration prometheus.Observer
} }
// Instantiate the metrics used by the "refresh" package. // RefreshMetricsInstantiator instantiates the metrics used by the "refresh" package.
type RefreshMetricsInstantiator interface { type RefreshMetricsInstantiator interface {
Instantiate(mech string) *RefreshMetrics Instantiate(mech string) *RefreshMetrics
} }
// An interface for registering, unregistering, and instantiating metrics for the "refresh" package. // RefreshMetricsManager is an interface for registering, unregistering, and
// Refresh metrics are registered and unregistered outside of the service discovery mechanism. // instantiating metrics for the "refresh" package. Refresh metrics are
// This is so that the same metrics can be reused across different service discovery mechanisms. // registered and unregistered outside of the service discovery mechanism. This
// To manage refresh metrics inside the SD mechanism, we'd need to use const labels which are // is so that the same metrics can be reused across different service discovery
// specific to that SD. However, doing so would also expose too many unused metrics on // mechanisms. To manage refresh metrics inside the SD mechanism, we'd need to
// the Prometheus /metrics endpoint. // use const labels which are specific to that SD. However, doing so would also
// expose too many unused metrics on the Prometheus /metrics endpoint.
type RefreshMetricsManager interface { type RefreshMetricsManager interface {
DiscovererMetrics DiscovererMetrics
RefreshMetricsInstantiator RefreshMetricsInstantiator
@ -145,7 +146,8 @@ func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
return staticDiscoverer(c), nil return staticDiscoverer(c), nil
} }
// No metrics are needed for this service discovery mechanism. // NewDiscovererMetrics returns NoopDiscovererMetrics because no metrics are
// needed for this service discovery mechanism.
func (c StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics { func (c StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics {
return &NoopDiscovererMetrics{} return &NoopDiscovererMetrics{}
} }

View file

@ -97,7 +97,6 @@ func fetchApps(ctx context.Context, server string, client *http.Client) (*Applic
resp.Body.Close() resp.Body.Close()
}() }()
//nolint:usestdlibvars
if resp.StatusCode/100 != 2 { if resp.StatusCode/100 != 2 {
return nil, fmt.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode) return nil, fmt.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode)
} }

View file

@ -87,7 +87,6 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
resp.Body.Close() resp.Body.Close()
}() }()
//nolint:usestdlibvars
if resp.StatusCode/100 != 2 { if resp.StatusCode/100 != 2 {
return nil, fmt.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode) return nil, fmt.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode)
} }

View file

@ -970,7 +970,7 @@ func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) {
}.Run(t) }.Run(t)
} }
// TestEndpointsUpdatePod makes sure that Endpoints discovery detects underlying Pods changes. // TestEndpointsDiscoveryUpdatePod makes sure that Endpoints discovery detects underlying Pods changes.
// See https://github.com/prometheus/prometheus/issues/11305 for more details. // See https://github.com/prometheus/prometheus/issues/11305 for more details.
func TestEndpointsDiscoveryUpdatePod(t *testing.T) { func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
pod := &v1.Pod{ pod := &v1.Pod{

View file

@ -154,7 +154,7 @@ func (d k8sDiscoveryTest) Run(t *testing.T) {
// readResultWithTimeout reads all targetgroups from channel with timeout. // readResultWithTimeout reads all targetgroups from channel with timeout.
// It merges targetgroups by source and sends the result to result channel. // It merges targetgroups by source and sends the result to result channel.
func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, max int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) { func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, maxGroups int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) {
res := make(map[string]*targetgroup.Group) res := make(map[string]*targetgroup.Group)
timeout := time.After(stopAfter) timeout := time.After(stopAfter)
Loop: Loop:
@ -167,7 +167,7 @@ Loop:
} }
res[tg.Source] = tg res[tg.Source] = tg
} }
if len(res) == max { if len(res) == maxGroups {
// Reached max target groups we may get, break fast. // Reached max target groups we may get, break fast.
break Loop break Loop
} }
@ -175,10 +175,10 @@ Loop:
// Because we use queue, an object that is created then // Because we use queue, an object that is created then
// deleted or updated may be processed only once. // deleted or updated may be processed only once.
// So possibly we may skip events, timed out here. // So possibly we may skip events, timed out here.
t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), max) t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), maxGroups)
break Loop break Loop
case <-ctx.Done(): case <-ctx.Done():
t.Logf("stopped, got %d (max: %d) items", len(res), max) t.Logf("stopped, got %d (max: %d) items", len(res), maxGroups)
break Loop break Loop
} }
} }

View file

@ -64,7 +64,7 @@ func (p *Provider) Config() interface{} {
return p.config return p.config
} }
// Registers the metrics needed for SD mechanisms. // CreateAndRegisterSDMetrics registers the metrics needed for SD mechanisms.
// Does not register the metrics for the Discovery Manager. // Does not register the metrics for the Discovery Manager.
// TODO(ptodev): Add ability to unregister the metrics? // TODO(ptodev): Add ability to unregister the metrics?
func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]DiscovererMetrics, error) { func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]DiscovererMetrics, error) {
@ -213,8 +213,6 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
var ( var (
wg sync.WaitGroup wg sync.WaitGroup
// keep shows if we keep any providers after reload.
keep bool
newProviders []*Provider newProviders []*Provider
) )
for _, prov := range m.providers { for _, prov := range m.providers {
@ -228,13 +226,12 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
continue continue
} }
newProviders = append(newProviders, prov) newProviders = append(newProviders, prov)
// refTargets keeps reference targets used to populate new subs' targets // refTargets keeps reference targets used to populate new subs' targets as they should be the same.
var refTargets map[string]*targetgroup.Group var refTargets map[string]*targetgroup.Group
prov.mu.Lock() prov.mu.Lock()
m.targetsMtx.Lock() m.targetsMtx.Lock()
for s := range prov.subs { for s := range prov.subs {
keep = true
refTargets = m.targets[poolKey{s, prov.name}] refTargets = m.targets[poolKey{s, prov.name}]
// Remove obsolete subs' targets. // Remove obsolete subs' targets.
if _, ok := prov.newSubs[s]; !ok { if _, ok := prov.newSubs[s]; !ok {
@ -267,7 +264,9 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
// While startProvider does pull the trigger, it may take some time to do so, therefore // While startProvider does pull the trigger, it may take some time to do so, therefore
// we pull the trigger as soon as possible so that downstream managers can populate their state. // we pull the trigger as soon as possible so that downstream managers can populate their state.
// See https://github.com/prometheus/prometheus/pull/8639 for details. // See https://github.com/prometheus/prometheus/pull/8639 for details.
if keep { // This also helps making the downstream managers drop stale targets as soon as possible.
// See https://github.com/prometheus/prometheus/pull/13147 for details.
if len(m.providers) > 0 {
select { select {
case m.triggerSend <- struct{}{}: case m.triggerSend <- struct{}{}:
default: default:
@ -288,7 +287,9 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D
name: {}, name: {},
}, },
} }
m.mtx.Lock()
m.providers = append(m.providers, p) m.providers = append(m.providers, p)
m.mtx.Unlock()
m.startProvider(ctx, p) m.startProvider(ctx, p)
} }
@ -393,8 +394,16 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) {
m.targets[poolKey] = make(map[string]*targetgroup.Group) m.targets[poolKey] = make(map[string]*targetgroup.Group)
} }
for _, tg := range tgs { for _, tg := range tgs {
if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. // Some Discoverers send nil target group so need to check for it to avoid panics.
if tg == nil {
continue
}
if len(tg.Targets) > 0 {
m.targets[poolKey][tg.Source] = tg m.targets[poolKey][tg.Source] = tg
} else {
// The target group is empty, drop the corresponding entry to avoid leaks.
// In case the group yielded targets before, allGroups() will take care of making consumers drop them.
delete(m.targets[poolKey], tg.Source)
} }
} }
} }
@ -403,19 +412,33 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group {
tSets := map[string][]*targetgroup.Group{} tSets := map[string][]*targetgroup.Group{}
n := map[string]int{} n := map[string]int{}
m.mtx.RLock()
m.targetsMtx.Lock() m.targetsMtx.Lock()
defer m.targetsMtx.Unlock() for _, p := range m.providers {
for pkey, tsets := range m.targets { p.mu.RLock()
for s := range p.subs {
// Send empty lists for subs without any targets to make sure old stale targets are dropped by consumers.
// See: https://github.com/prometheus/prometheus/issues/12858 for details.
if _, ok := tSets[s]; !ok {
tSets[s] = []*targetgroup.Group{}
n[s] = 0
}
if tsets, ok := m.targets[poolKey{s, p.name}]; ok {
for _, tg := range tsets { for _, tg := range tsets {
// Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager' tSets[s] = append(tSets[s], tg)
// to signal that it needs to stop all scrape loops for this target set. n[s] += len(tg.Targets)
tSets[pkey.setName] = append(tSets[pkey.setName], tg)
n[pkey.setName] += len(tg.Targets)
} }
} }
}
p.mu.RUnlock()
}
m.targetsMtx.Unlock()
m.mtx.RUnlock()
for setName, v := range n { for setName, v := range n {
m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v)) m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v))
} }
return tSets return tSets
} }

View file

@ -939,11 +939,13 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
discoveryManager.ApplyConfig(c) discoveryManager.ApplyConfig(c)
// Original targets should be present as soon as possible. // Original targets should be present as soon as possible.
// An empty list should be sent for prometheus2 to drop any stale targets
syncedTargets = <-discoveryManager.SyncCh() syncedTargets = <-discoveryManager.SyncCh()
mu.Unlock() mu.Unlock()
require.Len(t, syncedTargets, 1) require.Len(t, syncedTargets, 2)
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
require.Len(t, syncedTargets["prometheus"], 1) require.Len(t, syncedTargets["prometheus"], 1)
require.Empty(t, syncedTargets["prometheus2"])
// prometheus2 configs should be ready on second sync. // prometheus2 configs should be ready on second sync.
syncedTargets = <-discoveryManager.SyncCh() syncedTargets = <-discoveryManager.SyncCh()
@ -1049,8 +1051,8 @@ func TestDiscovererConfigs(t *testing.T) {
} }
// TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after // TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after
// removing all targets from the static_configs sends an update with empty targetGroups. // removing all targets from the static_configs cleans the corresponding targetGroups entries to avoid leaks and sends an empty update.
// This is required to signal the receiver that this target set has no current targets. // The update is required to signal the consumers that the previous targets should be dropped.
func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@ -1083,16 +1085,14 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
discoveryManager.ApplyConfig(c) discoveryManager.ApplyConfig(c)
syncedTargets = <-discoveryManager.SyncCh() syncedTargets = <-discoveryManager.SyncCh()
require.Len(t, discoveryManager.targets, 1)
p = pk("static", "prometheus", 1) p = pk("static", "prometheus", 1)
targetGroups, ok := discoveryManager.targets[p] targetGroups, ok := discoveryManager.targets[p]
require.True(t, ok, "'%v' should be present in target groups", p) require.True(t, ok, "'%v' should be present in targets", p)
group, ok := targetGroups[""] // Otherwise the targetGroups will leak, see https://github.com/prometheus/prometheus/issues/12436.
require.True(t, ok, "missing '' key in target groups %v", targetGroups) require.Empty(t, targetGroups, 0, "'%v' should no longer have any associated target groups", p)
require.Len(t, syncedTargets, 1, "an update with no targetGroups should still be sent.")
require.Empty(t, group.Targets, "Invalid number of targets.") require.Empty(t, syncedTargets["prometheus"])
require.Len(t, syncedTargets, 1)
require.Len(t, syncedTargets["prometheus"], 1)
require.Nil(t, syncedTargets["prometheus"][0].Labels)
} }
func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
@ -1275,6 +1275,7 @@ func TestCoordinationWithReceiver(t *testing.T) {
Targets: []model.LabelSet{{"__instance__": "1"}}, Targets: []model.LabelSet{{"__instance__": "1"}},
}, },
}, },
"mock1": {},
}, },
}, },
{ {

View file

@ -17,7 +17,7 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
// Metric vectors for the "refresh" package. // RefreshMetricsVecs are metric vectors for the "refresh" package.
// We define them here in the "discovery" package in order to avoid a cyclic dependency between // We define them here in the "discovery" package in order to avoid a cyclic dependency between
// "discovery" and "refresh". // "discovery" and "refresh".
type RefreshMetricsVecs struct { type RefreshMetricsVecs struct {

View file

@ -19,6 +19,7 @@ import (
"net" "net"
"net/http" "net/http"
"net/url" "net/url"
"sort"
"strconv" "strconv"
"time" "time"
@ -251,28 +252,26 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
} }
if d.matchFirstNetwork && len(networks) > 1 { if d.matchFirstNetwork && len(networks) > 1 {
// Match user defined network // Sort networks by name and take first non-nil network.
if containerNetworkMode.IsUserDefined() { keys := make([]string, 0, len(networks))
networkMode := string(containerNetworkMode)
networks = map[string]*network.EndpointSettings{networkMode: networks[networkMode]}
} else {
// Get first network if container network mode has "none" value.
// This case appears under certain condition:
// 1. Container created with network set to "--net=none".
// 2. Disconnect network "none".
// 3. Reconnect network with user defined networks.
var first string
for k, n := range networks { for k, n := range networks {
if n != nil { if n != nil {
first = k keys = append(keys, k)
break
} }
} }
networks = map[string]*network.EndpointSettings{first: networks[first]} if len(keys) > 0 {
sort.Strings(keys)
firstNetworkMode := keys[0]
firstNetwork := networks[firstNetworkMode]
networks = map[string]*network.EndpointSettings{firstNetworkMode: firstNetwork}
} }
} }
for _, n := range networks { for _, n := range networks {
if n == nil {
continue
}
var added bool var added bool
for _, p := range c.Ports { for _, p := range c.Ports {

View file

@ -60,9 +60,9 @@ host: %s
tg := tgs[0] tg := tgs[0]
require.NotNil(t, tg) require.NotNil(t, tg)
require.NotNil(t, tg.Targets) require.NotNil(t, tg.Targets)
require.Len(t, tg.Targets, 6) require.Len(t, tg.Targets, 8)
for i, lbls := range []model.LabelSet{ expected := []model.LabelSet{
{ {
"__address__": "172.19.0.2:9100", "__address__": "172.19.0.2:9100",
"__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca", "__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca",
@ -163,7 +163,43 @@ host: %s
"__meta_docker_network_scope": "local", "__meta_docker_network_scope": "local",
"__meta_docker_port_private": "9104", "__meta_docker_port_private": "9104",
}, },
} { {
"__address__": "172.20.0.3:3306",
"__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "mysql",
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
"__meta_docker_container_name": "/dockersd_multi_networks",
"__meta_docker_container_network_mode": "dockersd_private_none",
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.20.0.3",
"__meta_docker_network_name": "dockersd_private",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "3306",
},
{
"__address__": "172.20.0.3:33060",
"__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "mysql",
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
"__meta_docker_container_name": "/dockersd_multi_networks",
"__meta_docker_container_network_mode": "dockersd_private_none",
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.20.0.3",
"__meta_docker_network_name": "dockersd_private",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "33060",
},
}
sortFunc(expected)
sortFunc(tg.Targets)
for i, lbls := range expected {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
require.Equal(t, lbls, tg.Targets[i]) require.Equal(t, lbls, tg.Targets[i])
}) })
@ -202,13 +238,8 @@ host: %s
tg := tgs[0] tg := tgs[0]
require.NotNil(t, tg) require.NotNil(t, tg)
require.NotNil(t, tg.Targets) require.NotNil(t, tg.Targets)
require.Len(t, tg.Targets, 9) require.Len(t, tg.Targets, 13)
sortFunc := func(labelSets []model.LabelSet) {
sort.Slice(labelSets, func(i, j int) bool {
return labelSets[i]["__address__"] < labelSets[j]["__address__"]
})
}
expected := []model.LabelSet{ expected := []model.LabelSet{
{ {
"__address__": "172.19.0.2:9100", "__address__": "172.19.0.2:9100",
@ -359,6 +390,70 @@ host: %s
"__meta_docker_network_scope": "local", "__meta_docker_network_scope": "local",
"__meta_docker_port_private": "9104", "__meta_docker_port_private": "9104",
}, },
{
"__address__": "172.20.0.3:3306",
"__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "mysql",
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
"__meta_docker_container_name": "/dockersd_multi_networks",
"__meta_docker_container_network_mode": "dockersd_private_none",
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.20.0.3",
"__meta_docker_network_name": "dockersd_private",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "3306",
},
{
"__address__": "172.20.0.3:33060",
"__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "mysql",
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
"__meta_docker_container_name": "/dockersd_multi_networks",
"__meta_docker_container_network_mode": "dockersd_private_none",
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.20.0.3",
"__meta_docker_network_name": "dockersd_private",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "33060",
},
{
"__address__": "172.21.0.3:3306",
"__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "mysql",
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
"__meta_docker_container_name": "/dockersd_multi_networks",
"__meta_docker_container_network_mode": "dockersd_private_none",
"__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.21.0.3",
"__meta_docker_network_name": "dockersd_private1",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "3306",
},
{
"__address__": "172.21.0.3:33060",
"__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "mysql",
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
"__meta_docker_container_name": "/dockersd_multi_networks",
"__meta_docker_container_network_mode": "dockersd_private_none",
"__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.21.0.3",
"__meta_docker_network_name": "dockersd_private1",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "33060",
},
} }
sortFunc(expected) sortFunc(expected)
@ -370,3 +465,9 @@ host: %s
}) })
} }
} }
func sortFunc(labelSets []model.LabelSet) {
sort.Slice(labelSets, func(i, j int) bool {
return labelSets[i]["__address__"] < labelSets[j]["__address__"]
})
}

View file

@ -228,5 +228,74 @@
"Networks": {} "Networks": {}
}, },
"Mounts": [] "Mounts": []
},
{
"Id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f",
"Names": [
"/dockersd_multi_networks"
],
"Image": "mysql:5.7.29",
"ImageID": "sha256:16ae2f4625ba63a250462bedeece422e741de9f0caf3b1d89fd5b257aca80cd1",
"Command": "mysqld",
"Created": 1616273136,
"Ports": [
{
"PrivatePort": 3306,
"Type": "tcp"
},
{
"PrivatePort": 33060,
"Type": "tcp"
}
],
"Labels": {
"com.docker.compose.project": "dockersd",
"com.docker.compose.service": "mysql",
"com.docker.compose.version": "2.2.2"
},
"State": "running",
"Status": "Up 40 seconds",
"HostConfig": {
"NetworkMode": "dockersd_private_none"
},
"NetworkSettings": {
"Networks": {
"dockersd_private": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
"EndpointID": "972d6807997369605ace863af58de6cb90c787a5bf2ffc4105662d393ae539b7",
"Gateway": "172.20.0.1",
"IPAddress": "172.20.0.3",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:14:00:02",
"DriverOpts": null
},
"dockersd_private1": {
"IPAMConfig": {},
"Links": null,
"Aliases": [
"mysql",
"mysql",
"f9ade4b83199"
],
"NetworkID": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
"EndpointID": "91a98405344ee1cb7d977cafabe634837876651544b32da20a5e0155868e6f5f",
"Gateway": "172.21.0.1",
"IPAddress": "172.21.0.3",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:15:00:02",
"DriverOpts": null
}
}
},
"Mounts": []
} }
] ]

View file

@ -19,8 +19,8 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
// A utility to be used by implementations of discovery.Discoverer // MetricRegisterer is used by implementations of discovery.Discoverer that need
// which need to manage the lifetime of their metrics. // to manage the lifetime of their metrics.
type MetricRegisterer interface { type MetricRegisterer interface {
RegisterMetrics() error RegisterMetrics() error
UnregisterMetrics() UnregisterMetrics()
@ -34,7 +34,7 @@ type metricRegistererImpl struct {
var _ MetricRegisterer = &metricRegistererImpl{} var _ MetricRegisterer = &metricRegistererImpl{}
// Creates an instance of a MetricRegisterer. // NewMetricRegisterer creates an instance of a MetricRegisterer.
// Typically called inside the implementation of the NewDiscoverer() method. // Typically called inside the implementation of the NewDiscoverer() method.
func NewMetricRegisterer(reg prometheus.Registerer, metrics []prometheus.Collector) MetricRegisterer { func NewMetricRegisterer(reg prometheus.Registerer, metrics []prometheus.Collector) MetricRegisterer {
return &metricRegistererImpl{ return &metricRegistererImpl{

View file

@ -15,11 +15,11 @@ The Prometheus monitoring server
| <code class="text-nowrap">-h</code>, <code class="text-nowrap">--help</code> | Show context-sensitive help (also try --help-long and --help-man). | | | <code class="text-nowrap">-h</code>, <code class="text-nowrap">--help</code> | Show context-sensitive help (also try --help-long and --help-man). | |
| <code class="text-nowrap">--version</code> | Show application version. | | | <code class="text-nowrap">--version</code> | Show application version. | |
| <code class="text-nowrap">--config.file</code> | Prometheus configuration file path. | `prometheus.yml` | | <code class="text-nowrap">--config.file</code> | Prometheus configuration file path. | `prometheus.yml` |
| <code class="text-nowrap">--web.listen-address</code> | Address to listen on for UI, API, and telemetry. | `0.0.0.0:9090` | | <code class="text-nowrap">--web.listen-address</code> <code class="text-nowrap">...<code class="text-nowrap"> | Address to listen on for UI, API, and telemetry. Can be repeated. | `0.0.0.0:9090` |
| <code class="text-nowrap">--auto-gomemlimit.ratio</code> | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` | | <code class="text-nowrap">--auto-gomemlimit.ratio</code> | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` |
| <code class="text-nowrap">--web.config.file</code> | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | | | <code class="text-nowrap">--web.config.file</code> | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | |
| <code class="text-nowrap">--web.read-timeout</code> | Maximum duration before timing out read of the request, and closing idle connections. | `5m` | | <code class="text-nowrap">--web.read-timeout</code> | Maximum duration before timing out read of the request, and closing idle connections. | `5m` |
| <code class="text-nowrap">--web.max-connections</code> | Maximum number of simultaneous connections. | `512` | | <code class="text-nowrap">--web.max-connections</code> | Maximum number of simultaneous connections across all listeners. | `512` |
| <code class="text-nowrap">--web.external-url</code> | The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically. | | | <code class="text-nowrap">--web.external-url</code> | The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically. | |
| <code class="text-nowrap">--web.route-prefix</code> | Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url. | | | <code class="text-nowrap">--web.route-prefix</code> | Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url. | |
| <code class="text-nowrap">--web.user-assets</code> | Path to static asset directory, available at /user. | | | <code class="text-nowrap">--web.user-assets</code> | Path to static asset directory, available at /user. | |
@ -56,7 +56,8 @@ The Prometheus monitoring server
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | | <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, new-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | <code class="text-nowrap">--scrape.name-escaping-scheme</code> | Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots". | `values` |
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, new-ui, no-default-scrape-port, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` | | <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |

View file

@ -15,7 +15,7 @@ Tooling for the Prometheus monitoring system.
| <code class="text-nowrap">-h</code>, <code class="text-nowrap">--help</code> | Show context-sensitive help (also try --help-long and --help-man). | | <code class="text-nowrap">-h</code>, <code class="text-nowrap">--help</code> | Show context-sensitive help (also try --help-long and --help-man). |
| <code class="text-nowrap">--version</code> | Show application version. | | <code class="text-nowrap">--version</code> | Show application version. |
| <code class="text-nowrap">--experimental</code> | Enable experimental commands. | | <code class="text-nowrap">--experimental</code> | Enable experimental commands. |
| <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. | | <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. |
@ -281,7 +281,7 @@ Run series query.
| Flag | Description | | Flag | Description |
| --- | --- | | --- | --- |
| <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. | | <code class="text-nowrap">--match</code> <code class="text-nowrap">...<code class="text-nowrap"> | Series selector. Can be specified multiple times. |
| <code class="text-nowrap">--start</code> | Start time (RFC3339 or Unix timestamp). | | <code class="text-nowrap">--start</code> | Start time (RFC3339 or Unix timestamp). |
| <code class="text-nowrap">--end</code> | End time (RFC3339 or Unix timestamp). | | <code class="text-nowrap">--end</code> | End time (RFC3339 or Unix timestamp). |
@ -309,7 +309,7 @@ Run labels query.
| --- | --- | | --- | --- |
| <code class="text-nowrap">--start</code> | Start time (RFC3339 or Unix timestamp). | | <code class="text-nowrap">--start</code> | Start time (RFC3339 or Unix timestamp). |
| <code class="text-nowrap">--end</code> | End time (RFC3339 or Unix timestamp). | | <code class="text-nowrap">--end</code> | End time (RFC3339 or Unix timestamp). |
| <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. | | <code class="text-nowrap">--match</code> <code class="text-nowrap">...<code class="text-nowrap"> | Series selector. Can be specified multiple times. |
@ -338,7 +338,7 @@ Run queries against your Prometheus to analyze the usage pattern of certain metr
| <code class="text-nowrap">--type</code> | Type of metric: histogram. | | | <code class="text-nowrap">--type</code> | Type of metric: histogram. | |
| <code class="text-nowrap">--duration</code> | Time frame to analyze. | `1h` | | <code class="text-nowrap">--duration</code> | Time frame to analyze. | `1h` |
| <code class="text-nowrap">--time</code> | Query time (RFC3339 or Unix timestamp), defaults to now. | | | <code class="text-nowrap">--time</code> | Query time (RFC3339 or Unix timestamp), defaults to now. | |
| <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. | | | <code class="text-nowrap">--match</code> <code class="text-nowrap">...<code class="text-nowrap"> | Series selector. Can be specified multiple times. | |
@ -461,7 +461,7 @@ Unit tests for rules.
| Flag | Description | Default | | Flag | Description | Default |
| --- | --- | --- | | --- | --- | --- |
| <code class="text-nowrap">--run</code> | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | | | <code class="text-nowrap">--run</code> <code class="text-nowrap">...<code class="text-nowrap"> | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | |
| <code class="text-nowrap">--diff</code> | [Experimental] Print colored differential output between expected & received output. | `false` | | <code class="text-nowrap">--diff</code> | [Experimental] Print colored differential output between expected & received output. | `false` |
@ -578,7 +578,7 @@ Dump samples from a TSDB.
| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` | | <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` |
| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` | | <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` | | <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
| <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | | <code class="text-nowrap">--match</code> <code class="text-nowrap">...<code class="text-nowrap"> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
@ -605,7 +605,7 @@ Dump samples from a TSDB.
| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` | | <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` |
| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` | | <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` | | <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
| <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | | <code class="text-nowrap">--match</code> <code class="text-nowrap">...<code class="text-nowrap"> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
@ -641,6 +641,15 @@ Import samples from OpenMetrics input and produce TSDB blocks. Please refer to t
###### Flags
| Flag | Description |
| --- | --- |
| <code class="text-nowrap">--label</code> | Label to attach to metrics. Can be specified multiple times. Example --label=label_name=label_value |
###### Arguments ###### Arguments
| Argument | Description | Default | Required | | Argument | Description | Default | Required |

View file

@ -121,6 +121,11 @@ global:
# that will be kept in memory. 0 means no limit. # that will be kept in memory. 0 means no limit.
[ keep_dropped_targets: <int> | default = 0 ] [ keep_dropped_targets: <int> | default = 0 ]
# Specifies the validation scheme for metric and label names. Either blank or
# "legacy" for letters, numbers, colons, and underscores; or "utf8" for full
# UTF-8 support.
[ metric_name_validation_scheme: <string> | default = "legacy" ]
runtime: runtime:
# Configure the Go garbage collector GOGC parameter # Configure the Go garbage collector GOGC parameter
# See: https://tip.golang.org/doc/gc-guide#GOGC # See: https://tip.golang.org/doc/gc-guide#GOGC
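To make the two schemes of `metric_name_validation_scheme` above concrete, here is a hedged sketch of the difference: the `legacy` pattern is the well-known Prometheus name regex, while `utf8` merely requires a non-empty, valid UTF-8 string. `isValidName` is illustrative, not Prometheus's validation code:

```go
package main

import (
	"fmt"
	"regexp"
	"unicode/utf8"
)

// legacyNameRE is the classic pattern: letters, digits, colons, and
// underscores, not starting with a digit.
var legacyNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)

func isValidName(name, scheme string) bool {
	if scheme == "utf8" {
		return name != "" && utf8.ValidString(name)
	}
	return legacyNameRE.MatchString(name)
}

func main() {
	fmt.Println(isValidName("http.requests", "legacy")) // false
	fmt.Println(isValidName("http.requests", "utf8"))   // true
}
```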
@ -302,6 +307,17 @@ tls_config:
[ proxy_connect_header: [ proxy_connect_header:
[ <string>: [<secret>, ...] ] ] [ <string>: [<secret>, ...] ] ]
# Custom HTTP headers to be sent along with each request.
# Headers that are set by Prometheus itself can't be overwritten.
http_headers:
# Header name.
[ <string>:
# Header values.
[ values: [<string>, ...] ]
# Header values. Hidden on the configuration page.
[ secrets: [<secret>, ...] ]
# Files to read header values from.
[ files: [<string>, ...] ] ]
# List of Azure service discovery configurations. # List of Azure service discovery configurations.
azure_sd_configs: azure_sd_configs:
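Stepping out of the reference for a moment: the `http_headers` block above accepts three sources per header. A minimal sketch, with hypothetical types, of how those sources could be folded into an `http.Header`; the real plumbing lives in the common HTTP client configuration, not here:

```go
package main

import (
	"fmt"
	"net/http"
	"os"
	"strings"
)

// headerSource is a hypothetical stand-in for one configured header entry.
type headerSource struct {
	Values  []string // sent as-is
	Secrets []string // sent as-is, but hidden when the config is displayed
	Files   []string // values read from files
}

func buildHeaders(cfg map[string]headerSource) (http.Header, error) {
	h := http.Header{}
	for name, src := range cfg {
		for _, v := range src.Values {
			h.Add(name, v)
		}
		for _, s := range src.Secrets {
			h.Add(name, s)
		}
		for _, f := range src.Files {
			b, err := os.ReadFile(f)
			if err != nil {
				return nil, err
			}
			h.Add(name, strings.TrimSpace(string(b)))
		}
	}
	return h, nil
}

func main() {
	h, _ := buildHeaders(map[string]headerSource{
		"X-Scope-OrgID": {Values: []string{"tenant-1"}},
	})
	fmt.Println(h.Get("X-Scope-OrgID")) // tenant-1
}
```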
@ -461,6 +477,11 @@ metric_relabel_configs:
# that will be kept in memory. 0 means no limit. # that will be kept in memory. 0 means no limit.
[ keep_dropped_targets: <int> | default = 0 ] [ keep_dropped_targets: <int> | default = 0 ]
# Specifies the validation scheme for metric and label names. Either blank or
# "legacy" for letters, numbers, colons, and underscores; or "utf8" for full
# UTF-8 support.
[ metric_name_validation_scheme: <string> | default = "legacy" ]
# Limit on total number of positive and negative buckets allowed in a single # Limit on total number of positive and negative buckets allowed in a single
# native histogram. The resolution of a histogram with more buckets will be # native histogram. The resolution of a histogram with more buckets will be
# reduced until the number of buckets is within the limit. If the limit cannot # reduced until the number of buckets is within the limit. If the limit cannot
@ -947,7 +968,9 @@ tls_config:
# The host to use if the container is in host networking mode. # The host to use if the container is in host networking mode.
[ host_networking_host: <string> | default = "localhost" ] [ host_networking_host: <string> | default = "localhost" ]
# Match the first network if the container has multiple networks defined, thus avoiding collecting duplicate targets. # Sort all non-nil networks in ascending order based on network name and
# get the first network if the container has multiple networks defined,
# thus avoiding collecting duplicate targets.
[ match_first_network: <boolean> | default = true ] [ match_first_network: <boolean> | default = true ]
# Optional filters to limit the discovery process to a subset of available # Optional filters to limit the discovery process to a subset of available
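The `match_first_network` behavior documented above is implemented in the Docker discovery change earlier in this commit. Distilled into a self-contained sketch, with a stand-in type for Docker's `network.EndpointSettings`:

```go
package main

import (
	"fmt"
	"sort"
)

// endpoint stands in for Docker's network.EndpointSettings.
type endpoint struct{ IP string }

// firstNetwork keeps only the first non-nil network by sorted name,
// mirroring the selection rule described above.
func firstNetwork(networks map[string]*endpoint) map[string]*endpoint {
	keys := make([]string, 0, len(networks))
	for name, n := range networks {
		if n != nil {
			keys = append(keys, name)
		}
	}
	if len(keys) == 0 {
		return nil
	}
	sort.Strings(keys)
	return map[string]*endpoint{keys[0]: networks[keys[0]]}
}

func main() {
	nets := map[string]*endpoint{
		"dockersd_private1": {IP: "172.21.0.3"},
		"dockersd_private":  {IP: "172.20.0.3"},
	}
	fmt.Println(firstNetwork(nets)) // keeps "dockersd_private"
}
```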
@ -3265,12 +3288,16 @@ Initially, aside from the configured per-target labels, a target's `job`
label is set to the `job_name` value of the respective scrape configuration. label is set to the `job_name` value of the respective scrape configuration.
The `__address__` label is set to the `<host>:<port>` address of the target. The `__address__` label is set to the `<host>:<port>` address of the target.
After relabeling, the `instance` label is set to the value of `__address__` by default if After relabeling, the `instance` label is set to the value of `__address__` by default if
it was not set during relabeling. The `__scheme__` and `__metrics_path__` labels it was not set during relabeling.
are set to the scheme and metrics path of the target respectively. The `__param_<name>`
label is set to the value of the first passed URL parameter called `<name>`. The `__scheme__` and `__metrics_path__` labels
are set to the scheme and metrics path of the target respectively, as specified in `scrape_config`.
The `__param_<name>`
label is set to the value of the first passed URL parameter called `<name>`, as defined in `scrape_config`.
The `__scrape_interval__` and `__scrape_timeout__` labels are set to the target's The `__scrape_interval__` and `__scrape_timeout__` labels are set to the target's
interval and timeout. interval and timeout, as specified in `scrape_config`.
Additional labels prefixed with `__meta_` may be available during the Additional labels prefixed with `__meta_` may be available during the
relabeling phase. They are set by the service discovery mechanism that provided relabeling phase. They are set by the service discovery mechanism that provided

View file

@ -200,8 +200,9 @@ won't work when you push OTLP metrics.
`--enable-feature=promql-experimental-functions` `--enable-feature=promql-experimental-functions`
Enables PromQL functions that are considered experimental and whose name or Enables PromQL functions that are considered experimental. These functions
semantics could change. might change their name, syntax, or semantics. They might also get removed
entirely.
## Created Timestamps Zero Injection ## Created Timestamps Zero Injection
@ -240,3 +241,33 @@ metadata changes as WAL records on a per-series basis.
This must be used if This must be used if
you are also using remote write 2.0 as it will only gather metadata from the WAL. you are also using remote write 2.0 as it will only gather metadata from the WAL.
## Delay compaction start time
`--enable-feature=delayed-compaction`
A random offset, up to `10%` of the chunk range, is added to the Head compaction start time. This assists Prometheus instances in avoiding simultaneous compactions and reduces the load on shared resources.
Only auto Head compactions and the operations directly resulting from them are subject to this delay.
In the event of multiple consecutive Head compactions being possible, only the first compaction experiences this delay.
Note that during this delay, the Head continues its usual operations, which include serving and appending series.
Despite the delay in compaction, the blocks produced are time-aligned in the same manner as they would be if the delay was not in place.
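A sketch of the jitter described above, assuming the usual 2h chunk range; the names are illustrative rather than the TSDB's:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitteredStart delays the Head compaction start by a random offset of up to
// 10% of the chunk range, so that co-located instances don't compact at once.
func jitteredStart(now time.Time, chunkRange time.Duration) time.Time {
	maxDelay := chunkRange / 10
	if maxDelay <= 0 {
		return now
	}
	return now.Add(time.Duration(rand.Int63n(int64(maxDelay))))
}

func main() {
	fmt.Println("compaction would start at:", jitteredStart(time.Now(), 2*time.Hour))
}
```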
## Delay __name__ label removal for PromQL engine
`--enable-feature=promql-delayed-name-removal`
When enabled, Prometheus will change the way in which the `__name__` label is removed from PromQL query results (for functions and expressions for which this is necessary). Specifically, it will delay the removal to the last step of the query evaluation, instead of every time an expression or function creating derived metrics is evaluated.
This allows optionally preserving the `__name__` label via the `label_replace` and `label_join` functions, and helps prevent the "vector cannot contain metrics with the same labelset" error, which can happen when applying a regex-matcher to the `__name__` label.
## UTF-8 Name Support
`--enable-feature=utf8-names`
When enabled, changes the metric and label name validation scheme inside Prometheus to allow the full UTF-8 character set.
By itself, this flag does not enable the request of UTF-8 names via content negotiation.
Users will also have to set `metric_name_validation_scheme` to enable the feature, either in the global config or on a per-scrape-config basis.
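For code embedding the Prometheus libraries, the switch is the same one the config test earlier in this commit flips; a sketch using `github.com/prometheus/common/model` (the feature flag wires this up for the Prometheus binary itself):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Same process-wide switch the config test earlier in this commit sets.
	model.NameValidationScheme = model.UTF8Validation
	fmt.Println(model.IsValidMetricName("http.requests.total")) // true under UTF-8 validation
}
```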

View file

@ -41,7 +41,7 @@ vector is the only type which can be graphed.
_Notes about the experimental native histograms:_ _Notes about the experimental native histograms:_
* Ingesting native histograms has to be enabled via a [feature * Ingesting native histograms has to be enabled via a [feature
flag](../../feature_flags.md#native-histograms). flag](../feature_flags.md#native-histograms).
* Once native histograms have been ingested into the TSDB (and even after * Once native histograms have been ingested into the TSDB (and even after
disabling the feature flag again), both instant vectors and range vectors may disabling the feature flag again), both instant vectors and range vectors may
now contain samples that aren't simple floating point numbers (float samples) now contain samples that aren't simple floating point numbers (float samples)

View file

@ -617,9 +617,9 @@ Like `sort`, `sort_desc` only affects the results of instant queries, as range q
## `sort_by_label()` ## `sort_by_label()`
**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.** **This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**
`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by their label values and sample value in case of label values being equal, in ascending order. `sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by the values of the given labels in ascending order. In case these label values are equal, elements are sorted by their full label sets.
Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering. Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering.
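A rough model of the documented ordering, with illustrative types and plain string comparison standing in for the engine's natural sort order:

```go
package main

import (
	"fmt"
	"sort"
)

type element struct {
	Labels map[string]string
}

// sortByLabel compares the requested labels in order and falls back to the
// full label set on ties, as described above.
func sortByLabel(elems []element, labels ...string) {
	sort.SliceStable(elems, func(i, j int) bool {
		for _, name := range labels {
			if a, b := elems[i].Labels[name], elems[j].Labels[name]; a != b {
				return a < b
			}
		}
		return fmt.Sprint(elems[i].Labels) < fmt.Sprint(elems[j].Labels)
	})
}

func main() {
	elems := []element{
		{Labels: map[string]string{"instance": "b", "job": "api"}},
		{Labels: map[string]string{"instance": "a", "job": "api"}},
	}
	sortByLabel(elems, "instance")
	fmt.Println(elems[0].Labels["instance"], elems[1].Labels["instance"]) // a b
}
```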
@ -627,7 +627,7 @@ This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_so
## `sort_by_label_desc()` ## `sort_by_label_desc()`
**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.** **This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**
Same as `sort_by_label`, but sorts in descending order. Same as `sort_by_label`, but sorts in descending order.
@ -676,7 +676,7 @@ over time and return an instant vector with per-series aggregation results:
* `last_over_time(range-vector)`: the most recent point value in the specified interval. * `last_over_time(range-vector)`: the most recent point value in the specified interval.
* `present_over_time(range-vector)`: the value 1 for any series in the specified interval. * `present_over_time(range-vector)`: the value 1 for any series in the specified interval.
If the [feature flag](../feature_flags/) If the [feature flag](../feature_flags.md#experimental-promql-functions)
`--enable-feature=promql-experimental-functions` is set, the following `--enable-feature=promql-experimental-functions` is set, the following
additional functions are available: additional functions are available:

View file

@ -19,7 +19,7 @@ remote_write:
protobuf_message: "io.prometheus.write.v2.Request" protobuf_message: "io.prometheus.write.v2.Request"
``` ```
or for deprecated Remote Write 1.0 message: or for the eventually deprecated Remote Write 1.0 message:
```yaml ```yaml
remote_write: remote_write:

View file

@ -8,19 +8,19 @@ require (
github.com/gogo/protobuf v1.3.2 github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4 github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.11.5 github.com/influxdata/influxdb v1.11.5
github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_golang v1.20.0
github.com/prometheus/common v0.55.0 github.com/prometheus/common v0.57.0
github.com/prometheus/prometheus v0.53.1 github.com/prometheus/prometheus v0.53.1
github.com/stretchr/testify v1.9.0 github.com/stretchr/testify v1.9.0
) )
require ( require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
github.com/aws/aws-sdk-go v1.53.16 // indirect github.com/aws/aws-sdk-go v1.55.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@ -35,8 +35,7 @@ require (
github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.8 // indirect github.com/klauspost/compress v1.17.9 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect
@ -56,14 +55,14 @@ require (
go.opentelemetry.io/otel/trace v1.27.0 // indirect go.opentelemetry.io/otel/trace v1.27.0 // indirect
go.uber.org/atomic v1.11.0 // indirect go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.24.0 // indirect golang.org/x/crypto v0.25.0 // indirect
golang.org/x/net v0.26.0 // indirect golang.org/x/net v0.27.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect
golang.org/x/sys v0.21.0 // indirect golang.org/x/sys v0.22.0 // indirect
golang.org/x/text v0.16.0 // indirect golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
google.golang.org/grpc v1.64.0 // indirect google.golang.org/grpc v1.65.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect

View file

@ -1,9 +1,9 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk= github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0= github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
@ -26,8 +26,8 @@ github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8V
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -37,9 +37,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@ -50,8 +49,6 @@ github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LO
github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@ -190,8 +187,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -256,8 +253,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -267,8 +264,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@ -279,8 +276,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc= github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc=
github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8= github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@ -326,8 +323,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@ -347,8 +344,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
@ -376,11 +373,11 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@ -405,8 +402,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

go.mod

@ -13,15 +13,15 @@ require (
github.com/KimMachineGun/automemlimit v0.6.1 github.com/KimMachineGun/automemlimit v0.6.1
github.com/alecthomas/kingpin/v2 v2.4.0 github.com/alecthomas/kingpin/v2 v2.4.0
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30
github.com/aws/aws-sdk-go v1.54.19 github.com/aws/aws-sdk-go v1.55.5
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0 github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0 github.com/dennwc/varint v1.0.0
github.com/digitalocean/godo v1.119.0 github.com/digitalocean/godo v1.119.0
github.com/docker/docker v27.0.3+incompatible github.com/docker/docker v27.1.1+incompatible
github.com/edsrzf/mmap-go v1.1.0 github.com/edsrzf/mmap-go v1.1.0
github.com/envoyproxy/go-control-plane v0.12.0 github.com/envoyproxy/go-control-plane v0.12.0
github.com/envoyproxy/protoc-gen-validate v1.0.4 github.com/envoyproxy/protoc-gen-validate v1.1.0
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
github.com/fsnotify/fsnotify v1.7.0 github.com/fsnotify/fsnotify v1.7.0
github.com/go-kit/log v0.2.1 github.com/go-kit/log v0.2.1
@ -36,10 +36,10 @@ require (
github.com/gophercloud/gophercloud v1.14.0 github.com/gophercloud/gophercloud v1.14.0
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.29.2 github.com/hashicorp/consul/api v1.29.4
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3
github.com/hetznercloud/hcloud-go/v2 v2.12.0 github.com/hetznercloud/hcloud-go/v2 v2.13.1
github.com/ionos-cloud/sdk-go/v6 v6.1.11 github.com/ionos-cloud/sdk-go/v6 v6.2.1
github.com/json-iterator/go v1.1.12 github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.9 github.com/klauspost/compress v1.17.9
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
@ -52,9 +52,9 @@ require (
github.com/oklog/ulid v1.3.1 github.com/oklog/ulid v1.3.1
github.com/ovh/go-ovh v1.6.0 github.com/ovh/go-ovh v1.6.0
github.com/prometheus/alertmanager v0.27.0 github.com/prometheus/alertmanager v0.27.0
github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_golang v1.20.2
github.com/prometheus/client_model v0.6.1 github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.55.0 github.com/prometheus/common v0.56.0
github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/assets v0.2.0
github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/common/sigv4 v0.1.0
github.com/prometheus/exporter-toolkit v0.11.0 github.com/prometheus/exporter-toolkit v0.11.0
@ -75,15 +75,14 @@ require (
go.uber.org/automaxprocs v1.5.3 go.uber.org/automaxprocs v1.5.3
go.uber.org/goleak v1.3.0 go.uber.org/goleak v1.3.0
go.uber.org/multierr v1.11.0 go.uber.org/multierr v1.11.0
golang.org/x/net v0.27.0 golang.org/x/oauth2 v0.22.0
golang.org/x/oauth2 v0.21.0
golang.org/x/sync v0.7.0 golang.org/x/sync v0.7.0
golang.org/x/sys v0.22.0 golang.org/x/sys v0.22.0
golang.org/x/text v0.16.0 golang.org/x/text v0.16.0
golang.org/x/time v0.5.0 golang.org/x/time v0.5.0
golang.org/x/tools v0.23.0 golang.org/x/tools v0.23.0
google.golang.org/api v0.189.0 google.golang.org/api v0.190.0
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f
google.golang.org/grpc v1.65.0 google.golang.org/grpc v1.65.0
google.golang.org/protobuf v1.34.2 google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
@ -96,7 +95,7 @@ require (
) )
require ( require (
cloud.google.com/go/auth v0.7.2 // indirect cloud.google.com/go/auth v0.7.3 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect
cloud.google.com/go/compute/metadata v0.5.0 // indirect cloud.google.com/go/compute/metadata v0.5.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
@ -140,9 +139,9 @@ require (
github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-querystring v1.1.0 // indirect github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.7 // indirect github.com/google/s2a-go v0.1.8 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.5 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect
@ -190,8 +189,9 @@ require (
golang.org/x/crypto v0.25.0 // indirect golang.org/x/crypto v0.25.0 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/mod v0.19.0 // indirect golang.org/x/mod v0.19.0 // indirect
golang.org/x/net v0.27.0 // indirect
golang.org/x/term v0.22.0 // indirect golang.org/x/term v0.22.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect
gotest.tools/v3 v3.0.3 // indirect gotest.tools/v3 v3.0.3 // indirect

go.sum

@ -12,8 +12,8 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/auth v0.7.2 h1:uiha352VrCDMXg+yoBtaD0tUF4Kv9vrtrWPYXwutnDE= cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY=
cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs= cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA=
cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=
cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
@ -149,8 +149,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE= github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@ -171,8 +171,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI=
github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
@ -322,8 +322,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g= github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g=
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@ -332,8 +332,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8= github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8=
github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@ -353,8 +353,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw= github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
github.com/hashicorp/consul/api v1.29.2/go.mod h1:0YObcaLNDSbtlgzIRtmRXI1ZkeuK0trCBxwZQ4MYnIk= github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg=
github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0= github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
@ -414,8 +414,8 @@ github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtx
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.12.0 h1:nOgfNTo0gyXZJJdM8mo/XH5MO/e80wAEpldRzdWayhY= github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ=
github.com/hetznercloud/hcloud-go/v2 v2.12.0/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0= github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@ -423,8 +423,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY=
github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/ionos-cloud/sdk-go/v6 v6.2.1/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI=
github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@ -608,8 +608,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -625,8 +625,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= github.com/prometheus/common v0.56.0 h1:UffReloqkBtvtQEYDg2s+uDPGRrJyC6vZWPGXf6OhPY=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/common v0.56.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
@ -865,8 +865,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1047,8 +1047,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.189.0 h1:equMo30LypAkdkLMBqfeIqtyAnlyig1JSZArl4XPwdI= google.golang.org/api v0.190.0 h1:ASM+IhLY1zljNdLu19W1jTmU6A+gMk6M46Wlur61s+Q=
google.golang.org/api v0.189.0/go.mod h1:FLWGJKb0hb+pU2j+rJqwbnsF+ym+fQs73rbJ+KAUgy8= google.golang.org/api v0.190.0/go.mod h1:QIr6I9iedBLnfqoD6L6Vze1UvS5Hzj5r2aUBOaZnLHo=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1085,10 +1085,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY= google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk=
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0= google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade h1:oCRSWfwGXQsqlVdErcyTt4A93Y8fo0/9D4b1gnI++qo= google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=


@ -15,7 +15,9 @@ package exemplar
import "github.com/prometheus/prometheus/model/labels" import "github.com/prometheus/prometheus/model/labels"
// The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters // ExemplarMaxLabelSetLength is defined by OpenMetrics: "The combined length of
// the label names and values of an Exemplar's LabelSet MUST NOT exceed 128
// UTF-8 characters."
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars
const ExemplarMaxLabelSetLength = 128 const ExemplarMaxLabelSetLength = 128
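Below is a minimal sketch, not part of the diff, of how a caller could check this limit; the helper name `exemplarLabelSetLength` is illustrative, and it assumes `labels.Labels.Range` and `unicode/utf8` rune counting to match the spec's "UTF-8 characters" wording:

```go
package main

import (
	"fmt"
	"unicode/utf8"

	"github.com/prometheus/prometheus/model/labels"
)

// exemplarLabelSetLength sums the UTF-8 character counts of every label
// name and value, mirroring the OpenMetrics "128 UTF-8 characters" rule
// behind ExemplarMaxLabelSetLength.
func exemplarLabelSetLength(ls labels.Labels) int {
	total := 0
	ls.Range(func(l labels.Label) {
		total += utf8.RuneCountInString(l.Name) + utf8.RuneCountInString(l.Value)
	})
	return total
}

func main() {
	ls := labels.FromStrings("trace_id", "abc123")
	fmt.Println(exemplarLabelSetLength(ls) <= 128) // true
}
```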
@ -49,7 +51,7 @@ func (e Exemplar) Equals(e2 Exemplar) bool {
return e.Value == e2.Value return e.Value == e2.Value
} }
// Sort first by timestamp, then value, then labels. // Compare orders exemplars first by timestamp, then value, then labels.
func Compare(a, b Exemplar) int { func Compare(a, b Exemplar) int {
if a.Ts < b.Ts { if a.Ts < b.Ts {
return -1 return -1
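Since Compare returns a three-way int, it can be passed straight to the standard library's sort; a small usage sketch under that assumption (the `es` slice and function name are hypothetical):

```go
package main

import (
	"slices"

	"github.com/prometheus/prometheus/model/exemplar"
)

// sortExemplars orders exemplars by timestamp, then value, then labels,
// reusing the package's Compare function as the comparator.
func sortExemplars(es []exemplar.Exemplar) {
	slices.SortFunc(es, exemplar.Compare)
}
```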


@ -315,7 +315,8 @@ func Compare(a, b Labels) int {
return len(a) - len(b) return len(a) - len(b)
} }
// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. // CopyFrom copies labels from b on top of whatever was in ls previously,
// reusing memory or expanding if needed.
func (ls *Labels) CopyFrom(b Labels) { func (ls *Labels) CopyFrom(b Labels) {
(*ls) = append((*ls)[:0], b...) (*ls) = append((*ls)[:0], b...)
} }
@ -422,7 +423,7 @@ type ScratchBuilder struct {
add Labels add Labels
} }
// Symbol-table is no-op, just for api parity with dedupelabels. // SymbolTable is a no-op, provided only for API parity with dedupelabels.
type SymbolTable struct{} type SymbolTable struct{}
func NewSymbolTable() *SymbolTable { return nil } func NewSymbolTable() *SymbolTable { return nil }
@ -458,7 +459,7 @@ func (b *ScratchBuilder) Add(name, value string) {
b.add = append(b.add, Label{Name: name, Value: value}) b.add = append(b.add, Label{Name: name, Value: value})
} }
// Add a name/value pair, using []byte instead of string. // UnsafeAddBytes adds a name/value pair, using []byte instead of string.
// The '-tags stringlabels' version of this function is unsafe, hence the name. // The '-tags stringlabels' version of this function is unsafe, hence the name.
// This version is safe - it copies the strings immediately - but we keep the same name so everything compiles. // This version is safe - it copies the strings immediately - but we keep the same name so everything compiles.
func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) {
@ -475,14 +476,14 @@ func (b *ScratchBuilder) Assign(ls Labels) {
b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice. b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice.
} }
// Return the name/value pairs added so far as a Labels object. // Labels returns the name/value pairs added so far as a Labels object.
// Note: if you want them sorted, call Sort() first. // Note: if you want them sorted, call Sort() first.
func (b *ScratchBuilder) Labels() Labels { func (b *ScratchBuilder) Labels() Labels {
// Copy the slice, so the next use of ScratchBuilder doesn't overwrite. // Copy the slice, so the next use of ScratchBuilder doesn't overwrite.
return append([]Label{}, b.add...) return append([]Label{}, b.add...)
} }
// Write the newly-built Labels out to ls. // Overwrite writes the newly-built Labels out to ls.
// Callers must ensure that there are no other references to ls, or any strings fetched from it. // Callers must ensure that there are no other references to ls, or any strings fetched from it.
func (b *ScratchBuilder) Overwrite(ls *Labels) { func (b *ScratchBuilder) Overwrite(ls *Labels) {
*ls = append((*ls)[:0], b.add...) *ls = append((*ls)[:0], b.add...)
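A hedged sketch of the builder flow these comments describe: add pairs, sort, then either take an independent copy via Labels() or recycle an existing slice via Overwrite(); the metric and label names are placeholders:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	b := labels.NewScratchBuilder(2)
	b.Add("job", "api")
	b.Add("__name__", "http_requests_total")
	b.Sort() // labels must be sorted by name before use

	fresh := b.Labels() // independent copy, safe to retain

	var reused labels.Labels
	b.Overwrite(&reused) // reuses reused's backing array; caller must hold no other references to it
	fmt.Println(fresh, reused)
}
```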


@ -95,12 +95,23 @@ func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error {
} }
// IsValid checks if the metric name or label names are valid. // IsValid checks if the metric name or label names are valid.
func (ls Labels) IsValid() bool { func (ls Labels) IsValid(validationScheme model.ValidationScheme) bool {
err := ls.Validate(func(l Label) error { err := ls.Validate(func(l Label) error {
if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { if l.Name == model.MetricNameLabel {
// If the default validation scheme has been overridden with legacy mode,
// we need to call the special legacy validation checker.
if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation && !model.IsValidLegacyMetricName(string(model.LabelValue(l.Value))) {
return strconv.ErrSyntax return strconv.ErrSyntax
} }
if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { if !model.IsValidMetricName(model.LabelValue(l.Value)) {
return strconv.ErrSyntax
}
}
if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation {
if !model.LabelName(l.Name).IsValidLegacy() || !model.LabelValue(l.Value).IsValid() {
return strconv.ErrSyntax
}
} else if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() {
return strconv.ErrSyntax return strconv.ErrSyntax
} }
return nil return nil
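A usage sketch of the new per-call scheme parameter, assuming the behavior encoded above (global scheme set to UTF-8, legacy checks requested per call):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	model.NameValidationScheme = model.UTF8Validation
	ls := labels.FromStrings("__name__", "test.metric", "job", "check")

	fmt.Println(ls.IsValid(model.UTF8Validation))   // true: dots are allowed under UTF-8 validation
	fmt.Println(ls.IsValid(model.LegacyValidation)) // false: "test.metric" fails the legacy name check
}
```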


@ -21,6 +21,7 @@ import (
"strings" "strings"
"testing" "testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
) )
@ -272,11 +273,86 @@ func TestLabels_IsValid(t *testing.T) {
}, },
} { } {
t.Run("", func(t *testing.T) { t.Run("", func(t *testing.T) {
require.Equal(t, test.expected, test.input.IsValid()) require.Equal(t, test.expected, test.input.IsValid(model.LegacyValidation))
}) })
} }
} }
func TestLabels_ValidationModes(t *testing.T) {
for _, test := range []struct {
input Labels
globalMode model.ValidationScheme
callMode model.ValidationScheme
expected bool
}{
{
input: FromStrings(
"__name__", "test.metric",
"hostname", "localhost",
"job", "check",
),
globalMode: model.UTF8Validation,
callMode: model.UTF8Validation,
expected: true,
},
{
input: FromStrings(
"__name__", "test",
"\xc5 bad utf8", "localhost",
"job", "check",
),
globalMode: model.UTF8Validation,
callMode: model.UTF8Validation,
expected: false,
},
{
// Setting the common model to legacy validation and then trying to check for UTF-8 on a
// per-call basis is not supported.
input: FromStrings(
"__name__", "test.utf8.metric",
"hostname", "localhost",
"job", "check",
),
globalMode: model.LegacyValidation,
callMode: model.UTF8Validation,
expected: false,
},
{
input: FromStrings(
"__name__", "test",
"hostname", "localhost",
"job", "check",
),
globalMode: model.LegacyValidation,
callMode: model.LegacyValidation,
expected: true,
},
{
input: FromStrings(
"__name__", "test.utf8.metric",
"hostname", "localhost",
"job", "check",
),
globalMode: model.UTF8Validation,
callMode: model.LegacyValidation,
expected: false,
},
{
input: FromStrings(
"__name__", "test",
"host.name", "localhost",
"job", "check",
),
globalMode: model.UTF8Validation,
callMode: model.LegacyValidation,
expected: false,
},
} {
model.NameValidationScheme = test.globalMode
require.Equal(t, test.expected, test.input.IsValid(test.callMode))
}
}
func TestLabels_Equal(t *testing.T) {
labels := FromStrings(
"aaa", "111",


@ -106,8 +106,8 @@ const (
EntryInvalid Entry = -1
EntryType Entry = 0
EntryHelp Entry = 1
-EntrySeries Entry = 2 // A series with a simple float64 as value.
EntrySeries Entry = 2 // EntrySeries marks a series with a simple float64 as value.
EntryComment Entry = 3
EntryUnit Entry = 4
-EntryHistogram Entry = 5 // A series with a native histogram as a value.
EntryHistogram Entry = 5 // EntryHistogram marks a series with a native histogram as a value.
)


@ -94,14 +94,44 @@ type OpenMetricsParser struct {
exemplarVal float64
exemplarTs int64
hasExemplarTs bool
skipCTSeries bool
}
-// NewOpenMetricsParser returns a new parser of the byte slice.
-func NewOpenMetricsParser(b []byte, st *labels.SymbolTable) Parser {
-return &OpenMetricsParser{
type openMetricsParserOptions struct {
SkipCTSeries bool
}
type OpenMetricsOption func(*openMetricsParserOptions)
// WithOMParserCTSeriesSkipped turns off exposing _created lines
// as series, leaving them used only for parsing the created timestamp
// via the `CreatedTimestamp` method.
//
// It's recommended to use this option to avoid using _created lines for
// purposes other than the created timestamp, but it is left off by default
// for best-effort compatibility.
func WithOMParserCTSeriesSkipped() OpenMetricsOption {
return func(o *openMetricsParserOptions) {
o.SkipCTSeries = true
}
}
// NewOpenMetricsParser returns a new parser for the byte slice, with an option to skip CT series parsing.
func NewOpenMetricsParser(b []byte, st *labels.SymbolTable, opts ...OpenMetricsOption) Parser {
options := &openMetricsParserOptions{}
for _, opt := range opts {
opt(options)
}
parser := &OpenMetricsParser{
l: &openMetricsLexer{b: b},
builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
skipCTSeries: options.SkipCTSeries,
}
return parser
}
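For illustration, a minimal sketch of the functional-option pattern in use (not part of this diff; data is a placeholder payload):

p := NewOpenMetricsParser(data, labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
for {
	entry, err := p.Next()
	if errors.Is(err, io.EOF) {
		break
	}
	if err != nil {
		break // handle the parse error as appropriate
	}
	_ = entry // _created lines are never surfaced as EntrySeries here
}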
// Series returns the bytes of the series, the timestamp if set, and the value
@ -219,10 +249,90 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
return true
}
-// CreatedTimestamp returns nil as it's not implemented yet.
-// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980
// CreatedTimestamp returns the created timestamp for the current Metric if it exists, or nil.
// NOTE(Maniktherana): Might use additional CPU/mem resources due to the deep copy of the parser required for peeking, given the 1.0 OM specification on _created series.
func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
if !TypeRequiresCT(p.mtype) {
// Not a CT supported metric type, fast path.
return nil
}
var (
currLset labels.Labels
buf []byte
peekWithoutNameLsetHash uint64
)
p.Metric(&currLset)
currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile")
// Search for the _created line for the currFamilyLsetHash using ephemeral parser until
// we see EOF or new metric family. We have to do it as we don't know where (and if)
// that CT line is.
// TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
peek := deepCopy(p)
for {
eType, err := peek.Next()
if err != nil {
// This means peek will give an error too later on, so definitely no CT line found.
// This might result in partial scrape with wrong/missing CT, but only
// spec improvement would help.
// TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
return nil
}
if eType != EntrySeries {
// Assume we hit different family, no CT line found.
return nil
}
var peekedLset labels.Labels
peek.Metric(&peekedLset)
peekedName := peekedLset.Get(model.MetricNameLabel)
if !strings.HasSuffix(peekedName, "_created") {
// Not a CT line, search more.
continue
}
// We got a CT line here, but let's search if CT line is actually for our series, edge case.
peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile")
if peekWithoutNameLsetHash != currFamilyLsetHash {
// CT line for a different series, for our series no CT.
return nil
}
ct := int64(peek.val)
return &ct
}
}
// TypeRequiresCT returns true if the metric type requires a _created timestamp.
func TypeRequiresCT(t model.MetricType) bool {
switch t {
case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram:
return true
default:
return false
}
}
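As a worked illustration of the peek-based lookup above (not part of this diff), take a counter exposition such as "# TYPE foo counter", "foo_total 17.0", "foo_created 1000"; once Next has returned the EntrySeries for foo_total:

if ct := p.CreatedTimestamp(); ct != nil {
	fmt.Println(*ct) // 1000, the parsed value of the matching foo_created line
}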
// deepCopy creates a copy of a parser without re-using the slices' original memory addresses.
func deepCopy(p *OpenMetricsParser) OpenMetricsParser {
newB := make([]byte, len(p.l.b))
copy(newB, p.l.b)
newLexer := &openMetricsLexer{
b: newB,
i: p.l.i,
start: p.l.start,
err: p.l.err,
state: p.l.state,
}
newParser := OpenMetricsParser{
l: newLexer,
builder: p.builder,
mtype: p.mtype,
val: p.val,
skipCTSeries: false,
}
return newParser
}
// nextToken returns the next token from the openMetricsLexer.
@ -337,7 +447,13 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
}
p.series = p.l.b[p.start:p.l.i]
-return p.parseMetricSuffix(p.nextToken())
if err := p.parseSeriesEndOfLine(p.nextToken()); err != nil {
return EntryInvalid, err
}
if p.skipCTSeries && p.isCreatedSeries() {
return p.Next()
}
return EntrySeries, nil
case tMName:
p.offsets = append(p.offsets, p.start, p.l.i)
p.series = p.l.b[p.start:p.l.i]
@ -351,8 +467,14 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
p.series = p.l.b[p.start:p.l.i]
t2 = p.nextToken()
}
-return p.parseMetricSuffix(t2)
if err := p.parseSeriesEndOfLine(t2); err != nil {
return EntryInvalid, err
}
if p.skipCTSeries && p.isCreatedSeries() {
return p.Next()
}
return EntrySeries, nil
default:
err = p.parseError("expected a valid start token", t)
}
@ -467,51 +589,64 @@ func (p *OpenMetricsParser) parseLVals(offsets []int, isExemplar bool) ([]int, e
}
}
-// parseMetricSuffix parses the end of the line after the metric name and
-// labels. It starts parsing with the provided token.
-func (p *OpenMetricsParser) parseMetricSuffix(t token) (Entry, error) {
// isCreatedSeries returns true if the current series is a _created series.
func (p *OpenMetricsParser) isCreatedSeries() bool {
var newLbs labels.Labels
p.Metric(&newLbs)
name := newLbs.Get(model.MetricNameLabel)
if TypeRequiresCT(p.mtype) && strings.HasSuffix(name, "_created") {
return true
}
return false
}
// parseSeriesEndOfLine parses the series end of the line (value, optional
// timestamp, commentary, etc.) after the metric name and labels.
// It starts parsing with the provided token.
func (p *OpenMetricsParser) parseSeriesEndOfLine(t token) error {
if p.offsets[0] == -1 {
-return EntryInvalid, fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i])
return fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i])
}
var err error
p.val, err = p.getFloatValue(t, "metric")
if err != nil {
-return EntryInvalid, err
return err
}
p.hasTS = false
switch t2 := p.nextToken(); t2 {
case tEOF:
-return EntryInvalid, errors.New("data does not end with # EOF")
return errors.New("data does not end with # EOF")
case tLinebreak:
break
case tComment:
if err := p.parseComment(); err != nil {
-return EntryInvalid, err
return err
}
case tTimestamp:
p.hasTS = true
var ts float64
// A float is enough to hold what we need for millisecond resolution.
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
-return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
}
if math.IsNaN(ts) || math.IsInf(ts, 0) {
-return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts)
return fmt.Errorf("invalid timestamp %f", ts)
}
p.ts = int64(ts * 1000)
switch t3 := p.nextToken(); t3 {
case tLinebreak:
case tComment:
if err := p.parseComment(); err != nil {
-return EntryInvalid, err
return err
}
default:
-return EntryInvalid, p.parseError("expected next entry after timestamp", t3)
return p.parseError("expected next entry after timestamp", t3)
}
}
-return EntrySeries, nil
return nil
}
func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) {


@ -14,6 +14,7 @@
package textparse
import (
"errors"
"io"
"testing"
@ -24,6 +25,8 @@ import (
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
) )
func int64p(x int64) *int64 { return &x }
func TestOpenMetricsParse(t *testing.T) { func TestOpenMetricsParse(t *testing.T) {
input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations. input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary # TYPE go_gc_duration_seconds summary
@ -63,15 +66,34 @@ ss{A="a"} 0
_metric_starting_with_underscore 1
testmetric{_label_starting_with_underscore="foo"} 1
testmetric{label="\"bar\""} 1
# HELP foo Counter with and without labels to certify CT is parsed for both cases
# TYPE foo counter
-foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
foo_total 17.0 1520879607.789 # {id="counter-test"} 5
foo_created 1000
foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5
foo_created{a="b"} 1000
# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines away
# TYPE bar summary
bar_count 17.0
bar_sum 324789.3
bar{quantile="0.95"} 123.7
bar{quantile="0.99"} 150.0
bar_created 1520430000
# HELP baz Histogram with the same objective as the summary above
# TYPE baz histogram
baz_bucket{le="0.0"} 0
baz_bucket{le="+Inf"} 17
baz_count 17
baz_sum 324789.3
baz_created 1520430000
# HELP fizz_created Gauge which shouldn't be parsed as CT
# TYPE fizz_created gauge
fizz_created 17.0`
input += "\n# HELP metric foo\x00bar" input += "\n# HELP metric foo\x00bar"
input += "\nnull_byte_metric{a=\"abc\x00\"} 1" input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
input += "\n# EOF\n" input += "\n# EOF\n"
int64p := func(x int64) *int64 { return &x }
exp := []expectedParse{ exp := []expectedParse{
{ {
m: "go_gc_duration_seconds", m: "go_gc_duration_seconds",
@ -216,6 +238,9 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
m: "testmetric{label=\"\\\"bar\\\"\"}", m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1, v: 1,
lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`), lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
}, {
m: "foo",
help: "Counter with and without labels to certify CT is parsed for both cases",
}, {
m: "foo",
typ: model.MetricTypeCounter,
@ -225,6 +250,76 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
lset: labels.FromStrings("__name__", "foo_total"), lset: labels.FromStrings("__name__", "foo_total"),
t: int64p(1520879607789), t: int64p(1520879607789),
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
ct: int64p(1000),
}, {
m: `foo_total{a="b"}`,
v: 17.0,
lset: labels.FromStrings("__name__", "foo_total", "a", "b"),
t: int64p(1520879607789),
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
ct: int64p(1000),
}, {
m: "bar",
help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far",
}, {
m: "bar",
typ: model.MetricTypeSummary,
}, {
m: "bar_count",
v: 17.0,
lset: labels.FromStrings("__name__", "bar_count"),
ct: int64p(1520430000),
}, {
m: "bar_sum",
v: 324789.3,
lset: labels.FromStrings("__name__", "bar_sum"),
ct: int64p(1520430000),
}, {
m: `bar{quantile="0.95"}`,
v: 123.7,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"),
ct: int64p(1520430000),
}, {
m: `bar{quantile="0.99"}`,
v: 150.0,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"),
ct: int64p(1520430000),
}, {
m: "baz",
help: "Histogram with the same objective as above's summary",
}, {
m: "baz",
typ: model.MetricTypeHistogram,
}, {
m: `baz_bucket{le="0.0"}`,
v: 0,
lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"),
ct: int64p(1520430000),
}, {
m: `baz_bucket{le="+Inf"}`,
v: 17,
lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"),
ct: int64p(1520430000),
}, {
m: `baz_count`,
v: 17,
lset: labels.FromStrings("__name__", "baz_count"),
ct: int64p(1520430000),
}, {
m: `baz_sum`,
v: 324789.3,
lset: labels.FromStrings("__name__", "baz_sum"),
ct: int64p(1520430000),
}, {
m: "fizz_created",
help: "Gauge which shouldn't be parsed as CT",
}, {
m: "fizz_created",
typ: model.MetricTypeGauge,
}, {
m: `fizz_created`,
v: 17,
lset: labels.FromStrings("__name__", "fizz_created"),
}, { }, {
m: "metric", m: "metric",
help: "foo\x00bar", help: "foo\x00bar",
@ -235,8 +330,8 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
}, },
} }
-p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable())
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
-checkParseResults(t, p, exp)
checkParseResultsWithCT(t, p, exp, true)
}
func TestUTF8OpenMetricsParse(t *testing.T) {
@ -251,6 +346,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
# UNIT "go.gc_duration_seconds" seconds # UNIT "go.gc_duration_seconds" seconds
{"go.gc_duration_seconds",quantile="0"} 4.9351e-05 {"go.gc_duration_seconds",quantile="0"} 4.9351e-05
{"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05 {"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05
{"go.gc_duration_seconds_created"} 12313
{"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05 {"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05
{"http.status",q="0.9",a="b"} 8.3835e-05 {"http.status",q="0.9",a="b"} 8.3835e-05
{"http.status",q="0.9",a="b"} 8.3835e-05 {"http.status",q="0.9",a="b"} 8.3835e-05
@ -274,10 +370,12 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
m: `{"go.gc_duration_seconds",quantile="0"}`, m: `{"go.gc_duration_seconds",quantile="0"}`,
v: 4.9351e-05, v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"), lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"),
ct: int64p(12313),
}, { }, {
m: `{"go.gc_duration_seconds",quantile="0.25"}`, m: `{"go.gc_duration_seconds",quantile="0.25"}`,
v: 7.424100000000001e-05, v: 7.424100000000001e-05,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"), lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"),
ct: int64p(12313),
}, { }, {
m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`, m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`,
v: 8.3835e-05, v: 8.3835e-05,
@ -306,8 +404,8 @@ choices}`, "strange©™\n'quoted' \"name\"", "6"),
},
}
-p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable())
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
-checkParseResults(t, p, exp)
checkParseResultsWithCT(t, p, exp, true)
}
func TestOpenMetricsParseErrors(t *testing.T) {
@ -598,10 +696,6 @@ func TestOpenMetricsParseErrors(t *testing.T) {
input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 -Inf", input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 -Inf",
err: `invalid exemplar timestamp -Inf`, err: `invalid exemplar timestamp -Inf`,
}, },
{
input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 Inf",
err: `invalid exemplar timestamp +Inf`,
},
} }
for i, c := range cases {
@ -684,3 +778,217 @@ func TestOMNullByteHandling(t *testing.T) {
require.Equal(t, c.err, err.Error(), "test %d", i)
}
}
// While not desirable, there are cases where CT fails to parse, and
// these tests document them.
// TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
func TestCTParseFailures(t *testing.T) {
input := `# HELP something Histogram with _created between buckets and summary
# TYPE something histogram
something_count 17
something_sum 324789.3
something_created 1520430001
something_bucket{le="0.0"} 0
something_bucket{le="+Inf"} 17
# HELP thing Histogram with _created as first line
# TYPE thing histogram
thing_created 1520430002
thing_count 17
thing_sum 324789.3
thing_bucket{le="0.0"} 0
thing_bucket{le="+Inf"} 17
# HELP yum Summary with _created between sum and quantiles
# TYPE yum summary
yum_count 17.0
yum_sum 324789.3
yum_created 1520430003
yum{quantile="0.95"} 123.7
yum{quantile="0.99"} 150.0
# HELP foobar Summary with _created as the first line
# TYPE foobar summary
foobar_created 1520430004
foobar_count 17.0
foobar_sum 324789.3
foobar{quantile="0.95"} 123.7
foobar{quantile="0.99"} 150.0`
input += "\n# EOF\n"
int64p := func(x int64) *int64 { return &x }
type expectCT struct {
m string
ct *int64
typ model.MetricType
help string
isErr bool
}
exp := []expectCT{
{
m: "something",
help: "Histogram with _created between buckets and summary",
isErr: false,
}, {
m: "something",
typ: model.MetricTypeHistogram,
isErr: false,
}, {
m: `something_count`,
ct: int64p(1520430001),
isErr: false,
}, {
m: `something_sum`,
ct: int64p(1520430001),
isErr: false,
}, {
m: `something_bucket{le="0.0"}`,
ct: int64p(1520430001),
isErr: true,
}, {
m: `something_bucket{le="+Inf"}`,
ct: int64p(1520430001),
isErr: true,
}, {
m: "thing",
help: "Histogram with _created as first line",
isErr: false,
}, {
m: "thing",
typ: model.MetricTypeHistogram,
isErr: false,
}, {
m: `thing_count`,
ct: int64p(1520430002),
isErr: true,
}, {
m: `thing_sum`,
ct: int64p(1520430002),
isErr: true,
}, {
m: `thing_bucket{le="0.0"}`,
ct: int64p(1520430002),
isErr: true,
}, {
m: `thing_bucket{le="+Inf"}`,
ct: int64p(1520430002),
isErr: true,
}, {
m: "yum",
help: "Summary with _created between summary and quantiles",
isErr: false,
}, {
m: "yum",
typ: model.MetricTypeSummary,
isErr: false,
}, {
m: "yum_count",
ct: int64p(1520430003),
isErr: false,
}, {
m: "yum_sum",
ct: int64p(1520430003),
isErr: false,
}, {
m: `yum{quantile="0.95"}`,
ct: int64p(1520430003),
isErr: true,
}, {
m: `yum{quantile="0.99"}`,
ct: int64p(1520430003),
isErr: true,
}, {
m: "foobar",
help: "Summary with _created as the first line",
isErr: false,
}, {
m: "foobar",
typ: model.MetricTypeSummary,
isErr: false,
}, {
m: "foobar_count",
ct: int64p(1520430004),
isErr: true,
}, {
m: "foobar_sum",
ct: int64p(1520430004),
isErr: true,
}, {
m: `foobar{quantile="0.95"}`,
ct: int64p(1520430004),
isErr: true,
}, {
m: `foobar{quantile="0.99"}`,
ct: int64p(1520430004),
isErr: true,
},
}
p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
i := 0
var res labels.Labels
for {
et, err := p.Next()
if errors.Is(err, io.EOF) {
break
}
require.NoError(t, err)
switch et {
case EntrySeries:
p.Metric(&res)
if ct := p.CreatedTimestamp(); exp[i].isErr {
require.Nil(t, ct)
} else {
require.Equal(t, *exp[i].ct, *ct)
}
default:
i++
continue
}
i++
}
}
func TestDeepCopy(t *testing.T) {
input := []byte(`# HELP go_goroutines A gauge of goroutines.
# TYPE go_goroutines gauge
go_goroutines 33 123.123
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds
go_gc_duration_seconds_created`)
st := labels.NewSymbolTable()
parser := NewOpenMetricsParser(input, st, WithOMParserCTSeriesSkipped()).(*OpenMetricsParser)
// Modify the original parser state
_, err := parser.Next()
require.NoError(t, err)
require.Equal(t, "go_goroutines", string(parser.l.b[parser.offsets[0]:parser.offsets[1]]))
require.True(t, parser.skipCTSeries)
// Create a deep copy of the parser
copyParser := deepCopy(parser)
etype, err := copyParser.Next()
require.NoError(t, err)
require.Equal(t, EntryType, etype)
require.True(t, parser.skipCTSeries)
require.False(t, copyParser.skipCTSeries)
// Modify the original parser further
parser.Next()
parser.Next()
parser.Next()
require.Equal(t, "go_gc_duration_seconds", string(parser.l.b[parser.offsets[0]:parser.offsets[1]]))
require.Equal(t, "summary", string(parser.mtype))
require.False(t, copyParser.skipCTSeries)
require.True(t, parser.skipCTSeries)
// Ensure the copy remains unchanged
copyParser.Next()
copyParser.Next()
require.Equal(t, "go_gc_duration_seconds", string(copyParser.l.b[copyParser.offsets[0]:copyParser.offsets[1]]))
require.False(t, copyParser.skipCTSeries)
}


@ -18,6 +18,7 @@ import (
"errors" "errors"
"io" "io"
"os" "os"
"strings"
"testing" "testing"
"github.com/klauspost/compress/gzip" "github.com/klauspost/compress/gzip"
@ -41,6 +42,7 @@ type expectedParse struct {
unit string
comment string
e *exemplar.Exemplar
ct *int64
}
func TestPromParse(t *testing.T) {
@ -188,6 +190,10 @@ testmetric{label="\"bar\""} 1`
}
func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
checkParseResultsWithCT(t, p, exp, false)
}
func checkParseResultsWithCT(t *testing.T, p Parser, exp []expectedParse, ctLinesRemoved bool) {
i := 0
var res labels.Labels
@ -205,6 +211,14 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
p.Metric(&res) p.Metric(&res)
if ctLinesRemoved {
// Are CT series skipped?
_, typ := p.Type()
if TypeRequiresCT(typ) && strings.HasSuffix(res.Get(labels.MetricName), "_created") {
t.Fatalf("we exped created lines skipped")
}
}
require.Equal(t, exp[i].m, string(m))
require.Equal(t, exp[i].t, ts)
require.Equal(t, exp[i].v, v)
@ -218,6 +232,11 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
require.True(t, found)
testutil.RequireEqual(t, *exp[i].e, e)
}
if ct := p.CreatedTimestamp(); ct != nil {
require.Equal(t, *exp[i].ct, *ct)
} else {
require.Nil(t, exp[i].ct)
}
case EntryType:
m, typ := p.Type()
@ -476,7 +495,9 @@ const (
func BenchmarkParse(b *testing.B) {
for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{
"prometheus": NewPromParser,
-"openmetrics": NewOpenMetricsParser,
"openmetrics": func(b []byte, st *labels.SymbolTable) Parser {
return NewOpenMetricsParser(b, st)
},
} {
for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
f, err := os.Open(fn)


@ -47,7 +47,7 @@ import (
// the re-arrangement work is actually causing problems (which has to be seen),
// that expectation needs to be changed.
type ProtobufParser struct {
-in []byte // The intput to parse.
in []byte // The input to parse.
inPos int // Position within the input.
metricPos int // Position within Metric slice.
// fieldPos is the position within a Summary or (legacy) Histogram. -2
@ -71,7 +71,7 @@ type ProtobufParser struct {
mf *dto.MetricFamily
-// Wether to also parse a classic histogram that is also present as a
// Whether to also parse a classic histogram that is also present as a
// native histogram.
parseClassicHistograms bool
@ -409,6 +409,7 @@ func (p *ProtobufParser) Next() (Entry, error) {
switch p.state {
case EntryInvalid:
p.metricPos = 0
p.exemplarPos = 0
p.fieldPos = -2
n, err := readDelimited(p.in[p.inPos:], p.mf)
p.inPos += n
@ -485,6 +486,7 @@ func (p *ProtobufParser) Next() (Entry, error) {
p.metricPos++
p.fieldPos = -2
p.fieldsDone = false
p.exemplarPos = 0
// If this is a metric family containing native
// histograms, we have to switch back to native
// histograms after parsing a classic histogram.


@ -695,6 +695,70 @@ metric: <
timestamp_ms: 1234568
>
`,
`name: "test_histogram_with_native_histogram_exemplars2"
help: "Another histogram with native histogram exemplars."
type: HISTOGRAM
metric: <
histogram: <
sample_count: 175
sample_sum: 0.0008280461746287094
bucket: <
cumulative_count: 2
upper_bound: -0.0004899999999999998
>
bucket: <
cumulative_count: 4
upper_bound: -0.0003899999999999998
>
bucket: <
cumulative_count: 16
upper_bound: -0.0002899999999999998
>
schema: 3
zero_threshold: 2.938735877055719e-39
zero_count: 2
negative_span: <
offset: -162
length: 1
>
negative_span: <
offset: 23
length: 4
>
negative_delta: 1
negative_delta: 3
negative_delta: -2
negative_delta: -1
negative_delta: 1
positive_span: <
offset: -161
length: 1
>
positive_span: <
offset: 8
length: 3
>
positive_delta: 1
positive_delta: 2
positive_delta: -1
positive_delta: -1
exemplars: <
label: <
name: "dummyID"
value: "59780"
>
value: -0.00039
timestamp: <
seconds: 1625851155
nanos: 146848499
>
>
>
timestamp_ms: 1234568
>
`,
}
@ -1276,6 +1340,41 @@ func TestProtobufParse(t *testing.T) {
{Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156},
}, },
}, },
{
m: "test_histogram_with_native_histogram_exemplars2",
help: "Another histogram with native histogram exemplars.",
},
{
m: "test_histogram_with_native_histogram_exemplars2",
typ: model.MetricTypeHistogram,
},
{
m: "test_histogram_with_native_histogram_exemplars2",
t: 1234568,
shs: &histogram.Histogram{
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings(
"__name__", "test_histogram_with_native_histogram_exemplars2",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
},
},
{
@ -1995,15 +2094,15 @@ func TestProtobufParse(t *testing.T) {
"__name__", "without_quantiles_sum", "__name__", "without_quantiles_sum",
), ),
}, },
{ // 78 { // 81
m: "empty_histogram", m: "empty_histogram",
help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.",
}, },
{ // 79 { // 82
m: "empty_histogram", m: "empty_histogram",
typ: model.MetricTypeHistogram, typ: model.MetricTypeHistogram,
}, },
{ // 80 { // 83
m: "empty_histogram", m: "empty_histogram",
shs: &histogram.Histogram{ shs: &histogram.Histogram{
CounterResetHint: histogram.UnknownCounterReset, CounterResetHint: histogram.UnknownCounterReset,
@ -2014,15 +2113,15 @@ func TestProtobufParse(t *testing.T) {
"__name__", "empty_histogram", "__name__", "empty_histogram",
), ),
}, },
{ // 81 { // 84
m: "test_counter_with_createdtimestamp", m: "test_counter_with_createdtimestamp",
help: "A counter with a created timestamp.", help: "A counter with a created timestamp.",
}, },
{ // 82 { // 85
m: "test_counter_with_createdtimestamp", m: "test_counter_with_createdtimestamp",
typ: model.MetricTypeCounter, typ: model.MetricTypeCounter,
}, },
{ // 83 { // 86
m: "test_counter_with_createdtimestamp", m: "test_counter_with_createdtimestamp",
v: 42, v: 42,
ct: 1000, ct: 1000,
@ -2030,15 +2129,15 @@ func TestProtobufParse(t *testing.T) {
"__name__", "test_counter_with_createdtimestamp", "__name__", "test_counter_with_createdtimestamp",
), ),
}, },
{ // 84 { // 87
m: "test_summary_with_createdtimestamp", m: "test_summary_with_createdtimestamp",
help: "A summary with a created timestamp.", help: "A summary with a created timestamp.",
}, },
{ // 85 { // 88
m: "test_summary_with_createdtimestamp", m: "test_summary_with_createdtimestamp",
typ: model.MetricTypeSummary, typ: model.MetricTypeSummary,
}, },
{ // 86 { // 89
m: "test_summary_with_createdtimestamp_count", m: "test_summary_with_createdtimestamp_count",
v: 42, v: 42,
ct: 1000, ct: 1000,
@ -2046,7 +2145,7 @@ func TestProtobufParse(t *testing.T) {
"__name__", "test_summary_with_createdtimestamp_count", "__name__", "test_summary_with_createdtimestamp_count",
), ),
}, },
{ // 87 { // 90
m: "test_summary_with_createdtimestamp_sum", m: "test_summary_with_createdtimestamp_sum",
v: 1.234, v: 1.234,
ct: 1000, ct: 1000,
@ -2054,15 +2153,15 @@ func TestProtobufParse(t *testing.T) {
"__name__", "test_summary_with_createdtimestamp_sum", "__name__", "test_summary_with_createdtimestamp_sum",
), ),
}, },
{ // 88 { // 91
m: "test_histogram_with_createdtimestamp", m: "test_histogram_with_createdtimestamp",
help: "A histogram with a created timestamp.", help: "A histogram with a created timestamp.",
}, },
{ // 89 { // 92
m: "test_histogram_with_createdtimestamp", m: "test_histogram_with_createdtimestamp",
typ: model.MetricTypeHistogram, typ: model.MetricTypeHistogram,
}, },
{ // 90 { // 93
m: "test_histogram_with_createdtimestamp", m: "test_histogram_with_createdtimestamp",
ct: 1000, ct: 1000,
shs: &histogram.Histogram{ shs: &histogram.Histogram{
@ -2074,15 +2173,15 @@ func TestProtobufParse(t *testing.T) {
"__name__", "test_histogram_with_createdtimestamp", "__name__", "test_histogram_with_createdtimestamp",
), ),
}, },
{ // 91 { // 94
m: "test_gaugehistogram_with_createdtimestamp", m: "test_gaugehistogram_with_createdtimestamp",
help: "A gauge histogram with a created timestamp.", help: "A gauge histogram with a created timestamp.",
}, },
{ // 92 { // 95
m: "test_gaugehistogram_with_createdtimestamp", m: "test_gaugehistogram_with_createdtimestamp",
typ: model.MetricTypeGaugeHistogram, typ: model.MetricTypeGaugeHistogram,
}, },
{ // 93 { // 96
m: "test_gaugehistogram_with_createdtimestamp", m: "test_gaugehistogram_with_createdtimestamp",
ct: 1000, ct: 1000,
shs: &histogram.Histogram{ shs: &histogram.Histogram{
@ -2094,15 +2193,15 @@ func TestProtobufParse(t *testing.T) {
"__name__", "test_gaugehistogram_with_createdtimestamp", "__name__", "test_gaugehistogram_with_createdtimestamp",
), ),
}, },
{ // 94 { // 97
m: "test_histogram_with_native_histogram_exemplars", m: "test_histogram_with_native_histogram_exemplars",
help: "A histogram with native histogram exemplars.", help: "A histogram with native histogram exemplars.",
}, },
{ // 95 { // 98
m: "test_histogram_with_native_histogram_exemplars", m: "test_histogram_with_native_histogram_exemplars",
typ: model.MetricTypeHistogram, typ: model.MetricTypeHistogram,
}, },
{ // 96 { // 99
m: "test_histogram_with_native_histogram_exemplars", m: "test_histogram_with_native_histogram_exemplars",
t: 1234568, t: 1234568,
shs: &histogram.Histogram{ shs: &histogram.Histogram{
@ -2130,7 +2229,7 @@ func TestProtobufParse(t *testing.T) {
{Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156},
}, },
}, },
{ // 97 { // 100
m: "test_histogram_with_native_histogram_exemplars_count", m: "test_histogram_with_native_histogram_exemplars_count",
t: 1234568, t: 1234568,
v: 175, v: 175,
@ -2138,7 +2237,7 @@ func TestProtobufParse(t *testing.T) {
"__name__", "test_histogram_with_native_histogram_exemplars_count", "__name__", "test_histogram_with_native_histogram_exemplars_count",
), ),
}, },
{ // 98 { // 101
m: "test_histogram_with_native_histogram_exemplars_sum", m: "test_histogram_with_native_histogram_exemplars_sum",
t: 1234568, t: 1234568,
v: 0.0008280461746287094, v: 0.0008280461746287094,
@ -2146,7 +2245,7 @@ func TestProtobufParse(t *testing.T) {
"__name__", "test_histogram_with_native_histogram_exemplars_sum", "__name__", "test_histogram_with_native_histogram_exemplars_sum",
), ),
}, },
{ // 99 { // 102
m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0004899999999999998", m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0004899999999999998",
t: 1234568, t: 1234568,
v: 2, v: 2,
@ -2155,7 +2254,7 @@ func TestProtobufParse(t *testing.T) {
"le", "-0.0004899999999999998", "le", "-0.0004899999999999998",
), ),
}, },
{ // 100 { // 103
m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0003899999999999998", m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0003899999999999998",
t: 1234568, t: 1234568,
v: 4, v: 4,
@ -2167,7 +2266,7 @@ func TestProtobufParse(t *testing.T) {
{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
}, },
}, },
{ // 101 { // 104
m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0002899999999999998", m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0002899999999999998",
t: 1234568, t: 1234568,
v: 16, v: 16,
@ -2179,7 +2278,7 @@ func TestProtobufParse(t *testing.T) {
{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false},
}, },
}, },
{ // 102 { // 105
m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff+Inf", m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff+Inf",
t: 1234568, t: 1234568,
v: 175, v: 175,
@ -2188,6 +2287,93 @@ func TestProtobufParse(t *testing.T) {
"le", "+Inf", "le", "+Inf",
), ),
}, },
{ // 106
m: "test_histogram_with_native_histogram_exemplars2",
help: "Another histogram with native histogram exemplars.",
},
{ // 107
m: "test_histogram_with_native_histogram_exemplars2",
typ: model.MetricTypeHistogram,
},
{ // 108
m: "test_histogram_with_native_histogram_exemplars2",
t: 1234568,
shs: &histogram.Histogram{
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
ZeroThreshold: 2.938735877055719e-39,
Schema: 3,
PositiveSpans: []histogram.Span{
{Offset: -161, Length: 1},
{Offset: 8, Length: 3},
},
NegativeSpans: []histogram.Span{
{Offset: -162, Length: 1},
{Offset: 23, Length: 4},
},
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
lset: labels.FromStrings(
"__name__", "test_histogram_with_native_histogram_exemplars2",
),
e: []exemplar.Exemplar{
{Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146},
},
},
{ // 109
m: "test_histogram_with_native_histogram_exemplars2_count",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_histogram_with_native_histogram_exemplars2_count",
),
},
{ // 110
m: "test_histogram_with_native_histogram_exemplars2_sum",
t: 1234568,
v: 0.0008280461746287094,
lset: labels.FromStrings(
"__name__", "test_histogram_with_native_histogram_exemplars2_sum",
),
},
{ // 111
m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0004899999999999998",
t: 1234568,
v: 2,
lset: labels.FromStrings(
"__name__", "test_histogram_with_native_histogram_exemplars2_bucket",
"le", "-0.0004899999999999998",
),
},
{ // 112
m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0003899999999999998",
t: 1234568,
v: 4,
lset: labels.FromStrings(
"__name__", "test_histogram_with_native_histogram_exemplars2_bucket",
"le", "-0.0003899999999999998",
),
},
{ // 113
m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0002899999999999998",
t: 1234568,
v: 16,
lset: labels.FromStrings(
"__name__", "test_histogram_with_native_histogram_exemplars2_bucket",
"le", "-0.0002899999999999998",
),
},
{ // 114
m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff+Inf",
t: 1234568,
v: 175,
lset: labels.FromStrings(
"__name__", "test_histogram_with_native_histogram_exemplars2_bucket",
"le", "+Inf",
),
},
},
},
}


@ -674,7 +674,6 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b
}()
// Any HTTP status 2xx is OK.
-//nolint:usestdlibvars
if resp.StatusCode/100 != 2 {
return fmt.Errorf("bad response status %s", resp.Status)
}


@ -711,7 +711,7 @@ func TestHangingNotifier(t *testing.T) {
)
var (
-sendTimeout = 10 * time.Millisecond
sendTimeout = 100 * time.Millisecond
sdUpdatert = sendTimeout / 2
done = make(chan struct{})


@ -302,15 +302,10 @@ type Exemplar struct {
// value represents an exact example value. This can be useful when the exemplar
// is attached to a histogram, which only gives an estimated value through buckets.
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
-// timestamp represents an optional timestamp of the sample in ms.
// timestamp represents the timestamp of the exemplar in ms.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
// for conversion from/to time.Time to Prometheus timestamp.
-//
-// Note that the "optional" keyword is omitted due to
-// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
-// Zero value means value not set. If you need to use exactly zero value for
-// the timestamp, use 1 millisecond before or after.
Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`


@ -107,15 +107,10 @@ message Exemplar {
// value represents an exact example value. This can be useful when the exemplar
// is attached to a histogram, which only gives an estimated value through buckets.
double value = 2;
-// timestamp represents an optional timestamp of the sample in ms.
// timestamp represents the timestamp of the exemplar in ms.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
// for conversion from/to time.Time to Prometheus timestamp.
-//
-// Note that the "optional" keyword is omitted due to
-// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
-// Zero value means value not set. If you need to use exactly zero value for
-// the timestamp, use 1 millisecond before or after.
int64 timestamp = 3;
}


@ -25,6 +25,7 @@ import (
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/teststorage"
@ -165,6 +166,9 @@ func rangeQueryCases() []benchCase {
{
expr: "sum(a_X)",
},
{
expr: "avg(a_X)",
},
{
expr: "sum without (l)(h_X)",
},
@ -271,7 +275,7 @@ func BenchmarkRangeQuery(b *testing.B) {
MaxSamples: 50000000,
Timeout: 100 * time.Second,
}
-engine := promql.NewEngine(opts)
engine := promqltest.NewTestEngineWithOpts(b, opts)
const interval = 10000 // 10s interval.
// A day of data plus 10k steps.
@ -362,7 +366,7 @@ func BenchmarkNativeHistograms(b *testing.B) {
for _, tc := range cases {
b.Run(tc.name, func(b *testing.B) {
-ng := promql.NewEngine(opts)
ng := promqltest.NewTestEngineWithOpts(b, opts)
for i := 0; i < b.N; i++ {
qry, err := ng.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step)
if err != nil {


@ -19,6 +19,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"io"
"math" "math"
"reflect" "reflect"
"runtime" "runtime"
@ -271,6 +272,8 @@ func contextErr(err error, env string) error {
//
// 2) Enforcement of the maximum number of concurrent queries.
type QueryTracker interface {
io.Closer
// GetMaxConcurrent returns maximum number of concurrent queries that are allowed by this tracker.
GetMaxConcurrent() int
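For illustration, a sketch of what the embedded io.Closer now demands of implementations (not part of this diff; the Insert/Delete signatures are assumptions inferred from the rest of the interface):

type noopTracker struct{}

func (noopTracker) GetMaxConcurrent() int                                 { return 20 }
func (noopTracker) Insert(ctx context.Context, query string) (int, error) { return 0, nil }
func (noopTracker) Delete(insertIndex int)                                {}
func (noopTracker) Close() error                                          { return nil } // newly required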
@ -313,6 +316,11 @@ type EngineOpts struct {
// EnablePerStepStats if true allows for per-step stats to be computed on request. Disabled otherwise.
EnablePerStepStats bool
// EnableDelayedNameRemoval delays the removal of the __name__ label to the last step of the query evaluation.
// This is useful in certain scenarios where the __name__ label must be preserved or where applying a
// regex-matcher to the __name__ label may otherwise lead to duplicate labelset errors.
EnableDelayedNameRemoval bool
}
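For illustration, a configuration sketch wiring up the new flag (not part of this diff; the values are placeholders):

opts := promql.EngineOpts{
	MaxSamples:               50000000,
	Timeout:                  2 * time.Minute,
	EnableDelayedNameRemoval: true, // defer dropping __name__ to the final evaluation step
}
engine := promql.NewEngine(opts)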
// Engine handles the lifetime of queries from beginning to end.
@ -330,6 +338,7 @@ type Engine struct {
enableAtModifier bool
enableNegativeOffset bool
enablePerStepStats bool
enableDelayedNameRemoval bool
}
// NewEngine returns a new engine.
@ -420,9 +429,18 @@ func NewEngine(opts EngineOpts) *Engine {
enableAtModifier: opts.EnableAtModifier,
enableNegativeOffset: opts.EnableNegativeOffset,
enablePerStepStats: opts.EnablePerStepStats,
enableDelayedNameRemoval: opts.EnableDelayedNameRemoval,
}
}
// Close closes ng.
func (ng *Engine) Close() error {
if ng.activeQueryTracker != nil {
return ng.activeQueryTracker.Close()
}
return nil
}
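A brief usage sketch (not part of this diff; logger is a placeholder): callers that own the engine should now release tracker resources on shutdown:

engine := promql.NewEngine(opts)
defer func() {
	if err := engine.Close(); err != nil {
		level.Error(logger).Log("msg", "closing query engine failed", "err", err)
	}
}()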
// SetQueryLogger sets the query logger.
func (ng *Engine) SetQueryLogger(l QueryLogger) {
ng.queryLoggerLock.Lock()
@ -573,7 +591,7 @@ func (ng *Engine) validateOpts(expr parser.Expr) error {
return validationErr
}
-// NewTestQuery: inject special behaviour into Query for testing.
// NewTestQuery injects special behaviour into Query for testing.
func (ng *Engine) NewTestQuery(f func(context.Context) error) Query {
qry := &query{
q: "test statement",
@ -706,16 +724,16 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
startTimestamp: start,
endTimestamp: start,
interval: 1,
-ctx: ctxInnerEval,
maxSamples: ng.maxSamplesPerQuery,
logger: ng.logger,
lookbackDelta: s.LookbackDelta,
samplesStats: query.sampleStats,
noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
enableDelayedNameRemoval: ng.enableDelayedNameRemoval,
}
query.sampleStats.InitStepTracking(start, start, 1)
-val, warnings, err := evaluator.Eval(s.Expr)
val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr)
evalSpanTimer.Finish()
@ -743,9 +761,9 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
// Point might have a different timestamp, force it to the evaluation
// timestamp as that is when we ran the evaluation.
if len(s.Histograms) > 0 {
-vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start}
vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start, DropName: s.DropName}
} else {
-vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start}
vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start, DropName: s.DropName}
}
}
return vector, warnings, nil
@ -764,15 +782,15 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
startTimestamp: timeMilliseconds(s.Start),
endTimestamp: timeMilliseconds(s.End),
interval: durationMilliseconds(s.Interval),
-ctx: ctxInnerEval,
maxSamples: ng.maxSamplesPerQuery,
logger: ng.logger,
lookbackDelta: s.LookbackDelta,
samplesStats: query.sampleStats,
noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
enableDelayedNameRemoval: ng.enableDelayedNameRemoval,
}
query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval)
-val, warnings, err := evaluator.Eval(s.Expr)
val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr)
evalSpanTimer.Finish()
@ -984,6 +1002,8 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations
if e.Series != nil {
return nil, nil
}
span := trace.SpanFromContext(ctx)
span.AddEvent("expand start", trace.WithAttributes(attribute.String("selector", e.String())))
series, ws, err := expandSeriesSet(ctx, e.UnexpandedSeriesSet)
if e.SkipHistogramBuckets {
for i := range series {
@ -991,6 +1011,7 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations
}
}
e.Series = series
span.AddEvent("expand end", trace.WithAttributes(attribute.Int("num_series", len(series))))
return ws, err
}
return nil, nil
@ -1020,8 +1041,6 @@ func (e errWithWarnings) Error() string { return e.err.Error() }
// querier and reports errors. On timeout or cancellation of its context it
// terminates.
type evaluator struct {
-ctx context.Context
startTimestamp int64 // Start time in milliseconds.
endTimestamp int64 // End time in milliseconds.
interval int64 // Interval in milliseconds.
@ -1032,6 +1051,7 @@ type evaluator struct {
lookbackDelta time.Duration
samplesStats *stats.QuerySamples
noStepSubqueryIntervalFn func(rangeMillis int64) int64
enableDelayedNameRemoval bool
}
// errorf causes a panic with the input formatted into an error.
@ -1057,7 +1077,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp
buf := make([]byte, 64<<10) buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)] buf = buf[:runtime.Stack(buf, false)]
level.Error(ev.logger).Log("msg", "runtime panic in parser", "expr", expr.String(), "err", e, "stacktrace", string(buf)) level.Error(ev.logger).Log("msg", "runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf))
*errp = fmt.Errorf("unexpected error: %w", err) *errp = fmt.Errorf("unexpected error: %w", err)
case errWithWarnings: case errWithWarnings:
*errp = err.err *errp = err.err
@ -1069,10 +1089,13 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp
}
}
-func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) {
func (ev *evaluator) Eval(ctx context.Context, expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) {
defer ev.recover(expr, &ws, &err)
-v, ws = ev.eval(expr)
v, ws = ev.eval(ctx, expr)
if ev.enableDelayedNameRemoval {
ev.cleanupMetricLabels(v)
}
return v, ws, nil
}
@ -1101,6 +1124,9 @@ type EvalNodeHelper struct {
rightSigs map[string]Sample
matchedSigs map[string]map[uint64]struct{}
resultMetric map[string]labels.Labels
// Additional options for the evaluation.
enableDelayedNameRemoval bool
}
func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) {
@ -1117,7 +1143,7 @@ func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) {
// function call results.
// The prepSeries function (if provided) can be used to prepare the helper
// for each series, then passed to each call funcCall.
-func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) {
func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) {
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
matrixes := make([]Matrix, len(exprs))
origMatrixes := make([]Matrix, len(exprs))
@ -1128,7 +1154,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
// Functions will take string arguments from the expressions, not the values.
if e != nil && e.Type() != parser.ValueTypeString {
// ev.currentSamples will be updated to the correct value within the ev.eval call.
-val, ws := ev.eval(e)
val, ws := ev.eval(ctx, e)
warnings.Merge(ws)
matrixes[i] = val.(Matrix)
@ -1150,7 +1176,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
biggestLen = len(matrixes[i])
}
}
-enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen)}
enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen), enableDelayedNameRemoval: ev.enableDelayedNameRemoval}
type seriesAndTimestamp struct {
Series
ts int64
@ -1180,7 +1206,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
}
for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval {
-if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
if err := contextDone(ctx, "expression evaluation"); err != nil {
ev.error(err)
}
// Reset number of samples in memory after each timestamp.
@ -1196,12 +1222,12 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
for si, series := range matrixes[i] {
switch {
case len(series.Floats) > 0 && series.Floats[0].T == ts:
-vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts})
vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts, DropName: series.DropName})
// Move input vectors forward so we don't have to re-scan the same
// past points at the next step.
matrixes[i][si].Floats = series.Floats[1:]
case len(series.Histograms) > 0 && series.Histograms[0].T == ts:
-vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts})
vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts, DropName: series.DropName})
matrixes[i][si].Histograms = series.Histograms[1:]
default:
continue
@ -1240,15 +1266,15 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
// If this could be an instant query, shortcut so as not to change sort order. // If this could be an instant query, shortcut so as not to change sort order.
if ev.endTimestamp == ev.startTimestamp { if ev.endTimestamp == ev.startTimestamp {
if result.ContainsSameLabelset() { if !ev.enableDelayedNameRemoval && result.ContainsSameLabelset() {
ev.errorf("vector cannot contain metrics with the same labelset") ev.errorf("vector cannot contain metrics with the same labelset")
} }
mat := make(Matrix, len(result)) mat := make(Matrix, len(result))
for i, s := range result { for i, s := range result {
if s.H == nil { if s.H == nil {
mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}} mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}, DropName: s.DropName}
} else { } else {
mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}} mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}, DropName: s.DropName}
} }
} }
ev.currentSamples = originalNumSamples + mat.TotalSamples() ev.currentSamples = originalNumSamples + mat.TotalSamples()
@ -1266,7 +1292,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
} }
ss.ts = ts ss.ts = ts
} else { } else {
ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts} ss = seriesAndTimestamp{Series{Metric: sample.Metric, DropName: sample.DropName}, ts}
} }
addToSeries(&ss.Series, enh.Ts, sample.F, sample.H, numSteps) addToSeries(&ss.Series, enh.Ts, sample.F, sample.H, numSteps)
seriess[h] = ss seriess[h] = ss
@ -1290,7 +1316,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
return mat, warnings return mat, warnings
} }
func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) { func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) {
// Keep a copy of the original point slice so that it can be returned to the pool. // Keep a copy of the original point slice so that it can be returned to the pool.
origMatrix := slices.Clone(inputMatrix) origMatrix := slices.Clone(inputMatrix)
defer func() { defer func() {
@ -1302,7 +1328,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
var warnings annotations.Annotations var warnings annotations.Annotations
enh := &EvalNodeHelper{} enh := &EvalNodeHelper{enableDelayedNameRemoval: ev.enableDelayedNameRemoval}
tempNumSamples := ev.currentSamples tempNumSamples := ev.currentSamples
// Create a mapping from input series to output groups. // Create a mapping from input series to output groups.
@ -1370,7 +1396,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
} }
for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval {
if err := contextDone(ev.ctx, "expression evaluation"); err != nil { if err := contextDone(ctx, "expression evaluation"); err != nil {
ev.error(err) ev.error(err)
} }
// Reset number of samples in memory after each timestamp. // Reset number of samples in memory after each timestamp.
@ -1421,11 +1447,11 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
// evalSubquery evaluates given SubqueryExpr and returns an equivalent // evalSubquery evaluates given SubqueryExpr and returns an equivalent
// evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set. // evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set.
func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { func (ev *evaluator) evalSubquery(ctx context.Context, subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) {
samplesStats := ev.samplesStats samplesStats := ev.samplesStats
// Avoid double counting samples when running a subquery, those samples will be counted in later stage. // Avoid double counting samples when running a subquery, those samples will be counted in later stage.
ev.samplesStats = ev.samplesStats.NewChild() ev.samplesStats = ev.samplesStats.NewChild()
val, ws := ev.eval(subq) val, ws := ev.eval(ctx, subq)
// But do incorporate the peak from the subquery // But do incorporate the peak from the subquery
samplesStats.UpdatePeakFromSubquery(ev.samplesStats) samplesStats.UpdatePeakFromSubquery(ev.samplesStats)
ev.samplesStats = samplesStats ev.samplesStats = samplesStats
@ -1452,18 +1478,20 @@ func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSele
} }
// eval evaluates the given expression as the given AST expression node requires. // eval evaluates the given expression as the given AST expression node requires.
func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotations) { func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, annotations.Annotations) {
// This is the top-level evaluation method. // This is the top-level evaluation method.
// Thus, we check for timeout/cancellation here. // Thus, we check for timeout/cancellation here.
if err := contextDone(ev.ctx, "expression evaluation"); err != nil { if err := contextDone(ctx, "expression evaluation"); err != nil {
ev.error(err) ev.error(err)
} }
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
// Create a new span to help investigate inner evaluation performances. // Create a new span to help investigate inner evaluation performances.
ctxWithSpan, span := otel.Tracer("").Start(ev.ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String()) ctx, span := otel.Tracer("").Start(ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String())
ev.ctx = ctxWithSpan
defer span.End() defer span.End()
if ss, ok := expr.(interface{ ShortString() string }); ok {
span.SetAttributes(attribute.String("operation", ss.ShortString()))
}
switch e := expr.(type) { switch e := expr.(type) {
case *parser.AggregateExpr: case *parser.AggregateExpr:
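The change above replaces the old pattern of mutating ev.ctx with an explicitly passed context, so the tracing span created for each AST node scopes only its own subtree. A minimal stand-alone sketch of that pattern, using the same go.opentelemetry.io/otel API; the tracer name and node strings are illustrative, not from the commit:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
)

// walk mimics evaluator.eval: every recursion derives a fresh span-scoped
// context instead of writing the span back into shared state, so sibling
// subtrees cannot leak spans into each other.
func walk(ctx context.Context, node string, children ...string) {
	ctx, span := otel.Tracer("promql-sketch").Start(ctx, "eval "+node)
	defer span.End()
	span.SetAttributes(attribute.String("operation", node))
	for _, child := range children {
		walk(ctx, child) // the child span is parented via ctx, not via ev.ctx
	}
	fmt.Println("evaluated", node)
}

func main() {
	walk(context.Background(), "sum", "rate", "vector-selector")
}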
@@ -1484,7 +1512,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 				sortedGrouping = append(sortedGrouping, valueLabel.Val)
 				slices.Sort(sortedGrouping)
 			}
-			return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+			return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 				return ev.aggregationCountValues(e, sortedGrouping, valueLabel.Val, v[0].(Vector), enh)
 			}, e.Expr)
 		}
@@ -1494,16 +1522,16 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 		// param is the number k for topk/bottomk, or q for quantile.
 		var fParam float64
 		if param != nil {
-			val, ws := ev.eval(param)
+			val, ws := ev.eval(ctx, param)
 			warnings.Merge(ws)
 			fParam = val.(Matrix)[0].Floats[0].F
 		}
 		// Now fetch the data to be aggregated.
-		val, ws := ev.eval(e.Expr)
+		val, ws := ev.eval(ctx, e.Expr)
 		warnings.Merge(ws)
 		inputMatrix := val.(Matrix)

-		result, ws := ev.rangeEvalAgg(e, sortedGrouping, inputMatrix, fParam)
+		result, ws := ev.rangeEvalAgg(ctx, e, sortedGrouping, inputMatrix, fParam)
 		warnings.Merge(ws)
 		ev.currentSamples = originalNumSamples + result.TotalSamples()
 		ev.samplesStats.UpdatePeak(ev.currentSamples)
@@ -1521,7 +1549,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 			unwrapParenExpr(&arg)
 			vs, ok := arg.(*parser.VectorSelector)
 			if ok {
-				return ev.rangeEvalTimestampFunctionOverVectorSelector(vs, call, e)
+				return ev.rangeEvalTimestampFunctionOverVectorSelector(ctx, vs, call, e)
 			}
 		}
@@ -1545,7 +1573,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 				matrixArgIndex = i
 				matrixArg = true
 				// Replacing parser.SubqueryExpr with parser.MatrixSelector.
-				val, totalSamples, ws := ev.evalSubquery(subq)
+				val, totalSamples, ws := ev.evalSubquery(ctx, subq)
 				e.Args[i] = val
 				warnings.Merge(ws)
 				defer func() {
@@ -1560,14 +1588,14 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 		// Special handling for functions that work on series not samples.
 		switch e.Func.Name {
 		case "label_replace":
-			return ev.evalLabelReplace(e.Args)
+			return ev.evalLabelReplace(ctx, e.Args)
 		case "label_join":
-			return ev.evalLabelJoin(e.Args)
+			return ev.evalLabelJoin(ctx, e.Args)
 		}

 		if !matrixArg {
 			// Does not have a matrix argument.
-			return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+			return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 				vec, annos := call(v, e.Args, enh)
 				return vec, warnings.Merge(annos)
 			}, e.Args...)
@@ -1579,7 +1607,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 		otherInArgs := make([]Vector, len(e.Args))
 		for i, e := range e.Args {
 			if i != matrixArgIndex {
-				val, ws := ev.eval(e)
+				val, ws := ev.eval(ctx, e)
 				otherArgs[i] = val.(Matrix)
 				otherInArgs[i] = Vector{Sample{}}
 				inArgs[i] = otherInArgs[i]
@@ -1593,7 +1621,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 		sel := arg.(*parser.MatrixSelector)
 		selVS := sel.VectorSelector.(*parser.VectorSelector)

-		ws, err := checkAndExpandSeriesSet(ev.ctx, sel)
+		ws, err := checkAndExpandSeriesSet(ctx, sel)
 		warnings.Merge(ws)
 		if err != nil {
 			ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), warnings})
@@ -1611,12 +1639,19 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 		var prevSS *Series
 		inMatrix := make(Matrix, 1)
 		inArgs[matrixArgIndex] = inMatrix
-		enh := &EvalNodeHelper{Out: make(Vector, 0, 1)}
+		enh := &EvalNodeHelper{Out: make(Vector, 0, 1), enableDelayedNameRemoval: ev.enableDelayedNameRemoval}

 		// Process all the calls for one time series at a time.
 		it := storage.NewBuffer(selRange)
 		var chkIter chunkenc.Iterator

+		// The last_over_time function acts like offset; thus, it
+		// should keep the metric name. For all the other range
+		// vector functions, the only change needed is to drop the
+		// metric name in the output.
+		dropName := e.Func.Name != "last_over_time"
+
 		for i, s := range selVS.Series {
-			if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
+			if err := contextDone(ctx, "expression evaluation"); err != nil {
 				ev.error(err)
 			}
 			ev.currentSamples -= len(floats) + totalHPointSize(histograms)
@@ -1629,15 +1664,12 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 			chkIter = s.Iterator(chkIter)
 			it.Reset(chkIter)
 			metric := selVS.Series[i].Labels()
-			// The last_over_time function acts like offset; thus, it
-			// should keep the metric name. For all the other range
-			// vector functions, the only change needed is to drop the
-			// metric name in the output.
-			if e.Func.Name != "last_over_time" {
+			if !ev.enableDelayedNameRemoval && dropName {
 				metric = metric.DropMetricName()
 			}
 			ss := Series{
 				Metric:   metric,
+				DropName: dropName,
 			}
 			inMatrix[0].Metric = selVS.Series[i].Labels()
 			for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval {
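For orientation, the dropName flag hoisted out of the loop encodes the long-standing PromQL rule that value-transforming range functions drop __name__ while last_over_time keeps it. A small hedged demo of that rule using the labels package; the function names in the slice are just sample inputs:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	in := labels.FromStrings("__name__", "http_requests_total", "job", "api")
	for _, funcName := range []string{"rate", "avg_over_time", "last_over_time"} {
		out := in
		// Same predicate as in the diff: everything except last_over_time
		// loses the metric name in its output series.
		if dropName := funcName != "last_over_time"; dropName {
			out = out.DropMetricName()
		}
		fmt.Printf("%-14s -> %s\n", funcName, out)
	}
}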
@@ -1754,30 +1786,33 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 				Series{
 					Metric:   createLabelsForAbsentFunction(e.Args[0]),
 					Floats:   newp,
+					DropName: dropName,
 				},
 			}, warnings
 		}

-		if mat.ContainsSameLabelset() {
+		if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() {
 			ev.errorf("vector cannot contain metrics with the same labelset")
 		}

 		return mat, warnings

 	case *parser.ParenExpr:
-		return ev.eval(e.Expr)
+		return ev.eval(ctx, e.Expr)

 	case *parser.UnaryExpr:
-		val, ws := ev.eval(e.Expr)
+		val, ws := ev.eval(ctx, e.Expr)
 		mat := val.(Matrix)
 		if e.Op == parser.SUB {
 			for i := range mat {
+				if !ev.enableDelayedNameRemoval {
 					mat[i].Metric = mat[i].Metric.DropMetricName()
+				}
+				mat[i].DropName = true
 				for j := range mat[i].Floats {
 					mat[i].Floats[j].F = -mat[i].Floats[j].F
 				}
 			}
-			if mat.ContainsSameLabelset() {
+			if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() {
 				ev.errorf("vector cannot contain metrics with the same labelset")
 			}
 		}
@@ -1786,7 +1821,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 	case *parser.BinaryExpr:
 		switch lt, rt := e.LHS.Type(), e.RHS.Type(); {
 		case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar:
-			return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+			return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 				val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F)
 				return append(enh.Out, Sample{F: val}), nil
 			}, e.LHS, e.RHS)
@@ -1799,47 +1834,49 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 			}
 			switch e.Op {
 			case parser.LAND:
-				return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+				return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 					return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil
 				}, e.LHS, e.RHS)
 			case parser.LOR:
-				return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+				return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 					return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil
 				}, e.LHS, e.RHS)
 			case parser.LUNLESS:
-				return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+				return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 					return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil
 				}, e.LHS, e.RHS)
 			default:
-				return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+				return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 					vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh)
 					return vec, handleVectorBinopError(err, e)
 				}, e.LHS, e.RHS)
 			}

 		case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar:
-			return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+			return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 				vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh)
 				return vec, handleVectorBinopError(err, e)
 			}, e.LHS, e.RHS)

 		case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector:
-			return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+			return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 				vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh)
 				return vec, handleVectorBinopError(err, e)
 			}, e.LHS, e.RHS)
 		}

 	case *parser.NumberLiteral:
+		span.SetAttributes(attribute.Float64("value", e.Val))
-		return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+		return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 			return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil
 		})

 	case *parser.StringLiteral:
+		span.SetAttributes(attribute.String("value", e.Val))
 		return String{V: e.Val, T: ev.startTimestamp}, nil

 	case *parser.VectorSelector:
-		ws, err := checkAndExpandSeriesSet(ev.ctx, e)
+		ws, err := checkAndExpandSeriesSet(ctx, e)
 		if err != nil {
 			ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
 		}
@@ -1848,7 +1885,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 		it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta))
 		var chkIter chunkenc.Iterator
 		for i, s := range e.Series {
-			if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
+			if err := contextDone(ctx, "expression evaluation"); err != nil {
 				ev.error(err)
 			}
 			chkIter = s.Iterator(chkIter)
@@ -1899,20 +1936,20 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 		if ev.startTimestamp != ev.endTimestamp {
 			panic(errors.New("cannot do range evaluation of matrix selector"))
 		}
-		return ev.matrixSelector(e)
+		return ev.matrixSelector(ctx, e)

 	case *parser.SubqueryExpr:
 		offsetMillis := durationMilliseconds(e.Offset)
 		rangeMillis := durationMilliseconds(e.Range)
 		newEv := &evaluator{
 			endTimestamp:             ev.endTimestamp - offsetMillis,
-			ctx:                      ev.ctx,
 			currentSamples:           ev.currentSamples,
 			maxSamples:               ev.maxSamples,
 			logger:                   ev.logger,
 			lookbackDelta:            ev.lookbackDelta,
 			samplesStats:             ev.samplesStats.NewChild(),
 			noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn,
+			enableDelayedNameRemoval: ev.enableDelayedNameRemoval,
 		}

 		if e.Step != 0 {
@@ -1935,7 +1972,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 			setOffsetForAtModifier(newEv.startTimestamp, e.Expr)
 		}
-		res, ws := newEv.eval(e.Expr)
+		res, ws := newEv.eval(ctx, e.Expr)
 		ev.currentSamples = newEv.currentSamples
 		ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats)
 		ev.samplesStats.IncrementSamplesAtTimestamp(ev.endTimestamp, newEv.samplesStats.TotalSamples)
@@ -1943,22 +1980,22 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 	case *parser.StepInvariantExpr:
 		switch ce := e.Expr.(type) {
 		case *parser.StringLiteral, *parser.NumberLiteral:
-			return ev.eval(ce)
+			return ev.eval(ctx, ce)
 		}

 		newEv := &evaluator{
 			startTimestamp:           ev.startTimestamp,
 			endTimestamp:             ev.startTimestamp, // Always a single evaluation.
 			interval:                 ev.interval,
-			ctx:                      ev.ctx,
 			currentSamples:           ev.currentSamples,
 			maxSamples:               ev.maxSamples,
 			logger:                   ev.logger,
 			lookbackDelta:            ev.lookbackDelta,
 			samplesStats:             ev.samplesStats.NewChild(),
 			noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn,
+			enableDelayedNameRemoval: ev.enableDelayedNameRemoval,
 		}
-		res, ws := newEv.eval(e.Expr)
+		res, ws := newEv.eval(ctx, e.Expr)
 		ev.currentSamples = newEv.currentSamples
 		ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats)
 		for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval {
@@ -2034,8 +2071,8 @@ func reuseOrGetFPointSlices(prevSS *Series, numSteps int) (r []FPoint) {
 	return getFPointSlice(numSteps)
 }

-func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) {
-	ws, err := checkAndExpandSeriesSet(ev.ctx, vs)
+func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Context, vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) {
+	ws, err := checkAndExpandSeriesSet(ctx, vs)
 	if err != nil {
 		ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
 	}
@@ -2046,7 +2083,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec
 		seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta))
 	}

-	return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 		if vs.Timestamp != nil {
 			// This is a special case for "timestamp()" when the @ modifier is used, to ensure that
 			// we return a point for each time step in this case.
@@ -2182,7 +2219,7 @@ func putMatrixSelectorHPointSlice(p []HPoint) {
 }

 // matrixSelector evaluates a *parser.MatrixSelector expression.
-func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annotations.Annotations) {
+func (ev *evaluator) matrixSelector(ctx context.Context, node *parser.MatrixSelector) (Matrix, annotations.Annotations) {
 	var (
 		vs = node.VectorSelector.(*parser.VectorSelector)
@@ -2193,7 +2230,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annota
 		it = storage.NewBuffer(durationMilliseconds(node.Range))
 	)
-	ws, err := checkAndExpandSeriesSet(ev.ctx, node)
+	ws, err := checkAndExpandSeriesSet(ctx, node)
 	if err != nil {
 		ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
 	}
@@ -2201,7 +2238,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annota
 	var chkIter chunkenc.Iterator
 	series := vs.Series
 	for i, s := range series {
-		if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
+		if err := contextDone(ctx, "expression evaluation"); err != nil {
 			ev.error(err)
 		}
 		chkIter = s.Iterator(chkIter)
@@ -2356,6 +2393,11 @@ loop:
 			} else {
 				histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}})
 			}
+			if histograms[n].H == nil {
+				// Make sure to pass non-nil H to AtFloatHistogram so that it does a deep-copy.
+				// Not an issue in the loop above since that uses an intermediate buffer.
+				histograms[n].H = &histogram.FloatHistogram{}
+			}
 			histograms[n].T, histograms[n].H = it.AtFloatHistogram(histograms[n].H)
 			if value.IsStaleNaN(histograms[n].H.Sum) {
 				histograms = histograms[:n]
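The nil check added here guards the AtFloatHistogram contract spelled out in its comment: with a nil destination the iterator may return a pointer into its own reused buffer, while a non-nil destination forces a deep copy. A toy model of that contract; reuseOrCopy is hypothetical, and only FloatHistogram and its Copy method come from the real histogram package:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// reuseOrCopy models the contract: a nil destination may alias the
// callee's internal buffer, a non-nil destination receives a deep copy
// the caller owns.
func reuseOrCopy(src, dst *histogram.FloatHistogram) *histogram.FloatHistogram {
	if dst == nil {
		return src // aliases shared storage; mutated on the next step
	}
	*dst = *src.Copy() // caller-owned deep copy, safe to retain
	return dst
}

func main() {
	src := &histogram.FloatHistogram{Sum: 1}
	aliased := reuseOrCopy(src, nil)
	owned := reuseOrCopy(src, &histogram.FloatHistogram{})
	src.Sum = 2 // simulate the iterator advancing and reusing its buffer
	fmt.Println(aliased.Sum, owned.Sum) // 2 1
}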
@@ -2548,7 +2590,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
 				continue
 			}
 			metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh)
-			if returnBool {
+			if !ev.enableDelayedNameRemoval && returnBool {
 				metric = metric.DropMetricName()
 			}
 			insertedSigs, exists := matchedSigs[sig]
@@ -2576,6 +2618,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
 				Metric:   metric,
 				F:        floatValue,
 				H:        histogramValue,
+				DropName: returnBool,
 			})
 		}
 	return enh.Out, lastErr
@@ -2675,8 +2718,11 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
 			lhsSample.F = float
 			lhsSample.H = histogram
 			if shouldDropMetricName(op) || returnBool {
+				if !ev.enableDelayedNameRemoval {
 					lhsSample.Metric = lhsSample.Metric.DropMetricName()
 				}
+				lhsSample.DropName = true
+			}
 			enh.Out = append(enh.Out, lhsSample)
 		}
 	}
@@ -2773,15 +2819,20 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram
 }

 type groupedAggregation struct {
+	floatValue     float64
+	histogramValue *histogram.FloatHistogram
+	floatMean      float64
+	floatKahanC    float64 // "Compensating value" for Kahan summation.
+	groupCount     float64
+	heap           vectorByValueHeap
+
+	// All bools together for better packing within the struct.
 	seen                   bool // Was this output groups seen in the input at this timestamp.
 	hasFloat               bool // Has at least 1 float64 sample aggregated.
 	hasHistogram           bool // Has at least 1 histogram sample aggregated.
-	floatValue             float64
-	histogramValue         *histogram.FloatHistogram
-	floatMean              float64 // Mean, or "compensating value" for Kahan summation.
-	groupCount             int
-	groupAggrComplete      bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group
-	heap                   vectorByValueHeap
+	incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets, or incompatible custom buckets.
+	groupAggrComplete      bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group.
+	incrementalMean        bool // True after reverting to incremental calculation of the mean value.
 }

 // aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix.
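A note on the reordered struct: grouping all bool fields together minimizes alignment padding, as the new comment says. The effect is easy to measure with unsafe.Sizeof; the two types below are illustrative stand-ins, not the real groupedAggregation:

package main

import (
	"fmt"
	"unsafe"
)

// Two layouts of the same logical fields. On 64-bit platforms the
// interleaved layout pays up to 7 bytes of padding after every bool
// that precedes an 8-byte field.
type interleaved struct {
	a bool
	x float64
	b bool
	y float64
	c bool
}

type packed struct {
	x, y    float64
	a, b, c bool
}

func main() {
	fmt.Println(unsafe.Sizeof(interleaved{})) // typically 40
	fmt.Println(unsafe.Sizeof(packed{}))      // typically 24
}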
@@ -2807,13 +2858,12 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 			*group = groupedAggregation{
 				seen:                   true,
 				floatValue:             f,
+				floatMean:              f,
+				incompatibleHistograms: false,
 				groupCount:             1,
 			}
 			switch op {
-			case parser.AVG:
-				group.floatMean = f
-				fallthrough
-			case parser.SUM:
+			case parser.AVG, parser.SUM:
 				if h == nil {
 					group.hasFloat = true
 				} else {
@@ -2821,7 +2871,6 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 					group.hasHistogram = true
 				}
 			case parser.STDVAR, parser.STDDEV:
-				group.floatMean = f
 				group.floatValue = 0
 			case parser.QUANTILE:
 				group.heap = make(vectorByValueHeap, 1)
@@ -2832,6 +2881,10 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 			continue
 		}

+		if group.incompatibleHistograms {
+			continue
+		}
+
 		switch op {
 		case parser.SUM:
 			if h != nil {
@@ -2840,6 +2893,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 					_, err := group.histogramValue.Add(h)
 					if err != nil {
 						handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+						group.incompatibleHistograms = true
 					}
 				}
 				// Otherwise the aggregation contained floats
@@ -2847,7 +2901,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 				// point in copying the histogram in that case.
 			} else {
 				group.hasFloat = true
-				group.floatValue, group.floatMean = kahanSumInc(f, group.floatValue, group.floatMean)
+				group.floatValue, group.floatKahanC = kahanSumInc(f, group.floatValue, group.floatKahanC)
 			}

 		case parser.AVG:
@@ -2855,15 +2909,19 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 			if h != nil {
 				group.hasHistogram = true
 				if group.histogramValue != nil {
-					left := h.Copy().Div(float64(group.groupCount))
-					right := group.histogramValue.Copy().Div(float64(group.groupCount))
+					left := h.Copy().Div(group.groupCount)
+					right := group.histogramValue.Copy().Div(group.groupCount)
 					toAdd, err := left.Sub(right)
 					if err != nil {
 						handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+						group.incompatibleHistograms = true
+						continue
 					}
 					_, err = group.histogramValue.Add(toAdd)
 					if err != nil {
 						handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+						group.incompatibleHistograms = true
+						continue
 					}
 				}
 				// Otherwise the aggregation contained floats
@@ -2871,6 +2929,22 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 				// point in copying the histogram in that case.
 			} else {
 				group.hasFloat = true
+				if !group.incrementalMean {
+					newV, newC := kahanSumInc(f, group.floatValue, group.floatKahanC)
+					if !math.IsInf(newV, 0) {
+						// The sum doesn't overflow, so we propagate it to the
+						// group struct and continue with the regular
+						// calculation of the mean value.
+						group.floatValue, group.floatKahanC = newV, newC
+						break
+					}
+					// If we are here, we know that the sum _would_ overflow. So
+					// instead of continue to sum up, we revert to incremental
+					// calculation of the mean value from here on.
+					group.incrementalMean = true
+					group.floatMean = group.floatValue / (group.groupCount - 1)
+					group.floatKahanC /= group.groupCount - 1
+				}
 				if math.IsInf(group.floatMean, 0) {
 					if math.IsInf(f, 0) && (group.floatMean > 0) == (f > 0) {
 						// The `floatMean` and `s.F` values are `Inf` of the same sign. They
@@ -2888,8 +2962,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 						break
 					}
 				}
+				currentMean := group.floatMean + group.floatKahanC
+				group.floatMean, group.floatKahanC = kahanSumInc(
 					// Divide each side of the `-` by `group.groupCount` to avoid float64 overflows.
-				group.floatMean += f/float64(group.groupCount) - group.floatMean/float64(group.groupCount)
+					f/group.groupCount-currentMean/group.groupCount,
+					group.floatMean,
+					group.floatKahanC,
+				)
 			}

 		case parser.GROUP:
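The avg branch above now keeps the Kahan compensation in its own field (floatKahanC) and only switches to an incremental mean once the plain compensated sum would overflow to ±Inf. The helper below re-implements kahanSumInc for illustration (the same Neumaier-style update the engine uses, reproduced here so the example is self-contained) and shows why the compensation term matters:

package main

import (
	"fmt"
	"math"
)

// kahanSumInc: sum accumulates, c carries the low-order bits that
// float64 rounding would otherwise discard.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	switch {
	case math.IsInf(t, 0):
		c = 0
	case math.Abs(sum) >= math.Abs(inc):
		c += (sum - t) + inc
	default:
		c += (inc - t) + sum
	}
	return t, c
}

func main() {
	// Plain summation loses the small addends entirely.
	plain := 1e16
	sum, comp := 1e16, 0.0
	for i := 0; i < 1000; i++ {
		plain += 1.0
		sum, comp = kahanSumInc(1.0, sum, comp)
	}
	fmt.Println(plain - 1e16)      // 0: the +1s vanished
	fmt.Println(sum + comp - 1e16) // 1000: compensation recovered them
}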
@@ -2912,7 +2991,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 			if h == nil { // Ignore native histograms.
 				group.groupCount++
 				delta := f - group.floatMean
-				group.floatMean += delta / float64(group.groupCount)
+				group.floatMean += delta / group.groupCount
 				group.floatValue += delta * (f - group.floatMean)
 			}
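The stdvar/stddev branch is Welford's online algorithm; the only change in the hunk above is that groupCount is now a float64, so the float64() conversions go away. A compact stand-alone version of the update rule, with made-up sample data:

package main

import "fmt"

// Welford's online update: mean and m2 (sum of squared deviations from
// the running mean) are maintained incrementally, exactly as in the
// group.floatMean / group.floatValue pair above.
func main() {
	xs := []float64{2, 4, 4, 4, 5, 5, 7, 9}
	var mean, m2, n float64
	for _, f := range xs {
		n++
		delta := f - mean
		mean += delta / n
		m2 += delta * (f - mean)
	}
	fmt.Println(mean, m2/n) // population mean 5, variance 4
}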
@@ -2938,20 +3017,25 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 				annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange()))
 				continue
 			}
-			if aggr.hasHistogram {
+			switch {
+			case aggr.incompatibleHistograms:
+				continue
+			case aggr.hasHistogram:
 				aggr.histogramValue = aggr.histogramValue.Compact(0)
-			} else {
-				aggr.floatValue = aggr.floatMean
+			case aggr.incrementalMean:
+				aggr.floatValue = aggr.floatMean + aggr.floatKahanC
+			default:
+				aggr.floatValue = (aggr.floatValue + aggr.floatKahanC) / aggr.groupCount
 			}

 		case parser.COUNT:
-			aggr.floatValue = float64(aggr.groupCount)
+			aggr.floatValue = aggr.groupCount

 		case parser.STDVAR:
-			aggr.floatValue /= float64(aggr.groupCount)
+			aggr.floatValue /= aggr.groupCount

 		case parser.STDDEV:
-			aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount))
+			aggr.floatValue = math.Sqrt(aggr.floatValue / aggr.groupCount)

 		case parser.QUANTILE:
 			aggr.floatValue = quantile(q, aggr.heap)
@@ -2962,10 +3046,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 				annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange()))
 				continue
 			}
-			if aggr.hasHistogram {
+			switch {
+			case aggr.incompatibleHistograms:
+				continue
+			case aggr.hasHistogram:
 				aggr.histogramValue.Compact(0)
-			} else {
-				aggr.floatValue += aggr.floatMean // Add Kahan summation compensating term.
+			default:
+				aggr.floatValue += aggr.floatKahanC
 			}

 		default:
 			// For other aggregations, we already have the right value.
@@ -2973,6 +3060,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 		ss := &outputMatrix[ri]
 		addToSeries(ss, enh.Ts, aggr.floatValue, aggr.histogramValue, numSteps)
+		ss.DropName = inputMatrix[ri].DropName
 	}

 	return annos
@@ -2999,7 +3087,7 @@ seriesLoop:
 		if !ok {
 			continue
 		}
-		s = Sample{Metric: inputMatrix[si].Metric, F: f}
+		s = Sample{Metric: inputMatrix[si].Metric, F: f, DropName: inputMatrix[si].DropName}

 		group := &groups[seriesToResult[si]]
 		// Initialize this group if it's the first time we've seen it.
@@ -3083,16 +3171,16 @@ seriesLoop:
 		mat = make(Matrix, 0, len(groups))
 	}

-	add := func(lbls labels.Labels, f float64) {
+	add := func(lbls labels.Labels, f float64, dropName bool) {
 		// If this could be an instant query, add directly to the matrix so the result is in consistent order.
 		if ev.endTimestamp == ev.startTimestamp {
-			mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}})
+			mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}, DropName: dropName})
 		} else {
 			// Otherwise the results are added into seriess elements.
 			hash := lbls.Hash()
 			ss, ok := seriess[hash]
 			if !ok {
-				ss = Series{Metric: lbls}
+				ss = Series{Metric: lbls, DropName: dropName}
 			}
 			addToSeries(&ss, enh.Ts, f, nil, numSteps)
 			seriess[hash] = ss
@@ -3109,7 +3197,7 @@ seriesLoop:
 				sort.Sort(sort.Reverse(aggr.heap))
 			}
 			for _, v := range aggr.heap {
-				add(v.Metric, v.F)
+				add(v.Metric, v.F, v.DropName)
 			}

 		case parser.BOTTOMK:
@@ -3118,12 +3206,12 @@ seriesLoop:
 				sort.Sort(sort.Reverse((*vectorByReverseValueHeap)(&aggr.heap)))
 			}
 			for _, v := range aggr.heap {
-				add(v.Metric, v.F)
+				add(v.Metric, v.F, v.DropName)
 			}

 		case parser.LIMITK, parser.LIMIT_RATIO:
 			for _, v := range aggr.heap {
-				add(v.Metric, v.F)
+				add(v.Metric, v.F, v.DropName)
 			}
 		}
 	}
@@ -3131,7 +3219,7 @@ seriesLoop:
 	return mat, annos
 }

-// aggregationK evaluates count_values on vec.
+// aggregationCountValues evaluates count_values on vec.
 // Outputs as many series per group as there are values in the input.
 func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []string, valueLabel string, vec Vector, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	type groupCount struct {
@@ -3175,6 +3263,30 @@ func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []
 	return enh.Out, nil
 }

+func (ev *evaluator) cleanupMetricLabels(v parser.Value) {
+	if v.Type() == parser.ValueTypeMatrix {
+		mat := v.(Matrix)
+		for i := range mat {
+			if mat[i].DropName {
+				mat[i].Metric = mat[i].Metric.DropMetricName()
+			}
+		}
+		if mat.ContainsSameLabelset() {
+			ev.errorf("vector cannot contain metrics with the same labelset")
+		}
+	} else if v.Type() == parser.ValueTypeVector {
+		vec := v.(Vector)
+		for i := range vec {
+			if vec[i].DropName {
+				vec[i].Metric = vec[i].Metric.DropMetricName()
+			}
+		}
+		if vec.ContainsSameLabelset() {
+			ev.errorf("vector cannot contain metrics with the same labelset")
+		}
+	}
+}
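cleanupMetricLabels is the second half of delayed name removal: during evaluation, series are only flagged with DropName, and __name__ is stripped exactly once at the end, right before the duplicate-labelset check. A toy model of that flow on plain strings; all names here are made up and the panic stands in for ev.errorf:

package main

import "fmt"

type series struct {
	name     string
	labels   string
	dropName bool
}

// negate marks outputs instead of stripping the name immediately,
// like the UnaryExpr branch earlier in the diff.
func negate(in []series) []series {
	out := make([]series, len(in))
	for i, s := range in {
		s.dropName = true
		out[i] = s
	}
	return out
}

// cleanup strips flagged names once, then checks for collisions,
// mirroring cleanupMetricLabels above.
func cleanup(in []series) []series {
	seen := map[string]bool{}
	for i := range in {
		if in[i].dropName {
			in[i].name = ""
		}
		key := in[i].name + "{" + in[i].labels + "}"
		if seen[key] {
			panic("vector cannot contain metrics with the same labelset")
		}
		seen[key] = true
	}
	return in
}

func main() {
	fmt.Println(cleanup(negate([]series{
		{name: "a_total", labels: `job="x"`},
		{name: "b_total", labels: `job="y"`},
	})))
}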
 func addToSeries(ss *Series, ts int64, f float64, h *histogram.FloatHistogram, numSteps int) {
 	if h == nil {
 		if ss.Floats == nil {
@@ -3485,14 +3597,14 @@ func makeInt64Pointer(val int64) *int64 {
 	return valp
 }

-// Add RatioSampler interface to allow unit-testing (previously: Randomizer).
+// RatioSampler allows unit-testing (previously: Randomizer).
 type RatioSampler interface {
 	// Return this sample "offset" between [0.0, 1.0]
 	sampleOffset(ts int64, sample *Sample) float64
 	AddRatioSample(r float64, sample *Sample) bool
 }

-// Use Hash(labels.String()) / maxUint64 as a "deterministic"
+// HashRatioSampler uses Hash(labels.String()) / maxUint64 as a "deterministic"
 // value in [0.0, 1.0].
 type HashRatioSampler struct{}

View file

@@ -17,8 +17,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"math"
-	"os"
 	"sort"
 	"strconv"
 	"sync"
@@ -56,14 +54,7 @@ func TestMain(m *testing.M) {
 func TestQueryConcurrency(t *testing.T) {
 	maxConcurrency := 10

-	dir, err := os.MkdirTemp("", "test_concurrency")
-	require.NoError(t, err)
-	defer os.RemoveAll(dir)
-	queryTracker := promql.NewActiveQueryTracker(dir, maxConcurrency, nil)
-	t.Cleanup(func() {
-		require.NoError(t, queryTracker.Close())
-	})
+	queryTracker := promql.NewActiveQueryTracker(t.TempDir(), maxConcurrency, nil)

 	opts := promql.EngineOpts{
 		Logger: nil,
 		Reg:    nil,
@@ -71,15 +62,17 @@ func TestQueryConcurrency(t *testing.T) {
 		Timeout:            100 * time.Second,
 		ActiveQueryTracker: queryTracker,
 	}
-	engine := promql.NewEngine(opts)
+	engine := promqltest.NewTestEngineWithOpts(t, opts)

 	ctx, cancelCtx := context.WithCancel(context.Background())
-	defer cancelCtx()
+	t.Cleanup(cancelCtx)

 	block := make(chan struct{})
 	processing := make(chan struct{})
 	done := make(chan int)
-	defer close(done)
+	t.Cleanup(func() {
+		close(done)
+	})

 	f := func(context.Context) error {
 		select {
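The test refactor swaps hand-rolled temp-dir management for t.TempDir and defers for t.Cleanup, whose callbacks also run when a helper calls t.Fatal. A self-contained illustration of the idiom; TestTempDirIdiom and the file name are invented for the example:

package promql_test

import (
	"os"
	"path/filepath"
	"testing"
)

func TestTempDirIdiom(t *testing.T) {
	dir := t.TempDir() // removed automatically when the test ends

	f, err := os.Create(filepath.Join(dir, "queries.active"))
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { f.Close() }) // runs even if the test fails later
}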
@@ -164,7 +157,7 @@ func TestQueryTimeout(t *testing.T) {
 		MaxSamples: 10,
 		Timeout:    5 * time.Millisecond,
 	}
-	engine := promql.NewEngine(opts)
+	engine := promqltest.NewTestEngineWithOpts(t, opts)
 	ctx, cancelCtx := context.WithCancel(context.Background())
 	defer cancelCtx()
@@ -189,7 +182,7 @@ func TestQueryCancel(t *testing.T) {
 		MaxSamples: 10,
 		Timeout:    10 * time.Second,
 	}
-	engine := promql.NewEngine(opts)
+	engine := promqltest.NewTestEngineWithOpts(t, opts)
 	ctx, cancelCtx := context.WithCancel(context.Background())
 	defer cancelCtx()
@@ -263,7 +256,7 @@ func TestQueryError(t *testing.T) {
 		MaxSamples: 10,
 		Timeout:    10 * time.Second,
 	}
-	engine := promql.NewEngine(opts)
+	engine := promqltest.NewTestEngineWithOpts(t, opts)
 	errStorage := promql.ErrStorage{errors.New("storage error")}
 	queryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) {
 		return &errQuerier{err: errStorage}, nil
@@ -597,7 +590,7 @@ func TestSelectHintsSetCorrectly(t *testing.T) {
 		},
 	} {
 		t.Run(tc.query, func(t *testing.T) {
-			engine := promql.NewEngine(opts)
+			engine := promqltest.NewTestEngineWithOpts(t, opts)
 			hintsRecorder := &noopHintRecordingQueryable{}

 			var (
@@ -628,7 +621,7 @@ func TestEngineShutdown(t *testing.T) {
 		MaxSamples: 10,
 		Timeout:    10 * time.Second,
 	}
-	engine := promql.NewEngine(opts)
+	engine := promqltest.NewTestEngineWithOpts(t, opts)
 	ctx, cancelCtx := context.WithCancel(context.Background())

 	block := make(chan struct{})
@@ -764,7 +757,7 @@ load 10s
 		t.Run(fmt.Sprintf("%d query=%s", i, c.Query), func(t *testing.T) {
 			var err error
 			var qry promql.Query
-			engine := newTestEngine()
+			engine := newTestEngine(t)
 			if c.Interval == 0 {
 				qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start)
 			} else {
@@ -1306,7 +1299,7 @@ load 10s
 	for _, c := range cases {
 		t.Run(c.Query, func(t *testing.T) {
 			opts := promql.NewPrometheusQueryOpts(true, 0)
-			engine := promqltest.NewTestEngine(true, 0, promqltest.DefaultMaxSamplesPerQuery)
+			engine := promqltest.NewTestEngine(t, true, 0, promqltest.DefaultMaxSamplesPerQuery)

 			runQuery := func(expErr error) *stats.Statistics {
 				var err error
@@ -1333,7 +1326,7 @@ load 10s
 			if c.SkipMaxCheck {
 				return
 			}
-			engine = promqltest.NewTestEngine(true, 0, stats.Samples.PeakSamples-1)
+			engine = promqltest.NewTestEngine(t, true, 0, stats.Samples.PeakSamples-1)
 			runQuery(promql.ErrTooManySamples(env))
 		})
 	}
@@ -1486,7 +1479,7 @@ load 10s

 	for _, c := range cases {
 		t.Run(c.Query, func(t *testing.T) {
-			engine := newTestEngine()
+			engine := newTestEngine(t)
 			testFunc := func(expError error) {
 				var err error
 				var qry promql.Query
@@ -1507,18 +1500,18 @@ load 10s
 			}

 			// Within limit.
-			engine = promqltest.NewTestEngine(false, 0, c.MaxSamples)
+			engine = promqltest.NewTestEngine(t, false, 0, c.MaxSamples)
 			testFunc(nil)

 			// Exceeding limit.
-			engine = promqltest.NewTestEngine(false, 0, c.MaxSamples-1)
+			engine = promqltest.NewTestEngine(t, false, 0, c.MaxSamples-1)
 			testFunc(promql.ErrTooManySamples(env))
 		})
 	}
 }

 func TestAtModifier(t *testing.T) {
-	engine := newTestEngine()
+	engine := newTestEngine(t)
 	storage := promqltest.LoadedStorage(t, `
 load 10s
   metric{job="1"} 0+1x1000
@@ -1715,6 +1708,7 @@ load 1ms
 					{F: 3600, T: 7 * 60 * 1000},
 				},
 				Metric:   labels.EmptyLabels(),
+				DropName: true,
 			},
 		},
 	},
@@ -1932,18 +1926,22 @@ func TestSubquerySelector(t *testing.T) {
 					promql.Series{
 						Floats:   []promql.FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}},
 						Metric:   labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"),
+						DropName: true,
 					},
 					promql.Series{
 						Floats:   []promql.FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}},
 						Metric:   labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"),
+						DropName: true,
 					},
 					promql.Series{
 						Floats:   []promql.FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}},
 						Metric:   labels.FromStrings("job", "api-server", "instance", "0", "group", "production"),
+						DropName: true,
 					},
 					promql.Series{
 						Floats:   []promql.FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}},
 						Metric:   labels.FromStrings("job", "api-server", "instance", "1", "group", "production"),
+						DropName: true,
 					},
 				},
 				nil,
@@ -1996,7 +1994,7 @@ func TestSubquerySelector(t *testing.T) {
 		},
 	} {
 		t.Run("", func(t *testing.T) {
-			engine := newTestEngine()
+			engine := newTestEngine(t)
 			storage := promqltest.LoadedStorage(t, tst.loadString)
 			t.Cleanup(func() { storage.Close() })
@@ -2045,7 +2043,7 @@ func TestQueryLogger_basic(t *testing.T) {
 		MaxSamples: 10,
 		Timeout:    10 * time.Second,
 	}
-	engine := promql.NewEngine(opts)
+	engine := promqltest.NewTestEngineWithOpts(t, opts)

 	queryExec := func() {
 		ctx, cancelCtx := context.WithCancel(context.Background())
@@ -2096,7 +2094,7 @@ func TestQueryLogger_fields(t *testing.T) {
 		MaxSamples: 10,
 		Timeout:    10 * time.Second,
 	}
-	engine := promql.NewEngine(opts)
+	engine := promqltest.NewTestEngineWithOpts(t, opts)
 	f1 := NewFakeQueryLogger()
 	engine.SetQueryLogger(f1)
@@ -2125,7 +2123,7 @@ func TestQueryLogger_error(t *testing.T) {
 		MaxSamples: 10,
 		Timeout:    10 * time.Second,
 	}
-	engine := promql.NewEngine(opts)
+	engine := promqltest.NewTestEngineWithOpts(t, opts)
 	f1 := NewFakeQueryLogger()
 	engine.SetQueryLogger(f1)
@@ -3008,7 +3006,7 @@ func TestEngineOptsValidation(t *testing.T) {
 	}

 	for _, c := range cases {
-		eng := promql.NewEngine(c.opts)
+		eng := promqltest.NewTestEngineWithOpts(t, c.opts)
 		_, err1 := eng.NewInstantQuery(context.Background(), nil, nil, c.query, time.Unix(10, 0))
 		_, err2 := eng.NewRangeQuery(context.Background(), nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second)
 		if c.fail {
@@ -3022,7 +3020,7 @@ func TestEngineOptsValidation(t *testing.T) {
 }

 func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
-	engine := newTestEngine()
+	engine := newTestEngine(t)

 	baseT := timestamp.Time(0)
 	storage := promqltest.LoadedStorage(t, `
@@ -3097,217 +3095,6 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
 	}
 }
func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
// TODO(codesome): Integrate histograms into the PromQL testing framework
// and write more tests there.
cases := []struct {
histograms []histogram.Histogram
expected histogram.FloatHistogram
expectedAvg histogram.FloatHistogram
}{
{
histograms: []histogram.Histogram{
{
CounterResetHint: histogram.GaugeType,
Schema: 0,
Count: 25,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 4,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 2},
},
NegativeBuckets: []int64{2, 2, -3, 8},
},
{
CounterResetHint: histogram.GaugeType,
Schema: 0,
Count: 41,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
},
{
CounterResetHint: histogram.GaugeType,
Schema: 0,
Count: 41,
Sum: 1111.1,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
},
{
CounterResetHint: histogram.GaugeType,
Schema: 1, // Everything is 0 just to make the count 4, so avg has nicer numbers.
},
},
expected: histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Schema: 0,
ZeroThreshold: 0.001,
ZeroCount: 14,
Count: 107,
Sum: 4691.2,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 7},
},
PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 6},
{Offset: 3, Length: 3},
},
NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4},
},
expectedAvg: histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Schema: 0,
ZeroThreshold: 0.001,
ZeroCount: 3.5,
Count: 26.75,
Sum: 1172.8,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 7},
},
PositiveBuckets: []float64{0.75, 2, 0.5, 1.25, 0.75, 0.5, 0.5},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 6},
{Offset: 3, Length: 3},
},
NegativeBuckets: []float64{0.5, 1.5, 2, 1, 3.75, 2.25, 2.5, 2.5, 1},
},
},
}
idx0 := int64(0)
for _, c := range cases {
for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
storage := teststorage.New(t)
t.Cleanup(func() { storage.Close() })
seriesName := "sparse_histogram_series"
seriesNameOverTime := "sparse_histogram_series_over_time"
engine := newTestEngine()
ts := idx0 * int64(10*time.Minute/time.Millisecond)
app := storage.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("__name__", "float_series", "idx", "0"), ts, 42)
require.NoError(t, err)
for idx1, h := range c.histograms {
lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1))
// Since we mutate h later, we need to create a copy here.
var err error
if floatHisto {
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil))
} else {
_, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
}
require.NoError(t, err)
lbls = labels.FromStrings("__name__", seriesNameOverTime)
newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond)
// Since we mutate h later, we need to create a copy here.
if floatHisto {
_, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat(nil))
} else {
_, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil)
}
require.NoError(t, err)
}
require.NoError(t, app.Commit())
queryAndCheck := func(queryString string, ts int64, exp promql.Vector) {
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(context.Background())
require.NoError(t, res.Err)
require.Empty(t, res.Warnings)
vector, err := res.Vector()
require.NoError(t, err)
testutil.RequireEqual(t, exp, vector)
}
queryAndCheckAnnotations := func(queryString string, ts int64, expWarnings annotations.Annotations) {
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(context.Background())
require.NoError(t, res.Err)
require.Equal(t, expWarnings, res.Warnings)
}
// sum().
queryString := fmt.Sprintf("sum(%s)", seriesName)
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
queryString = `sum({idx="0"})`
var annos annotations.Annotations
annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(posrange.PositionRange{Start: 4, End: 13}))
queryAndCheckAnnotations(queryString, ts, annos)
// + operator.
queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName)
for idx := 1; idx < len(c.histograms); idx++ {
queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx)
}
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
// count().
queryString = fmt.Sprintf("count(%s)", seriesName)
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}})
// avg().
queryString = fmt.Sprintf("avg(%s)", seriesName)
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})
offset := int64(len(c.histograms) - 1)
newTs := ts + offset*int64(time.Minute/time.Millisecond)
// sum_over_time().
queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset)
queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels()}})
// avg_over_time().
queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset)
queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})
})
idx0++
}
}
}
func TestNativeHistogram_SubOperator(t *testing.T) { func TestNativeHistogram_SubOperator(t *testing.T) {
// TODO(codesome): Integrate histograms into the PromQL testing framework // TODO(codesome): Integrate histograms into the PromQL testing framework
// and write more tests there. // and write more tests there.
@ -3487,7 +3274,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
for _, c := range cases { for _, c := range cases {
for _, floatHisto := range []bool{true, false} { for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
engine := newTestEngine() engine := newTestEngine(t)
storage := teststorage.New(t) storage := teststorage.New(t)
t.Cleanup(func() { storage.Close() }) t.Cleanup(func() { storage.Close() })
@ -3543,171 +3330,6 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
} }
} }
func TestNativeHistogram_MulDivOperator(t *testing.T) {
// TODO(codesome): Integrate histograms into the PromQL testing framework
// and write more tests there.
originalHistogram := histogram.Histogram{
Schema: 0,
Count: 21,
Sum: 33,
ZeroThreshold: 0.001,
ZeroCount: 3,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{3, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
NegativeBuckets: []int64{3, 0, 0},
}
cases := []struct {
scalar float64
histogram histogram.Histogram
expectedMul histogram.FloatHistogram
expectedDiv histogram.FloatHistogram
}{
{
scalar: 3,
histogram: originalHistogram,
expectedMul: histogram.FloatHistogram{
Schema: 0,
Count: 63,
Sum: 99,
ZeroThreshold: 0.001,
ZeroCount: 9,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{9, 9, 9},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
NegativeBuckets: []float64{9, 9, 9},
},
expectedDiv: histogram.FloatHistogram{
Schema: 0,
Count: 7,
Sum: 11,
ZeroThreshold: 0.001,
ZeroCount: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
NegativeBuckets: []float64{1, 1, 1},
},
},
{
scalar: 0,
histogram: originalHistogram,
expectedMul: histogram.FloatHistogram{
Schema: 0,
Count: 0,
Sum: 0,
ZeroThreshold: 0.001,
ZeroCount: 0,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{0, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
NegativeBuckets: []float64{0, 0, 0},
},
expectedDiv: histogram.FloatHistogram{
Schema: 0,
Count: math.Inf(1),
Sum: math.Inf(1),
ZeroThreshold: 0.001,
ZeroCount: math.Inf(1),
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 3},
},
NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)},
},
},
}
idx0 := int64(0)
for _, c := range cases {
for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
storage := teststorage.New(t)
t.Cleanup(func() { storage.Close() })
seriesName := "sparse_histogram_series"
floatSeriesName := "float_series"
engine := newTestEngine()
ts := idx0 * int64(10*time.Minute/time.Millisecond)
app := storage.Appender(context.Background())
h := c.histogram
lbls := labels.FromStrings("__name__", seriesName)
// Since we mutate h later, we need to create a copy here.
var err error
if floatHisto {
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil))
} else {
_, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
}
require.NoError(t, err)
_, err = app.Append(0, labels.FromStrings("__name__", floatSeriesName), ts, c.scalar)
require.NoError(t, err)
require.NoError(t, app.Commit())
queryAndCheck := func(queryString string, exp promql.Vector) {
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(context.Background())
require.NoError(t, res.Err)
vector, err := res.Vector()
require.NoError(t, err)
testutil.RequireEqual(t, exp, vector)
}
// histogram * scalar.
queryString := fmt.Sprintf(`%s * %f`, seriesName, c.scalar)
queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
// scalar * histogram.
queryString = fmt.Sprintf(`%f * %s`, c.scalar, seriesName)
queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
// histogram * float.
queryString = fmt.Sprintf(`%s * %s`, seriesName, floatSeriesName)
queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
// float * histogram.
queryString = fmt.Sprintf(`%s * %s`, floatSeriesName, seriesName)
queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
// histogram / scalar.
queryString = fmt.Sprintf(`%s / %f`, seriesName, c.scalar)
queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
// histogram / float.
queryString = fmt.Sprintf(`%s / %s`, seriesName, floatSeriesName)
queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
})
idx0++
}
}
}
func TestQueryLookbackDelta(t *testing.T) { func TestQueryLookbackDelta(t *testing.T) {
var ( var (
load = `load 5m load = `load 5m
@ -3771,7 +3393,7 @@ metric 0 1 2
for _, c := range cases { for _, c := range cases {
c := c c := c
t.Run(c.name, func(t *testing.T) { t.Run(c.name, func(t *testing.T) {
engine := promqltest.NewTestEngine(false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery) engine := promqltest.NewTestEngine(t, false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery)
storage := promqltest.LoadedStorage(t, load) storage := promqltest.LoadedStorage(t, load)
t.Cleanup(func() { storage.Close() }) t.Cleanup(func() { storage.Close() })
@ -3797,3 +3419,62 @@ func makeInt64Pointer(val int64) *int64 {
*valp = val *valp = val
return valp return valp
} }
func TestHistogramCopyFromIteratorRegression(t *testing.T) {
// Loading the following histograms creates two chunks because there's a
// counter reset. Not only is the counter lower in the last histogram,
// but buckets are missing as well.
// This in turn means that chunk iterators will have different spans.
load := `load 1m
histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum:1 count:1 buckets:[1]}}
`
storage := promqltest.LoadedStorage(t, load)
t.Cleanup(func() { storage.Close() })
engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
verify := func(t *testing.T, qry promql.Query, expected []histogram.FloatHistogram) {
res := qry.Exec(context.Background())
require.NoError(t, res.Err)
m, ok := res.Value.(promql.Matrix)
require.True(t, ok)
require.Len(t, m, 1)
series := m[0]
require.Empty(t, series.Floats)
require.Len(t, series.Histograms, len(expected))
for i, e := range expected {
series.Histograms[i].H.CounterResetHint = histogram.UnknownCounterReset // Don't care.
require.Equal(t, &e, series.Histograms[i].H)
}
}
qry, err := engine.NewRangeQuery(context.Background(), storage, nil, "increase(histogram[60s])", time.Unix(0, 0), time.Unix(0, 0).Add(1*time.Minute), time.Minute)
require.NoError(t, err)
verify(t, qry, []histogram.FloatHistogram{
{
Count: 2,
Sum: 2, // Increase from 4 to 6 is 2.
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, // Two buckets changed between the first and second histogram.
PositiveBuckets: []float64{1, 1}, // Increase from 2 to 3 is 1 in both buckets.
},
})
qry, err = engine.NewInstantQuery(context.Background(), storage, nil, "histogram[60s]", time.Unix(0, 0).Add(2*time.Minute))
require.NoError(t, err)
verify(t, qry, []histogram.FloatHistogram{
{
Count: 6,
Sum: 6,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}},
PositiveBuckets: []float64{3, 3},
},
{
Count: 1,
Sum: 1,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []float64{1},
},
})
}
@ -14,6 +14,7 @@
package promql package promql
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"math" "math"
@ -97,9 +98,10 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
lastT = samples.Histograms[numSamplesMinusOne].T lastT = samples.Histograms[numSamplesMinusOne].T
var newAnnos annotations.Annotations var newAnnos annotations.Annotations
resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange()) resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange())
annos.Merge(newAnnos)
if resultHistogram == nil { if resultHistogram == nil {
// The histograms are not compatible with each other. // The histograms are not compatible with each other.
return enh.Out, annos.Merge(newAnnos) return enh.Out, annos
} }
case len(samples.Floats) > 1: case len(samples.Floats) > 1:
numSamplesMinusOne = len(samples.Floats) - 1 numSamplesMinusOne = len(samples.Floats) - 1
@ -178,17 +180,29 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
// Otherwise, it returns the calculated histogram and an empty annotation. // Otherwise, it returns the calculated histogram and an empty annotation.
func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) { func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) {
prev := points[0].H prev := points[0].H
usingCustomBuckets := prev.UsesCustomBuckets()
last := points[len(points)-1].H last := points[len(points)-1].H
if last == nil { if last == nil {
return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos))
} }
minSchema := prev.Schema minSchema := prev.Schema
if last.Schema < minSchema { if last.Schema < minSchema {
minSchema = last.Schema minSchema = last.Schema
} }
if last.UsesCustomBuckets() != usingCustomBuckets {
return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
}
var annos annotations.Annotations var annos annotations.Annotations
// The loop below checks for gauge-type histograms, but it does not run
// on the first and last points, so check those two points here.
if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) {
annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos))
}
// First iteration to find out two things: // First iteration to find out two things:
// - What's the smallest relevant schema? // - What's the smallest relevant schema?
// - Are all data points histograms? // - Are all data points histograms?
@ -208,6 +222,9 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
if curr.Schema < minSchema { if curr.Schema < minSchema {
minSchema = curr.Schema minSchema = curr.Schema
} }
if curr.UsesCustomBuckets() != usingCustomBuckets {
return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
}
} }
h := last.CopyToSchema(minSchema) h := last.CopyToSchema(minSchema)
@ -241,7 +258,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
} }
h.CounterResetHint = histogram.GaugeType h.CounterResetHint = histogram.GaugeType
return h.Compact(0), nil return h.Compact(0), annos
} }
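The new UsesCustomBuckets guards above refuse to rate histograms whose bucket layouts have different semantics, i.e. exponential buckets mixed with custom-bucket (NHCB) ones, inside one window. A minimal standalone sketch of the distinction the guard tests for, with made-up values (CustomBucketsSchema and UsesCustomBuckets are assumed from the model/histogram package this file imports):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// Standard exponential bucketing: schema 0 means power-of-two bounds.
	exp := &histogram.FloatHistogram{Schema: 0}
	// Custom bucket boundaries, as produced from classic histograms.
	nhcb := &histogram.FloatHistogram{
		Schema:       histogram.CustomBucketsSchema,
		CustomValues: []float64{0.1, 0.5, 1},
	}
	// histogramRate cannot meaningfully subtract one kind from the other,
	// so it returns NewMixedExponentialCustomHistogramsWarning instead.
	fmt.Println(exp.UsesCustomBuckets(), nhcb.UsesCustomBuckets()) // false true
}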
// === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === // === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
@ -390,17 +407,22 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
// === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === // === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
// In case the labels are the same, NaN should sort to the bottom, so take // First, sort by the full label set. This ensures a consistent ordering in case sorting by the
// ascending sort with NaN first and reverse it. // labels provided as arguments is not conclusive.
var anno annotations.Annotations
vals[0], anno = funcSort(vals, args, enh)
labels := stringSliceFromArgs(args[1:])
slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
// Iterate over each given label return labels.Compare(a.Metric, b.Metric)
})
labels := stringSliceFromArgs(args[1:])
// Next, sort by the labels provided as arguments.
slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
// Iterate over each given label.
for _, label := range labels { for _, label := range labels {
lv1 := a.Metric.Get(label) lv1 := a.Metric.Get(label)
lv2 := b.Metric.Get(label) lv2 := b.Metric.Get(label)
// If we encounter multiple samples with the same label values, the sorting which was
// performed in the first step will act as a "tie breaker".
if lv1 == lv2 { if lv1 == lv2 {
continue continue
} }
@ -415,22 +437,27 @@ func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNode
return 0 return 0
}) })
return vals[0].(Vector), anno return vals[0].(Vector), nil
} }
// === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === // === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
// In case the labels are the same, NaN should sort to the bottom, so take // First, sort by the full label set. This ensures a consistent ordering in case sorting by the
// ascending sort with NaN first and reverse it. // labels provided as arguments is not conclusive.
var anno annotations.Annotations
vals[0], anno = funcSortDesc(vals, args, enh)
labels := stringSliceFromArgs(args[1:])
slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
// Iterate over each given label return labels.Compare(b.Metric, a.Metric)
})
labels := stringSliceFromArgs(args[1:])
// Next, sort by the labels provided as arguments.
slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
// Iterate over each given label.
for _, label := range labels { for _, label := range labels {
lv1 := a.Metric.Get(label) lv1 := a.Metric.Get(label)
lv2 := b.Metric.Get(label) lv2 := b.Metric.Get(label)
// If we encounter multiple samples with the same label values, the sorting which was
// performed in the first step will act as a "tie breaker".
if lv1 == lv2 { if lv1 == lv2 {
continue continue
} }
@ -445,21 +472,25 @@ func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *Eval
return 0 return 0
}) })
return vals[0].(Vector), anno return vals[0].(Vector), nil
} }
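The rewritten sort_by_label and sort_by_label_desc take a two-pass approach: a first sort over the full label set fixes a deterministic base order, and the per-label comparison then only breaks the ties it cares about. A standalone sketch of the idea with simplified types (not the engine's); unlike the code above it uses an explicitly stable second pass to make the tie-breaking visible:

package main

import (
	"fmt"
	"sort"
)

type sample struct{ labels map[string]string }

func sortByLabels(samples []sample, names ...string) {
	// Pass 1: any total order over the full label set.
	// fmt.Sprint prints map keys sorted, so this is deterministic.
	sort.Slice(samples, func(i, j int) bool {
		return fmt.Sprint(samples[i].labels) < fmt.Sprint(samples[j].labels)
	})
	// Pass 2: stable sort by the requested labels only; samples that
	// tie on every requested label keep the pass-1 order.
	sort.SliceStable(samples, func(i, j int) bool {
		for _, n := range names {
			if a, b := samples[i].labels[n], samples[j].labels[n]; a != b {
				return a < b
			}
		}
		return false
	})
}

func main() {
	s := []sample{
		{labels: map[string]string{"job": "b", "instance": "2"}},
		{labels: map[string]string{"job": "a", "instance": "1"}},
		{labels: map[string]string{"job": "a", "instance": "0"}},
	}
	sortByLabels(s, "job")
	fmt.Println(s) // the two job="a" samples are ordered by their full label set
}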
// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === // === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) ===
func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector) vec := vals[0].(Vector)
min := vals[1].(Vector)[0].F minVal := vals[1].(Vector)[0].F
max := vals[2].(Vector)[0].F maxVal := vals[2].(Vector)[0].F
if max < min { if maxVal < minVal {
return enh.Out, nil return enh.Out, nil
} }
for _, el := range vec { for _, el := range vec {
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: el.Metric.DropMetricName(), Metric: el.Metric,
F: math.Max(min, math.Min(max, el.F)), F: math.Max(minVal, math.Min(maxVal, el.F)),
DropName: true,
}) })
} }
return enh.Out, nil return enh.Out, nil
@ -468,11 +499,15 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
// === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === // === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) ===
func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector) vec := vals[0].(Vector)
max := vals[1].(Vector)[0].F maxVal := vals[1].(Vector)[0].F
for _, el := range vec { for _, el := range vec {
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: el.Metric.DropMetricName(), Metric: el.Metric,
F: math.Min(max, el.F), F: math.Min(maxVal, el.F),
DropName: true,
}) })
} }
return enh.Out, nil return enh.Out, nil
@ -481,11 +516,15 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
// === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) === // === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) ===
func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector) vec := vals[0].(Vector)
min := vals[1].(Vector)[0].F minVal := vals[1].(Vector)[0].F
for _, el := range vec { for _, el := range vec {
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: el.Metric.DropMetricName(), Metric: el.Metric,
F: math.Max(min, el.F), F: math.Max(minVal, el.F),
DropName: true,
}) })
} }
return enh.Out, nil return enh.Out, nil
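The clamp functions above, and the other value-transforming functions that follow, all adopt the same delayed-name-removal pattern: instead of eagerly calling DropMetricName, they tag each output sample with DropName and leave __name__ in place, so the engine can strip it once at the end, or keep it if a later step (such as label_replace targeting __name__) clears the flag. A rough sketch of the mechanism, with assumed simplified types:

package main

import "fmt"

type sample struct {
	name     string
	dropName bool
}

// finalizeNames is the late pass: names flagged with DropName are only
// removed here, after the whole expression has been evaluated, so an
// intermediate step can still un-flag a sample it renamed.
func finalizeNames(out []sample) {
	for i := range out {
		if out[i].dropName {
			out[i].name = ""
		}
	}
}

func main() {
	out := []sample{{name: "http_requests_total", dropName: true}}
	finalizeNames(out)
	fmt.Printf("%q\n", out[0].name) // ""
}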
@ -506,8 +545,9 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
for _, el := range vec { for _, el := range vec {
f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: el.Metric.DropMetricName(), Metric: el.Metric,
F: f, F: f,
DropName: true,
}) })
} }
return enh.Out, nil return enh.Out, nil
@ -573,9 +613,28 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
return vec, nil return vec, nil
} }
return aggrOverTime(vals, enh, func(s Series) float64 { return aggrOverTime(vals, enh, func(s Series) float64 {
var mean, count, c float64 var (
sum, mean, count, kahanC float64
incrementalMean bool
)
for _, f := range s.Floats { for _, f := range s.Floats {
count++ count++
if !incrementalMean {
newSum, newC := kahanSumInc(f.F, sum, kahanC)
// Perform the regular mean calculation as long as
// the sum doesn't overflow. Always take this path on
// the first iteration (even if we start with ±Inf)
// to avoid division-by-zero problems below.
if count == 1 || !math.IsInf(newSum, 0) {
sum, kahanC = newSum, newC
continue
}
// Handle overflow by reverting to incremental calculation of the mean value.
incrementalMean = true
mean = sum / (count - 1)
kahanC /= count - 1
}
if math.IsInf(mean, 0) { if math.IsInf(mean, 0) {
if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) { if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) {
// The `mean` and `f.F` values are `Inf` of the same sign. They // The `mean` and `f.F` values are `Inf` of the same sign. They
@ -593,13 +652,13 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
continue continue
} }
} }
mean, c = kahanSumInc(f.F/count-mean/count, mean, c) correctedMean := mean + kahanC
mean, kahanC = kahanSumInc(f.F/count-correctedMean/count, mean, kahanC)
} }
if incrementalMean {
if math.IsInf(mean, 0) { return mean + kahanC
return mean
} }
return mean + c return (sum + kahanC) / count
}), nil }), nil
} }
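The reworked avg_over_time above prefers a Kahan-compensated plain sum and only falls back to the incremental mean once the sum overflows. A self-contained sketch of that scheme, with kahanSumInc reimplemented here to mirror the helper this file already uses:

package main

import (
	"fmt"
	"math"
)

// kahanSumInc adds inc to sum while carrying a compensation term c
// for the low-order bits lost to rounding (Kahan-Neumaier summation).
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	switch {
	case math.IsInf(t, 0):
		c = 0 // Compensation is meaningless once the sum overflows.
	case math.Abs(sum) >= math.Abs(inc):
		c += (sum - t) + inc
	default:
		c += (inc - t) + sum
	}
	return t, c
}

func mean(values []float64) float64 {
	var sum, kahanC, count, mean float64
	incremental := false
	for _, v := range values {
		count++
		if !incremental {
			newSum, newC := kahanSumInc(v, sum, kahanC)
			if count == 1 || !math.IsInf(newSum, 0) {
				sum, kahanC = newSum, newC
				continue
			}
			// The sum overflowed: switch to the incremental mean,
			// seeded from the values accumulated so far.
			incremental = true
			mean = sum / (count - 1)
			kahanC /= count - 1
		}
		corrected := mean + kahanC
		mean, kahanC = kahanSumInc(v/count-corrected/count, mean, kahanC)
	}
	if incremental {
		return mean + kahanC
	}
	return (sum + kahanC) / count
}

func main() {
	// A plain sum of these values overflows to +Inf; the incremental
	// fallback keeps the mean finite at roughly 3.33e307.
	fmt.Println(mean([]float64{1e308, 1e308, -1e308}))
}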
@ -665,13 +724,13 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
return enh.Out, nil return enh.Out, nil
} }
return aggrOverTime(vals, enh, func(s Series) float64 { return aggrOverTime(vals, enh, func(s Series) float64 {
max := s.Floats[0].F maxVal := s.Floats[0].F
for _, f := range s.Floats { for _, f := range s.Floats {
if f.F > max || math.IsNaN(max) { if f.F > maxVal || math.IsNaN(maxVal) {
max = f.F maxVal = f.F
} }
} }
return max return maxVal
}), nil }), nil
} }
@ -685,13 +744,13 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
return enh.Out, nil return enh.Out, nil
} }
return aggrOverTime(vals, enh, func(s Series) float64 { return aggrOverTime(vals, enh, func(s Series) float64 {
min := s.Floats[0].F minVal := s.Floats[0].F
for _, f := range s.Floats { for _, f := range s.Floats {
if f.F < min || math.IsNaN(min) { if f.F < minVal || math.IsNaN(minVal) {
min = f.F minVal = f.F
} }
} }
return min return minVal
}), nil }), nil
} }
@ -837,9 +896,13 @@ func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *Eval
func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector {
for _, el := range vals[0].(Vector) { for _, el := range vals[0].(Vector) {
if el.H == nil { // Process only float samples. if el.H == nil { // Process only float samples.
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: el.Metric.DropMetricName(), Metric: el.Metric,
F: f(el.F), F: f(el.F),
DropName: true,
}) })
} }
} }
@ -983,9 +1046,13 @@ func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper)
func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector) vec := vals[0].(Vector)
for _, el := range vec { for _, el := range vec {
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: el.Metric.DropMetricName(), Metric: el.Metric,
F: float64(el.T) / 1000, F: float64(el.T) / 1000,
DropName: true,
}) })
} }
return enh.Out, nil return enh.Out, nil
@ -1092,9 +1159,13 @@ func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalN
if sample.H == nil { if sample.H == nil {
continue continue
} }
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: sample.Metric.DropMetricName(), Metric: sample.Metric,
F: sample.H.Count, F: sample.H.Count,
DropName: true,
}) })
} }
return enh.Out, nil return enh.Out, nil
@ -1109,9 +1180,13 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod
if sample.H == nil { if sample.H == nil {
continue continue
} }
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: sample.Metric.DropMetricName(), Metric: sample.Metric,
F: sample.H.Sum, F: sample.H.Sum,
DropName: true,
}) })
} }
return enh.Out, nil return enh.Out, nil
@ -1126,9 +1201,13 @@ func funcHistogramAvg(vals []parser.Value, args parser.Expressions, enh *EvalNod
if sample.H == nil { if sample.H == nil {
continue continue
} }
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: sample.Metric.DropMetricName(), Metric: sample.Metric,
F: sample.H.Sum / sample.H.Count, F: sample.H.Sum / sample.H.Count,
DropName: true,
}) })
} }
return enh.Out, nil return enh.Out, nil
@ -1165,9 +1244,13 @@ func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *Eval
} }
variance += cVariance variance += cVariance
variance /= sample.H.Count variance /= sample.H.Count
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: sample.Metric.DropMetricName(), Metric: sample.Metric,
F: math.Sqrt(variance), F: math.Sqrt(variance),
DropName: true,
}) })
} }
return enh.Out, nil return enh.Out, nil
@ -1204,9 +1287,13 @@ func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *Eval
} }
variance += cVariance variance += cVariance
variance /= sample.H.Count variance /= sample.H.Count
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: sample.Metric.DropMetricName(), Metric: sample.Metric,
F: variance, F: variance,
DropName: true,
}) })
} }
return enh.Out, nil return enh.Out, nil
@ -1223,9 +1310,13 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev
if sample.H == nil { if sample.H == nil {
continue continue
} }
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: sample.Metric.DropMetricName(), Metric: sample.Metric,
F: histogramFraction(lower, upper, sample.H), F: histogramFraction(lower, upper, sample.H),
DropName: true,
}) })
} }
return enh.Out, nil return enh.Out, nil
@ -1293,9 +1384,13 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
continue continue
} }
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: sample.Metric.DropMetricName(), Metric: sample.Metric,
F: histogramQuantile(q, sample.H), F: histogramQuantile(q, sample.H),
DropName: true,
}) })
} }
@ -1369,7 +1464,7 @@ func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelp
} }
// label_replace function operates only on series; does not look at timestamps or values. // label_replace function operates only on series; does not look at timestamps or values.
func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, annotations.Annotations) { func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) {
var ( var (
dst = stringFromArg(args[1]) dst = stringFromArg(args[1])
repl = stringFromArg(args[2]) repl = stringFromArg(args[2])
@ -1385,7 +1480,7 @@ func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, an
panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst)) panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst))
} }
val, ws := ev.eval(args[0]) val, ws := ev.eval(ctx, args[0])
matrix := val.(Matrix) matrix := val.(Matrix)
lb := labels.NewBuilder(labels.EmptyLabels()) lb := labels.NewBuilder(labels.EmptyLabels())
@ -1397,6 +1492,11 @@ func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, an
lb.Reset(el.Metric) lb.Reset(el.Metric)
lb.Set(dst, string(res)) lb.Set(dst, string(res))
matrix[i].Metric = lb.Labels() matrix[i].Metric = lb.Labels()
if dst == model.MetricNameLabel {
matrix[i].DropName = false
} else {
matrix[i].DropName = el.DropName
}
} }
} }
if matrix.ContainsSameLabelset() { if matrix.ContainsSameLabelset() {
@ -1421,7 +1521,7 @@ func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe
} }
// label_join function operates only on series; does not look at timestamps or values. // label_join function operates only on series; does not look at timestamps or values.
func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annotations.Annotations) { func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) {
var ( var (
dst = stringFromArg(args[1]) dst = stringFromArg(args[1])
sep = stringFromArg(args[2]) sep = stringFromArg(args[2])
@ -1438,7 +1538,7 @@ func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annot
panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst)) panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst))
} }
val, ws := ev.eval(args[0]) val, ws := ev.eval(ctx, args[0])
matrix := val.(Matrix) matrix := val.(Matrix)
srcVals := make([]string, len(srcLabels)) srcVals := make([]string, len(srcLabels))
lb := labels.NewBuilder(labels.EmptyLabels()) lb := labels.NewBuilder(labels.EmptyLabels())
@ -1451,6 +1551,12 @@ func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annot
lb.Reset(el.Metric) lb.Reset(el.Metric)
lb.Set(dst, strval) lb.Set(dst, strval)
matrix[i].Metric = lb.Labels() matrix[i].Metric = lb.Labels()
if dst == model.MetricNameLabel {
matrix[i].DropName = false
} else {
matrix[i].DropName = el.DropName
}
} }
return matrix, ws return matrix, ws
@ -1473,9 +1579,13 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo
for _, el := range vals[0].(Vector) { for _, el := range vals[0].(Vector) {
t := time.Unix(int64(el.F), 0).UTC() t := time.Unix(int64(el.F), 0).UTC()
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: el.Metric.DropMetricName(), Metric: el.Metric,
F: f(t), F: f(t),
DropName: true,
}) })
} }
return enh.Out return enh.Out
@ -24,6 +24,7 @@ import (
"github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/teststorage"
) )
@ -39,7 +40,7 @@ func TestDeriv(t *testing.T) {
MaxSamples: 10000, MaxSamples: 10000,
Timeout: 10 * time.Second, Timeout: 10 * time.Second,
} }
engine := promql.NewEngine(opts) engine := promqltest.NewTestEngineWithOpts(t, opts)
a := storage.Appender(context.Background()) a := storage.Appender(context.Background())
@ -68,6 +68,10 @@ func fuzzParseMetricWithContentType(in []byte, contentType string) int {
panic(warning) panic(warning)
} }
if contentType == "application/openmetrics-text" {
p = textparse.NewOpenMetricsParser(in, symbolTable)
}
var err error var err error
for { for {
_, err = p.Next() _, err = p.Next()
@ -352,8 +352,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) {
// f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f // f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f
// for all the non-nil children of node, recursively. // for all the non-nil children of node, recursively.
func Inspect(node Node, f inspector) { func Inspect(node Node, f inspector) {
//nolint: errcheck Walk(f, node, nil) //nolint:errcheck
Walk(f, node, nil)
} }
// Children returns a list of all child nodes of a syntax tree node. // Children returns a list of all child nodes of a syntax tree node.
@ -419,7 +418,7 @@ func mergeRanges(first, last Node) posrange.PositionRange {
} }
} }
// Item implements the Node interface. // PositionRange implements the Node interface.
// This makes it possible to call mergeRanges on them. // This makes it possible to call mergeRanges on them.
func (i *Item) PositionRange() posrange.PositionRange { func (i *Item) PositionRange() posrange.PositionRange {
return posrange.PositionRange{ return posrange.PositionRange{
@ -23,6 +23,8 @@ import (
"github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/common/model"
) )
%} %}
@ -360,11 +362,19 @@ grouping_label_list:
grouping_label : maybe_label grouping_label : maybe_label
{ {
if !isLabel($1.Val) { if !model.LabelName($1.Val).IsValid() {
yylex.(*parser).unexpected("grouping opts", "label") yylex.(*parser).unexpected("grouping opts", "label")
} }
$$ = $1 $$ = $1
} }
| STRING {
if !model.LabelName(yylex.(*parser).unquoteString($1.Val)).IsValid() {
yylex.(*parser).unexpected("grouping opts", "label")
}
$$ = $1
$$.Pos++
$$.Val = yylex.(*parser).unquoteString($$.Val)
}
| error | error
{ yylex.(*parser).unexpected("grouping opts", "label"); $$ = Item{} } { yylex.(*parser).unexpected("grouping opts", "label"); $$ = Item{} }
; ;
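The new STRING alternative in the grammar lets by()/without() clauses name labels that are not valid legacy identifiers, which matters once UTF-8 metric and label names are allowed. A small sketch of what now parses, using made-up OTel-style names and the validation-scheme switch the updated tests below also flip:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Quoted grouping labels require UTF-8 name validation.
	model.NameValidationScheme = model.UTF8Validation
	expr, err := parser.ParseExpr(`sum by ("http.target") ({"http.server.request.duration"})`)
	fmt.Println(expr, err)
}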
File diff suppressed because it is too large.
@ -617,6 +617,16 @@ func lexBuckets(l *Lexer) stateFn {
l.bracketOpen = false l.bracketOpen = false
l.emit(RIGHT_BRACKET) l.emit(RIGHT_BRACKET)
return lexHistogram return lexHistogram
case isAlpha(r):
// Current word is Inf or NaN.
word := l.input[l.start:l.pos]
if desc, ok := key[strings.ToLower(word)]; ok {
if desc == NUMBER {
l.emit(desc)
return lexStatements
}
}
return lexBuckets
default: default:
return l.errorf("invalid character in buckets description: %q", r) return l.errorf("invalid character in buckets description: %q", r)
} }
@ -727,23 +737,23 @@ func lexValueSequence(l *Lexer) stateFn {
// was only modified to integrate with our lexer. // was only modified to integrate with our lexer.
func lexEscape(l *Lexer) stateFn { func lexEscape(l *Lexer) stateFn {
var n int var n int
var base, max uint32 var base, maxVal uint32
ch := l.next() ch := l.next()
switch ch { switch ch {
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', l.stringOpen: case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', l.stringOpen:
return lexString return lexString
case '0', '1', '2', '3', '4', '5', '6', '7': case '0', '1', '2', '3', '4', '5', '6', '7':
n, base, max = 3, 8, 255 n, base, maxVal = 3, 8, 255
case 'x': case 'x':
ch = l.next() ch = l.next()
n, base, max = 2, 16, 255 n, base, maxVal = 2, 16, 255
case 'u': case 'u':
ch = l.next() ch = l.next()
n, base, max = 4, 16, unicode.MaxRune n, base, maxVal = 4, 16, unicode.MaxRune
case 'U': case 'U':
ch = l.next() ch = l.next()
n, base, max = 8, 16, unicode.MaxRune n, base, maxVal = 8, 16, unicode.MaxRune
case eof: case eof:
l.errorf("escape sequence not terminated") l.errorf("escape sequence not terminated")
return lexString return lexString
@ -772,7 +782,7 @@ func lexEscape(l *Lexer) stateFn {
} }
} }
if x > max || 0xD800 <= x && x < 0xE000 { if x > maxVal || 0xD800 <= x && x < 0xE000 {
l.errorf("escape sequence is an invalid Unicode code point") l.errorf("escape sequence is an invalid Unicode code point")
} }
return lexString return lexString
@ -1059,16 +1069,3 @@ func isDigit(r rune) bool {
func isAlpha(r rune) bool { func isAlpha(r rune) bool {
return r == '_' || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') return r == '_' || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z')
} }
// isLabel reports whether the string can be used as label.
func isLabel(s string) bool {
if len(s) == 0 || !isAlpha(rune(s[0])) {
return false
}
for _, c := range s[1:] {
if !isAlphaNumeric(c) {
return false
}
}
return true
}
@ -639,6 +639,29 @@ var tests = []struct {
}, },
seriesDesc: true, seriesDesc: true,
}, },
{
input: `{} {{buckets: [Inf NaN] schema:1}}`,
expected: []Item{
{LEFT_BRACE, 0, `{`},
{RIGHT_BRACE, 1, `}`},
{SPACE, 2, ` `},
{OPEN_HIST, 3, `{{`},
{BUCKETS_DESC, 5, `buckets`},
{COLON, 12, `:`},
{SPACE, 13, ` `},
{LEFT_BRACKET, 14, `[`},
{NUMBER, 15, `Inf`},
{SPACE, 18, ` `},
{NUMBER, 19, `NaN`},
{RIGHT_BRACKET, 22, `]`},
{SPACE, 23, ` `},
{SCHEMA_DESC, 24, `schema`},
{COLON, 30, `:`},
{NUMBER, 31, `1`},
{CLOSE_HIST, 32, `}}`},
},
seriesDesc: true,
},
{ // Series with sum as -Inf and count as NaN. { // Series with sum as -Inf and count as NaN.
input: `{} {{buckets: [5 10 7] sum:Inf count:NaN}}`, input: `{} {{buckets: [5 10 7] sum:Inf count:NaN}}`,
expected: []Item{ expected: []Item{
@ -2397,6 +2397,51 @@ var testExpr = []struct {
}, },
}, },
}, },
{
input: `sum by ("foo")({"some.metric"})`,
expected: &AggregateExpr{
Op: SUM,
Expr: &VectorSelector{
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some.metric"),
},
PosRange: posrange.PositionRange{
Start: 15,
End: 30,
},
},
Grouping: []string{"foo"},
PosRange: posrange.PositionRange{
Start: 0,
End: 31,
},
},
},
{
input: `sum by ("foo)(some_metric{})`,
fail: true,
errMsg: "unterminated quoted string",
},
{
input: `sum by ("foo", bar, 'baz')({"some.metric"})`,
expected: &AggregateExpr{
Op: SUM,
Expr: &VectorSelector{
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some.metric"),
},
PosRange: posrange.PositionRange{
Start: 27,
End: 42,
},
},
Grouping: []string{"foo", "bar", "baz"},
PosRange: posrange.PositionRange{
Start: 0,
End: 43,
},
},
},
{ {
input: "avg by (foo)(some_metric)", input: "avg by (foo)(some_metric)",
expected: &AggregateExpr{ expected: &AggregateExpr{
@ -3844,6 +3889,7 @@ func readable(s string) string {
} }
func TestParseExpressions(t *testing.T) { func TestParseExpressions(t *testing.T) {
model.NameValidationScheme = model.UTF8Validation
for _, test := range testExpr { for _, test := range testExpr {
t.Run(readable(test.input), func(t *testing.T) { t.Run(readable(test.input), func(t *testing.T) {
expr, err := ParseExpr(test.input) expr, err := ParseExpr(test.input)
@ -72,27 +72,48 @@ func (node *AggregateExpr) String() string {
return aggrString return aggrString
} }
func (node *AggregateExpr) ShortString() string {
aggrString := node.getAggOpStr()
return aggrString
}
func (node *AggregateExpr) getAggOpStr() string { func (node *AggregateExpr) getAggOpStr() string {
aggrString := node.Op.String() aggrString := node.Op.String()
switch { switch {
case node.Without: case node.Without:
aggrString += fmt.Sprintf(" without (%s) ", strings.Join(node.Grouping, ", ")) aggrString += fmt.Sprintf(" without (%s) ", joinLabels(node.Grouping))
case len(node.Grouping) > 0: case len(node.Grouping) > 0:
aggrString += fmt.Sprintf(" by (%s) ", strings.Join(node.Grouping, ", ")) aggrString += fmt.Sprintf(" by (%s) ", joinLabels(node.Grouping))
} }
return aggrString return aggrString
} }
func (node *BinaryExpr) String() string { func joinLabels(ss []string) string {
returnBool := "" for i, s := range ss {
if node.ReturnBool { // If the label is already quoted, don't quote it again.
returnBool = " bool" if s[0] != '"' && s[0] != '\'' && s[0] != '`' && !model.IsValidLegacyMetricName(string(model.LabelValue(s))) {
ss[i] = fmt.Sprintf("\"%s\"", s)
} }
}
return strings.Join(ss, ", ")
}
func (node *BinaryExpr) returnBool() string {
if node.ReturnBool {
return " bool"
}
return ""
}
func (node *BinaryExpr) String() string {
matching := node.getMatchingStr() matching := node.getMatchingStr()
return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, returnBool, matching, node.RHS) return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, node.returnBool(), matching, node.RHS)
}
func (node *BinaryExpr) ShortString() string {
return fmt.Sprintf("%s%s%s", node.Op, node.returnBool(), node.getMatchingStr())
} }
func (node *BinaryExpr) getMatchingStr() string { func (node *BinaryExpr) getMatchingStr() string {
@ -120,9 +141,13 @@ func (node *Call) String() string {
return fmt.Sprintf("%s(%s)", node.Func.Name, node.Args) return fmt.Sprintf("%s(%s)", node.Func.Name, node.Args)
} }
func (node *MatrixSelector) String() string { func (node *Call) ShortString() string {
return node.Func.Name
}
func (node *MatrixSelector) atOffset() (string, string) {
// Copy the Vector selector before changing the offset // Copy the Vector selector before changing the offset
vecSelector := *node.VectorSelector.(*VectorSelector) vecSelector := node.VectorSelector.(*VectorSelector)
offset := "" offset := ""
switch { switch {
case vecSelector.OriginalOffset > time.Duration(0): case vecSelector.OriginalOffset > time.Duration(0):
@ -139,7 +164,13 @@ func (node *MatrixSelector) String() string {
case vecSelector.StartOrEnd == END: case vecSelector.StartOrEnd == END:
at = " @ end()" at = " @ end()"
} }
return at, offset
}
func (node *MatrixSelector) String() string {
at, offset := node.atOffset()
// Copy the Vector selector before changing the offset
vecSelector := *node.VectorSelector.(*VectorSelector)
// Do not print the @ and offset twice. // Do not print the @ and offset twice.
offsetVal, atVal, preproc := vecSelector.OriginalOffset, vecSelector.Timestamp, vecSelector.StartOrEnd offsetVal, atVal, preproc := vecSelector.OriginalOffset, vecSelector.Timestamp, vecSelector.StartOrEnd
vecSelector.OriginalOffset = 0 vecSelector.OriginalOffset = 0
@ -153,10 +184,19 @@ func (node *MatrixSelector) String() string {
return str return str
} }
func (node *MatrixSelector) ShortString() string {
at, offset := node.atOffset()
return fmt.Sprintf("[%s]%s%s", model.Duration(node.Range), at, offset)
}
func (node *SubqueryExpr) String() string { func (node *SubqueryExpr) String() string {
return fmt.Sprintf("%s%s", node.Expr.String(), node.getSubqueryTimeSuffix()) return fmt.Sprintf("%s%s", node.Expr.String(), node.getSubqueryTimeSuffix())
} }
func (node *SubqueryExpr) ShortString() string {
return node.getSubqueryTimeSuffix()
}
// getSubqueryTimeSuffix returns the '[<range>:<step>] @ <timestamp> offset <offset>' suffix of the subquery. // getSubqueryTimeSuffix returns the '[<range>:<step>] @ <timestamp> offset <offset>' suffix of the subquery.
func (node *SubqueryExpr) getSubqueryTimeSuffix() string { func (node *SubqueryExpr) getSubqueryTimeSuffix() string {
step := "" step := ""
@ -198,6 +238,10 @@ func (node *UnaryExpr) String() string {
return fmt.Sprintf("%s%s", node.Op, node.Expr) return fmt.Sprintf("%s%s", node.Op, node.Expr)
} }
func (node *UnaryExpr) ShortString() string {
return node.Op.String()
}
func (node *VectorSelector) String() string { func (node *VectorSelector) String() string {
var labelStrings []string var labelStrings []string
if len(node.LabelMatchers) > 1 { if len(node.LabelMatchers) > 1 {
@ -16,6 +16,7 @@ package parser
import ( import (
"testing" "testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
@ -44,6 +45,14 @@ func TestExprString(t *testing.T) {
in: `sum without(instance) (task:errors:rate10s{job="s"})`, in: `sum without(instance) (task:errors:rate10s{job="s"})`,
out: `sum without (instance) (task:errors:rate10s{job="s"})`, out: `sum without (instance) (task:errors:rate10s{job="s"})`,
}, },
{
in: `sum by("foo.bar") (task:errors:rate10s{job="s"})`,
out: `sum by ("foo.bar") (task:errors:rate10s{job="s"})`,
},
{
in: `sum without("foo.bar") (task:errors:rate10s{job="s"})`,
out: `sum without ("foo.bar") (task:errors:rate10s{job="s"})`,
},
{ {
in: `topk(5, task:errors:rate10s{job="s"})`, in: `topk(5, task:errors:rate10s{job="s"})`,
}, },
@ -157,6 +166,8 @@ func TestExprString(t *testing.T) {
}, },
} }
model.NameValidationScheme = model.UTF8Validation
for _, test := range inputs { for _, test := range inputs {
expr, err := ParseExpr(test.in) expr, err := ParseExpr(test.in)
require.NoError(t, err) require.NoError(t, err)
@ -28,12 +28,12 @@ import (
"github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/teststorage"
) )
func newTestEngine() *promql.Engine { func newTestEngine(t *testing.T) *promql.Engine {
return promqltest.NewTestEngine(false, 0, promqltest.DefaultMaxSamplesPerQuery) return promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
} }
func TestEvaluations(t *testing.T) { func TestEvaluations(t *testing.T) {
promqltest.RunBuiltinTests(t, newTestEngine()) promqltest.RunBuiltinTests(t, newTestEngine(t))
} }
// Run a lot of queries at the same time, to check for race conditions. // Run a lot of queries at the same time, to check for race conditions.
@ -48,7 +48,7 @@ func TestConcurrentRangeQueries(t *testing.T) {
} }
// Enable experimental functions testing // Enable experimental functions testing
parser.EnableExperimentalFunctions = true parser.EnableExperimentalFunctions = true
engine := promql.NewEngine(opts) engine := promqltest.NewTestEngineWithOpts(t, opts)
const interval = 10000 // 10s interval. const interval = 10000 // 10s interval.
// A day of data plus 10k steps. // A day of data plus 10k steps.
@ -79,8 +79,9 @@ func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage {
return test.storage return test.storage
} }
func NewTestEngine(enablePerStepStats bool, lookbackDelta time.Duration, maxSamples int) *promql.Engine { // NewTestEngine creates a promql.Engine with enablePerStepStats, lookbackDelta and maxSamples, and returns it.
return promql.NewEngine(promql.EngineOpts{ func NewTestEngine(tb testing.TB, enablePerStepStats bool, lookbackDelta time.Duration, maxSamples int) *promql.Engine {
return NewTestEngineWithOpts(tb, promql.EngineOpts{
Logger: nil, Logger: nil,
Reg: nil, Reg: nil,
MaxSamples: maxSamples, MaxSamples: maxSamples,
@ -90,9 +91,20 @@ func NewTestEngine(enablePerStepStats bool, lookbackDelta time.Duration, maxSamp
EnableNegativeOffset: true, EnableNegativeOffset: true,
EnablePerStepStats: enablePerStepStats, EnablePerStepStats: enablePerStepStats,
LookbackDelta: lookbackDelta, LookbackDelta: lookbackDelta,
EnableDelayedNameRemoval: true,
}) })
} }
// NewTestEngineWithOpts creates a promql.Engine with opts and returns it.
func NewTestEngineWithOpts(tb testing.TB, opts promql.EngineOpts) *promql.Engine {
tb.Helper()
ng := promql.NewEngine(opts)
tb.Cleanup(func() {
require.NoError(tb, ng.Close())
})
return ng
}
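NewTestEngineWithOpts ties the engine's lifetime to the test via tb.Cleanup, which is why the call sites in this diff no longer close engines by hand. A typical call site would look like this (illustrative options only):

func TestSomething(t *testing.T) {
	opts := promql.EngineOpts{MaxSamples: 10, Timeout: 10 * time.Second}
	engine := promqltest.NewTestEngineWithOpts(t, opts)
	_ = engine // use it; Close() runs automatically when the test ends
}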
// RunBuiltinTests runs an acceptance test suite against the provided engine. // RunBuiltinTests runs an acceptance test suite against the provided engine.
func RunBuiltinTests(t TBRun, engine promql.QueryEngine) { func RunBuiltinTests(t TBRun, engine promql.QueryEngine) {
t.Cleanup(func() { parser.EnableExperimentalFunctions = false }) t.Cleanup(func() { parser.EnableExperimentalFunctions = false })
@ -651,6 +663,7 @@ type evalCmd struct {
expectedFailRegexp *regexp.Regexp expectedFailRegexp *regexp.Regexp
metrics map[uint64]labels.Labels metrics map[uint64]labels.Labels
expectScalar bool
expected map[uint64]entry expected map[uint64]entry
} }
@ -695,12 +708,15 @@ func (ev *evalCmd) String() string {
// expect adds a sequence of values to the set of expected // expect adds a sequence of values to the set of expected
// results for the query. // results for the query.
func (ev *evalCmd) expect(pos int, vals ...parser.SequenceValue) { func (ev *evalCmd) expect(pos int, vals ...parser.SequenceValue) {
ev.expectScalar = true
ev.expected[0] = entry{pos: pos, vals: vals} ev.expected[0] = entry{pos: pos, vals: vals}
} }
// expectMetric adds a new metric with a sequence of values to the set of expected // expectMetric adds a new metric with a sequence of values to the set of expected
// results for the query. // results for the query.
func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.SequenceValue) { func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.SequenceValue) {
ev.expectScalar = false
h := m.Hash() h := m.Hash()
ev.metrics[h] = m ev.metrics[h] = m
ev.expected[h] = entry{pos: pos, vals: vals} ev.expected[h] = entry{pos: pos, vals: vals}
@ -714,6 +730,10 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
return fmt.Errorf("expected ordered result, but query returned a matrix") return fmt.Errorf("expected ordered result, but query returned a matrix")
} }
if ev.expectScalar {
return fmt.Errorf("expected scalar result, but got matrix %s", val.String())
}
if err := assertMatrixSorted(val); err != nil { if err := assertMatrixSorted(val); err != nil {
return err return err
} }
@ -769,7 +789,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
return fmt.Errorf("expected histogram value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s)) return fmt.Errorf("expected histogram value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
} }
if !actual.H.Equals(expected.H.Compact(0)) { if !compareNativeHistogram(expected.H.Compact(0), actual.H.Compact(0)) {
return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H, actual.H, formatSeriesResult(s)) return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H, actual.H, formatSeriesResult(s))
} }
} }
@ -782,6 +802,10 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
} }
case promql.Vector: case promql.Vector:
if ev.expectScalar {
return fmt.Errorf("expected scalar result, but got vector %s", val.String())
}
seen := map[uint64]bool{} seen := map[uint64]bool{}
for pos, v := range val { for pos, v := range val {
fp := v.Metric.Hash() fp := v.Metric.Hash()
@ -804,7 +828,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
if expH != nil && v.H == nil { if expH != nil && v.H == nil {
return fmt.Errorf("expected histogram %s for %s but got float value %v", HistogramTestExpression(expH), v.Metric, v.F) return fmt.Errorf("expected histogram %s for %s but got float value %v", HistogramTestExpression(expH), v.Metric, v.F)
} }
if expH != nil && !expH.Compact(0).Equals(v.H) { if expH != nil && !compareNativeHistogram(expH.Compact(0), v.H.Compact(0)) {
return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H)) return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H))
} }
if !almost.Equal(exp0.Value, v.F, defaultEpsilon) { if !almost.Equal(exp0.Value, v.F, defaultEpsilon) {
@ -820,15 +844,15 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
} }
case promql.Scalar: case promql.Scalar:
if len(ev.expected) != 1 { if !ev.expectScalar {
return fmt.Errorf("expected vector result, but got scalar %s", val.String()) return fmt.Errorf("expected vector or matrix result, but got %s", val.String())
} }
exp0 := ev.expected[0].vals[0] exp0 := ev.expected[0].vals[0]
if exp0.Histogram != nil { if exp0.Histogram != nil {
return fmt.Errorf("expected Histogram %v but got scalar %s", exp0.Histogram.TestExpression(), val.String()) return fmt.Errorf("expected histogram %v but got %s", exp0.Histogram.TestExpression(), val.String())
} }
if !almost.Equal(exp0.Value, val.V, defaultEpsilon) { if !almost.Equal(exp0.Value, val.V, defaultEpsilon) {
return fmt.Errorf("expected Scalar %v but got %v", val.V, exp0.Value) return fmt.Errorf("expected scalar %v but got %v", exp0.Value, val.V)
} }
default: default:
@ -837,6 +861,121 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
return nil return nil
} }
// compareNativeHistogram is a helper function to compare two native histograms,
// tolerating small differences in float-typed fields such as Count and Sum.
func compareNativeHistogram(exp, cur *histogram.FloatHistogram) bool {
if exp == nil || cur == nil {
return false
}
if exp.Schema != cur.Schema ||
!almost.Equal(exp.Count, cur.Count, defaultEpsilon) ||
!almost.Equal(exp.Sum, cur.Sum, defaultEpsilon) {
return false
}
if exp.UsesCustomBuckets() {
if !histogram.FloatBucketsMatch(exp.CustomValues, cur.CustomValues) {
return false
}
}
if exp.ZeroThreshold != cur.ZeroThreshold ||
!almost.Equal(exp.ZeroCount, cur.ZeroCount, defaultEpsilon) {
return false
}
if !spansMatch(exp.NegativeSpans, cur.NegativeSpans) {
return false
}
if !floatBucketsMatch(exp.NegativeBuckets, cur.NegativeBuckets) {
return false
}
if !spansMatch(exp.PositiveSpans, cur.PositiveSpans) {
return false
}
if !floatBucketsMatch(exp.PositiveBuckets, cur.PositiveBuckets) {
return false
}
return true
}
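A hypothetical sketch (not part of this change; it assumes a test in the same package, since the helper is unexported) of the tolerance in action: the two histograms below differ in Sum by far less than defaultEpsilon, so the helper treats them as equal even though strict Equals does not.
func TestCompareNativeHistogramTolerance(t *testing.T) {
	exp := &histogram.FloatHistogram{
		Schema:          0,
		Count:           4,
		Sum:             10,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{2, 2},
	}
	cur := exp.Copy()
	cur.Sum += 1e-13 // well within defaultEpsilon
	require.False(t, exp.Equals(cur))                 // strict comparison fails
	require.True(t, compareNativeHistogram(exp, cur)) // tolerant comparison passes
}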
func floatBucketsMatch(b1, b2 []float64) bool {
if len(b1) != len(b2) {
return false
}
for i, b := range b1 {
if !almost.Equal(b, b2[i], defaultEpsilon) {
return false
}
}
return true
}
func spansMatch(s1, s2 []histogram.Span) bool {
if len(s1) == 0 && len(s2) == 0 {
return true
}
s1idx, s2idx := 0, 0
for {
if s1idx >= len(s1) {
return allEmptySpans(s2[s2idx:])
}
if s2idx >= len(s2) {
return allEmptySpans(s1[s1idx:])
}
currS1, currS2 := s1[s1idx], s2[s2idx]
s1idx++
s2idx++
if currS1.Length == 0 {
// This span is zero length, so we fold consecutive zero-length spans
// into it until we reach a span of non-zero length.
for ; s1idx < len(s1) && s1[s1idx].Length == 0; s1idx++ {
currS1.Offset += s1[s1idx].Offset
}
if s1idx < len(s1) {
currS1.Offset += s1[s1idx].Offset
currS1.Length = s1[s1idx].Length
s1idx++
}
}
if currS2.Length == 0 {
// This span is zero length, so we fold consecutive zero-length spans
// into it until we reach a span of non-zero length.
for ; s2idx < len(s2) && s2[s2idx].Length == 0; s2idx++ {
currS2.Offset += s2[s2idx].Offset
}
if s2idx < len(s2) {
currS2.Offset += s2[s2idx].Offset
currS2.Length = s2[s2idx].Length
s2idx++
}
}
if currS1.Length == 0 && currS2.Length == 0 {
// The last spans of both sets are zero length. Previous spans match.
return true
}
if currS1.Offset != currS2.Offset || currS1.Length != currS2.Length {
return false
}
}
}
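To illustrate the normalization with hypothetical values (not taken from the test suite): a zero-length span only shifts the offset of the span that follows it, so these two encodings describe the same bucket layout and the function reports a match.
a := []histogram.Span{{Offset: 0, Length: 2}, {Offset: 2, Length: 2}}
b := []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 0}, {Offset: 1, Length: 2}}
// In b, the zero-length {1, 0} span is folded into its successor: the offsets
// 1+1 add up to 2 with length 2, matching a's second span.
fmt.Println(spansMatch(a, b)) // true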
func allEmptySpans(s []histogram.Span) bool {
for _, ss := range s {
if ss.Length > 0 {
return false
}
}
return true
}
func (ev *evalCmd) checkExpectedFailure(actual error) error { func (ev *evalCmd) checkExpectedFailure(actual error) error {
if ev.expectedFailMessage != "" { if ev.expectedFailMessage != "" {
if ev.expectedFailMessage != actual.Error() { if ev.expectedFailMessage != actual.Error() {
@ -1003,13 +1142,6 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err) return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
} }
res := q.Exec(t.context) res := q.Exec(t.context)
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
if !cmd.warn && countWarnings > 0 {
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings)
}
if cmd.warn && countWarnings == 0 {
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
}
if res.Err != nil { if res.Err != nil {
if cmd.fail { if cmd.fail {
return cmd.checkExpectedFailure(res.Err) return cmd.checkExpectedFailure(res.Err)
@ -1020,6 +1152,13 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
if res.Err == nil && cmd.fail { if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line) return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
} }
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
if !cmd.warn && countWarnings > 0 {
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings)
}
if cmd.warn && countWarnings == 0 {
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
}
defer q.Close() defer q.Close()
if err := cmd.compareResult(res.Value); err != nil { if err := cmd.compareResult(res.Value); err != nil {
@ -1050,13 +1189,6 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
} }
defer q.Close() defer q.Close()
res := q.Exec(t.context) res := q.Exec(t.context)
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
if !cmd.warn && countWarnings > 0 {
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings)
}
if cmd.warn && countWarnings == 0 {
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
if res.Err != nil { if res.Err != nil {
if cmd.fail { if cmd.fail {
if err := cmd.checkExpectedFailure(res.Err); err != nil { if err := cmd.checkExpectedFailure(res.Err); err != nil {
@ -1070,6 +1202,13 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
if res.Err == nil && cmd.fail { if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
} }
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
if !cmd.warn && countWarnings > 0 {
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings)
}
if cmd.warn && countWarnings == 0 {
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
err = cmd.compareResult(res.Value) err = cmd.compareResult(res.Value)
if err != nil { if err != nil {
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err) return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
@ -1247,6 +1386,7 @@ func (ll *LazyLoader) clear() error {
NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(ll.SubqueryInterval) }, NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(ll.SubqueryInterval) },
EnableAtModifier: ll.opts.EnableAtModifier, EnableAtModifier: ll.opts.EnableAtModifier,
EnableNegativeOffset: ll.opts.EnableNegativeOffset, EnableNegativeOffset: ll.opts.EnableNegativeOffset,
EnableDelayedNameRemoval: true,
} }
ll.queryEngine = promql.NewEngine(opts) ll.queryEngine = promql.NewEngine(opts)
@ -1307,7 +1447,11 @@ func (ll *LazyLoader) Storage() storage.Storage {
// Close closes resources associated with the LazyLoader. // Close closes resources associated with the LazyLoader.
func (ll *LazyLoader) Close() error { func (ll *LazyLoader) Close() error {
ll.cancelCtx() ll.cancelCtx()
return ll.storage.Close() err := ll.queryEngine.Close()
if sErr := ll.storage.Close(); sErr != nil {
return errors.Join(sErr, err)
}
return err
} }
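For reference, errors.Join (standard library, Go 1.20+) discards nil operands and returns nil when every operand is nil, so the success path of Close still returns nil. A small illustration:
fmt.Println(errors.Join(nil, nil) == nil) // true: joining only nils yields nil
err := errors.Join(errors.New("storage close failed"), nil)
fmt.Println(err) // prints "storage close failed"; the nil operand is dropped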
func makeInt64Pointer(val int64) *int64 { func makeInt64Pointer(val int64) *int64 {

View file

@ -554,11 +554,48 @@ eval range from 0 to 5m step 5m testmetric
`, `,
expectedError: `error in eval testmetric (line 5): expected float value at index 0 for {__name__="testmetric"} to have timestamp 300000, but it had timestamp 0 (result has 1 float point [3 @[0]] and 1 histogram point [{count:0, sum:0} @[300000]])`, expectedError: `error in eval testmetric (line 5): expected float value at index 0 for {__name__="testmetric"} to have timestamp 300000, but it had timestamp 0 (result has 1 float point [3 @[0]] and 1 histogram point [{count:0, sum:0} @[300000]])`,
}, },
"instant query with expected scalar result": {
input: `
eval instant at 1m 3
3
`,
},
"instant query with unexpected scalar result": {
input: `
eval instant at 1m 3
2
`,
expectedError: `error in eval 3 (line 2): expected scalar 2 but got 3`,
},
"instant query that returns a scalar but expects a vector": {
input: `
eval instant at 1m 3
{} 3
`,
expectedError: `error in eval 3 (line 2): expected vector or matrix result, but got scalar: 3 @[60000]`,
},
"instant query that returns a vector but expects a scalar": {
input: `
eval instant at 1m vector(3)
3
`,
expectedError: `error in eval vector(3) (line 2): expected scalar result, but got vector {} => 3 @[60000]`,
},
"range query that returns a matrix but expects a scalar": {
input: `
eval range from 0 to 1m step 30s vector(3)
3
`,
expectedError: `error in eval vector(3) (line 2): expected scalar result, but got matrix {} =>
3 @[0]
3 @[30000]
3 @[60000]`,
},
} }
for name, testCase := range testCases { for name, testCase := range testCases {
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {
err := runTest(t, testCase.input, NewTestEngine(false, 0, DefaultMaxSamplesPerQuery)) err := runTest(t, testCase.input, NewTestEngine(t, false, 0, DefaultMaxSamplesPerQuery))
if testCase.expectedError == "" { if testCase.expectedError == "" {
require.NoError(t, err) require.NoError(t, err)

View file

@ -503,7 +503,7 @@ eval instant at 1m avg(data{test="-big"})
eval instant at 1m avg(data{test="bigzero"}) eval instant at 1m avg(data{test="bigzero"})
{} 0 {} 0
# Test summing extreme values. # Test summing and averaging extreme values.
clear clear
load 10s load 10s
@ -529,21 +529,39 @@ load 10s
eval instant at 1m sum(data{test="ten"}) eval instant at 1m sum(data{test="ten"})
{} 10 {} 10
eval instant at 1m avg(data{test="ten"})
{} 2.5
eval instant at 1m sum by (group) (data{test="pos_inf"}) eval instant at 1m sum by (group) (data{test="pos_inf"})
{group="1"} Inf {group="1"} Inf
{group="2"} Inf {group="2"} Inf
eval instant at 1m avg by (group) (data{test="pos_inf"})
{group="1"} Inf
{group="2"} Inf
eval instant at 1m sum by (group) (data{test="neg_inf"}) eval instant at 1m sum by (group) (data{test="neg_inf"})
{group="1"} -Inf {group="1"} -Inf
{group="2"} -Inf {group="2"} -Inf
eval instant at 1m avg by (group) (data{test="neg_inf"})
{group="1"} -Inf
{group="2"} -Inf
eval instant at 1m sum(data{test="inf_inf"}) eval instant at 1m sum(data{test="inf_inf"})
{} NaN {} NaN
eval instant at 1m avg(data{test="inf_inf"})
{} NaN
eval instant at 1m sum by (group) (data{test="nan"}) eval instant at 1m sum by (group) (data{test="nan"})
{group="1"} NaN {group="1"} NaN
{group="2"} NaN {group="2"} NaN
eval instant at 1m avg by (group) (data{test="nan"})
{group="1"} NaN
{group="2"} NaN
clear clear
# Test that aggregations are deterministic. # Test that aggregations are deterministic.

View file

@ -523,16 +523,16 @@ load 5m
node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 0+10x10 node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 0+10x10
eval_ordered instant at 50m sort_by_label(http_requests, "instance") eval_ordered instant at 50m sort_by_label(http_requests, "instance")
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="production", instance="0", job="app-server"} 500
http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="canary", instance="0", job="app-server"} 700
http_requests{group="production", instance="1", job="api-server"} 200 http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="0", job="app-server"} 500
http_requests{group="canary", instance="1", job="api-server"} 400 http_requests{group="canary", instance="1", job="api-server"} 400
http_requests{group="production", instance="1", job="app-server"} 600
http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="canary", instance="1", job="app-server"} 800
http_requests{group="production", instance="2", job="api-server"} 100 http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="production", instance="1", job="app-server"} 600
http_requests{group="canary", instance="2", job="api-server"} NaN http_requests{group="canary", instance="2", job="api-server"} NaN
http_requests{group="production", instance="2", job="api-server"} 100
eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group") eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group")
http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="canary", instance="0", job="api-server"} 300
@ -585,14 +585,14 @@ eval_ordered instant at 50m sort_by_label(http_requests, "job", "instance", "gro
eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance") eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance")
http_requests{group="production", instance="2", job="api-server"} 100 http_requests{group="production", instance="2", job="api-server"} 100
http_requests{group="canary", instance="2", job="api-server"} NaN http_requests{group="canary", instance="2", job="api-server"} NaN
http_requests{group="canary", instance="1", job="app-server"} 800
http_requests{group="production", instance="1", job="app-server"} 600 http_requests{group="production", instance="1", job="app-server"} 600
http_requests{group="canary", instance="1", job="api-server"} 400
http_requests{group="production", instance="1", job="api-server"} 200 http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="canary", instance="1", job="app-server"} 800
http_requests{group="canary", instance="1", job="api-server"} 400
http_requests{group="production", instance="0", job="app-server"} 500 http_requests{group="production", instance="0", job="app-server"} 500
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="production", instance="0", job="api-server"} 100 http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="canary", instance="0", job="app-server"} 700
http_requests{group="canary", instance="0", job="api-server"} 300
eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group") eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group")
http_requests{group="production", instance="2", job="api-server"} 100 http_requests{group="production", instance="2", job="api-server"} 100
@ -748,7 +748,6 @@ eval instant at 1m avg_over_time(metric6c[1m])
eval instant at 1m sum_over_time(metric6c[1m])/count_over_time(metric6c[1m]) eval instant at 1m sum_over_time(metric6c[1m])/count_over_time(metric6c[1m])
{} NaN {} NaN
eval instant at 1m avg_over_time(metric7[1m]) eval instant at 1m avg_over_time(metric7[1m])
{} NaN {} NaN
@ -783,6 +782,9 @@ load 10s
eval instant at 1m sum_over_time(metric[1m]) eval instant at 1m sum_over_time(metric[1m])
{} 2 {} 2
eval instant at 1m avg_over_time(metric[1m])
{} 0.5
# Tests for stddev_over_time and stdvar_over_time. # Tests for stddev_over_time and stdvar_over_time.
clear clear
load 10s load 10s

View file

@ -482,3 +482,29 @@ load_with_nhcb 5m
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"}) eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"})
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"}) eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"})
# Histogram with constant buckets.
load_with_nhcb 1m
const_histogram_bucket{le="0.0"} 1 1 1 1 1
const_histogram_bucket{le="1.0"} 1 1 1 1 1
const_histogram_bucket{le="2.0"} 1 1 1 1 1
const_histogram_bucket{le="+Inf"} 1 1 1 1 1
# There is no change to the bucket count over time, thus rate is 0 in each bucket.
eval instant at 5m rate(const_histogram_bucket[5m])
{le="0.0"} 0
{le="1.0"} 0
{le="2.0"} 0
{le="+Inf"} 0
# Native histograms do not represent empty buckets, so here the zeros are implicit.
eval instant at 5m rate(const_histogram[5m])
{} {{schema:-53 sum:0 count:0 custom_values:[0.0 1.0 2.0]}}
# Zero buckets mean no observations, so there is no value that observations fall below,
# which means that any quantile is a NaN.
eval instant at 5m histogram_quantile(1.0, sum by (le) (rate(const_histogram_bucket[5m])))
{} NaN
eval instant at 5m histogram_quantile(1.0, sum(rate(const_histogram[5m])))
{} NaN

View file

@ -0,0 +1,84 @@
# Test for __name__ label drop.
load 5m
metric{env="1"} 0 60 120
another_metric{env="1"} 60 120 180
# Does not drop __name__ for vector selector
eval instant at 15m metric{env="1"}
metric{env="1"} 120
# Drops __name__ for unary operators
eval instant at 15m -metric
{env="1"} -120
# Drops __name__ for binary operators
eval instant at 15m metric + another_metric
{env="1"} 300
# Does not drop __name__ for binary comparison operators
eval instant at 15m metric <= another_metric
metric{env="1"} 120
# Drops __name__ for binary comparison operators with "bool" modifier
eval instant at 15m metric <= bool another_metric
{env="1"} 1
# Drops __name__ for vector-scalar operations
eval instant at 15m metric * 2
{env="1"} 240
# Drops __name__ for instant-vector functions
eval instant at 15m clamp(metric, 0, 100)
{env="1"} 100
# Drops __name__ for range-vector functions
eval instant at 15m rate(metric{env="1"}[10m])
{env="1"} 0.2
# Does not drop __name__ for last_over_time function
eval instant at 15m last_over_time(metric{env="1"}[10m])
metric{env="1"} 120
# Drops __name__ for other _over_time functions
eval instant at 15m max_over_time(metric{env="1"}[10m])
{env="1"} 120
# Allows relabeling (to-be-dropped) __name__ via label_replace
eval instant at 15m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)")
{my_name="rate_metric", env="1"} 0.2
{my_name="rate_another_metric", env="1"} 0.2
# Allows preserving __name__ via label_replace
eval instant at 15m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)")
rate_metric{env="1"} 0.2
rate_another_metric{env="1"} 0.2
# Allows relabeling (to-be-dropped) __name__ via label_join
eval instant at 15m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__")
{my_name="metric", env="1"} 0.2
{my_name="another_metric", env="1"} 0.2
# Allows preserving __name__ via label_join
eval instant at 15m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env")
metric_1{env="1"} 0.2
another_metric_1{env="1"} 0.2
# Does not drop metric names for aggregation operators
eval instant at 15m sum by (__name__, env) (metric{env="1"})
metric{env="1"} 120
# Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by the not-yet-removed __name__ label)
# This is an accidental side effect of delayed __name__ label dropping
eval_fail instant at 15m sum by (__name__) (rate({env="1"}[10m]))
# Aggregation operators aggregate metrics with the same labelset and to-be-dropped names
# This is an accidental side effect of delayed __name__ label dropping
eval instant at 15m sum(rate({env="1"}[10m])) by (env)
{env="1"} 0.4
# Aggregation operators propagate __name__ label dropping information
eval instant at 15m topk(10, sum by (__name__, env) (metric{env="1"}))
metric{env="1"} 120
eval instant at 15m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m])))
{env="1"} 0.2

View file

@ -718,6 +718,52 @@ eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4)
eval instant at 10m histogram_sum(scalar(histogram_fraction(-Inf, +Inf, sum(histogram_fraction_4))) * histogram_fraction_4) eval instant at 10m histogram_sum(scalar(histogram_fraction(-Inf, +Inf, sum(histogram_fraction_4))) * histogram_fraction_4)
{} 100 {} 100
# Apply multiplication and division operators to a histogram.
load 10m
histogram_mul_div {{schema:0 count:21 sum:33 z_bucket:3 z_bucket_w:0.001 buckets:[3 3 3] n_buckets:[3 3 3]}}x1
float_series_3 3+0x1
float_series_0 0+0x1
eval instant at 10m histogram_mul_div*3
{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
eval instant at 10m 3*histogram_mul_div
{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
eval instant at 10m histogram_mul_div*float_series_3
{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
eval instant at 10m float_series_3*histogram_mul_div
{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
eval instant at 10m histogram_mul_div/3
{} {{schema:0 count:7 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[1 1 1]}}
eval instant at 10m histogram_mul_div/float_series_3
{} {{schema:0 count:7 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[1 1 1]}}
eval instant at 10m histogram_mul_div*0
{} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}
eval instant at 10m 0*histogram_mul_div
{} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}
eval instant at 10m histogram_mul_div*float_series_0
{} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}
eval instant at 10m float_series_0*histogram_mul_div
{} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}
# TODO: (NeerajGartia21) remove all the histogram buckets in case of division with zero. See: https://github.com/prometheus/prometheus/issues/13934
eval instant at 10m histogram_mul_div/0
{} {{schema:0 count:Inf sum:Inf z_bucket:Inf z_bucket_w:0.001 buckets:[Inf Inf Inf] n_buckets:[Inf Inf Inf]}}
eval instant at 10m histogram_mul_div/float_series_0
{} {{schema:0 count:Inf sum:Inf z_bucket:Inf z_bucket_w:0.001 buckets:[Inf Inf Inf] n_buckets:[Inf Inf Inf]}}
eval instant at 10m histogram_mul_div*0/0
{} {{schema:0 count:NaN sum:NaN z_bucket:NaN z_bucket_w:0.001 buckets:[NaN NaN NaN] n_buckets:[NaN NaN NaN]}}
clear clear
# Counter reset only noticeable in a single bucket. # Counter reset only noticeable in a single bucket.
@ -748,3 +794,177 @@ eval instant at 5m histogram_quantile(0.5, custom_buckets_histogram)
eval instant at 5m sum(custom_buckets_histogram) eval instant at 5m sum(custom_buckets_histogram)
{} {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}} {} {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}}
clear
# Test 'this native histogram metric is not a gauge' warning for rate
load 30s
some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}}
# Test the case where we only have two points for rate
eval_warn instant at 30s rate(some_metric[30s])
{} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}
# Test the case where we have more than two points for rate
eval_warn instant at 1m rate(some_metric[1m])
{} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}
clear
# Test rate() over mixed exponential and custom buckets.
load 30s
some_metric {{schema:0 sum:1 count:1 buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
# Start and end with exponential, with custom in the middle.
eval_warn instant at 1m rate(some_metric[1m])
# Should produce no results.
# Start and end with custom, with exponential in the middle.
eval_warn instant at 1m30s rate(some_metric[1m])
# Should produce no results.
# Start with custom, end with exponential.
eval_warn instant at 1m rate(some_metric[30s])
# Should produce no results.
# Start with exponential, end with custom.
eval_warn instant at 30s rate(some_metric[30s])
# Should produce no results.
clear
# Histogram with constant buckets.
load 1m
const_histogram {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}}
# There is no change to the bucket count over time, thus rate is 0 in each bucket.
# However native histograms do not represent empty buckets, so here the zeros are implicit.
eval instant at 5m rate(const_histogram[5m])
{} {{schema:0 sum:0 count:0}}
# Zero buckets mean no observations, thus the denominator in the average is 0
# leading to 0/0, which is NaN.
eval instant at 5m histogram_avg(rate(const_histogram[5m]))
{} NaN
# Zero buckets mean no observations, so count is 0.
eval instant at 5m histogram_count(rate(const_histogram[5m]))
{} 0.0
# Zero buckets mean no observations, and an empty histogram has a sum of 0 by definition.
eval instant at 5m histogram_sum(rate(const_histogram[5m]))
{} 0.0
# Zero buckets mean no observations, thus the denominator in the fraction is 0,
# leading to 0/0, which is NaN.
eval instant at 5m histogram_fraction(0.0, 1.0, rate(const_histogram[5m]))
{} NaN
# Workaround to calculate the observation count corresponding to NaN fraction.
eval instant at 5m histogram_count(rate(const_histogram[5m])) == 0.0 or histogram_fraction(0.0, 1.0, rate(const_histogram[5m])) * histogram_count(rate(const_histogram[5m]))
{} 0.0
# Zero buckets mean no observations, so there is no value that observations fall below,
# which means that any quantile is a NaN.
eval instant at 5m histogram_quantile(1.0, rate(const_histogram[5m]))
{} NaN
# Zero buckets mean no observations, so there is no standard deviation.
eval instant at 5m histogram_stddev(rate(const_histogram[5m]))
{} NaN
# Zero buckets mean no observations, so there is no standard variance.
eval instant at 5m histogram_stdvar(rate(const_histogram[5m]))
{} NaN
clear
# Test mixing exponential and custom buckets.
load 6m
metric{series="exponential"} {{sum:4 count:3 buckets:[1 2 1]}} _ {{sum:4 count:3 buckets:[1 2 1]}}
metric{series="other-exponential"} {{sum:3 count:2 buckets:[1 1 1]}} _ {{sum:3 count:2 buckets:[1 1 1]}}
metric{series="custom"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
metric{series="other-custom"} _ {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}}
# T=0: only exponential
# T=6: only custom
# T=12: mixed, should be ignored and emit a warning
eval_warn range from 0 to 12m step 6m sum(metric)
{} {{sum:7 count:5 buckets:[2 3 2]}} {{schema:-53 sum:16 count:3 custom_values:[5 10] buckets:[1 2]}} _
eval_warn range from 0 to 12m step 6m avg(metric)
{} {{sum:3.5 count:2.5 buckets:[1 1.5 1]}} {{schema:-53 sum:8 count:1.5 custom_values:[5 10] buckets:[0.5 1]}} _
clear
# Test incompatible custom bucket schemas.
load 6m
metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
# T=0: incompatible, should be ignored and emit a warning
# T=6: compatible
# T=12: incompatible followed by compatible, should be ignored and emit a warning
eval_warn range from 0 to 12m step 6m sum(metric)
{} _ {{schema:-53 sum:2 count:2 custom_values:[5 10] buckets:[2]}} _
eval_warn range from 0 to 12m step 6m avg(metric)
{} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} _
clear
load 1m
metric{group="just-floats", series="1"} 2
metric{group="just-floats", series="2"} 3
metric{group="just-exponential-histograms", series="1"} {{sum:3 count:4 buckets:[1 2 1]}}
metric{group="just-exponential-histograms", series="2"} {{sum:2 count:3 buckets:[1 1 1]}}
metric{group="just-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
metric{group="just-custom-histograms", series="2"} {{schema:-53 sum:3 count:4 custom_values:[2] buckets:[7]}}
metric{group="floats-and-histograms", series="1"} 2
metric{group="floats-and-histograms", series="2"} {{sum:2 count:3 buckets:[1 1 1]}}
metric{group="exponential-and-custom-histograms", series="1"} {{sum:2 count:3 buckets:[1 1 1]}}
metric{group="exponential-and-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
metric{group="incompatible-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
metric{group="incompatible-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
eval_warn instant at 0 sum by (group) (metric)
{group="just-floats"} 5
{group="just-exponential-histograms"} {{sum:5 count:7 buckets:[2 3 2]}}
{group="just-custom-histograms"} {{schema:-53 sum:4 count:5 custom_values:[2] buckets:[8]}}
clear
# Test native histograms with sum, count, avg.
load 10m
histogram_sum{idx="0"} {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}}x1
histogram_sum{idx="1"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1
histogram_sum{idx="2"} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1
histogram_sum{idx="3"} {{schema:1 count:0}}x1
histogram_sum_float{idx="0"} 42.0x1
eval instant at 10m sum(histogram_sum)
{} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}
eval_warn instant at 10m sum({idx="0"})
eval instant at 10m sum(histogram_sum{idx="0"} + ignoring(idx) histogram_sum{idx="1"} + ignoring(idx) histogram_sum{idx="2"} + ignoring(idx) histogram_sum{idx="3"})
{} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}
eval instant at 10m count(histogram_sum)
{} 4
eval instant at 10m avg(histogram_sum)
{} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}}
clear
# Test native histograms with sum_over_time, avg_over_time.
load 1m
histogram_sum_over_time {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:1 count:0}}
eval instant at 3m sum_over_time(histogram_sum_over_time[3m:1m])
{} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}
eval instant at 3m avg_over_time(histogram_sum_over_time[3m:1m])
{} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}}

View file

@ -68,6 +68,9 @@ type Series struct {
Metric labels.Labels `json:"metric"` Metric labels.Labels `json:"metric"`
Floats []FPoint `json:"values,omitempty"` Floats []FPoint `json:"values,omitempty"`
Histograms []HPoint `json:"histograms,omitempty"` Histograms []HPoint `json:"histograms,omitempty"`
// DropName is used to indicate whether the __name__ label should be dropped
// as part of the query evaluation.
DropName bool `json:"-"`
} }
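A minimal, hypothetical sketch of the effect of the json:"-" tag: DropName travels with the value during evaluation but never appears in serialized API output.
s := promql.Series{
	Metric:   labels.FromStrings("env", "1"),
	DropName: true, // evaluation-internal; excluded from JSON by the "-" tag
}
b, _ := json.Marshal(s)
fmt.Println(string(b)) // {"metric":{"env":"1"}}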
func (s Series) String() string { func (s Series) String() string {
@ -194,6 +197,9 @@ type Sample struct {
H *histogram.FloatHistogram H *histogram.FloatHistogram
Metric labels.Labels Metric labels.Labels
// DropName is used to indicate whether the __name__ label should be dropped
// as part of the query evaluation.
DropName bool
} }
func (s Sample) String() string { func (s Sample) String() string {

View file

@ -36,7 +36,9 @@ import (
"github.com/prometheus/prometheus/util/testutil" "github.com/prometheus/prometheus/util/testutil"
) )
var testEngine = promql.NewEngine(promql.EngineOpts{ func testEngine(tb testing.TB) *promql.Engine {
tb.Helper()
return promqltest.NewTestEngineWithOpts(tb, promql.EngineOpts{
Logger: nil, Logger: nil,
Reg: nil, Reg: nil,
MaxSamples: 10000, MaxSamples: 10000,
@ -45,7 +47,8 @@ var testEngine = promql.NewEngine(promql.EngineOpts{
EnableAtModifier: true, EnableAtModifier: true,
EnableNegativeOffset: true, EnableNegativeOffset: true,
EnablePerStepStats: true, EnablePerStepStats: true,
}) })
}
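Presumably the point of threading testing.TB through is lifecycle management: each test gets its own engine that is shut down when the test ends, instead of sharing a process-wide global. A hedged sketch of what such a constructor can do (assumed behavior, not copied from promqltest):
func newEngineForTest(tb testing.TB, opts promql.EngineOpts) *promql.Engine {
	tb.Helper()
	ng := promql.NewEngine(opts)
	tb.Cleanup(func() { _ = ng.Close() }) // close the engine together with the test
	return ng
}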
func TestAlertingRuleState(t *testing.T) { func TestAlertingRuleState(t *testing.T) {
tests := []struct { tests := []struct {
@ -225,12 +228,14 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
}, },
} }
ng := testEngine(t)
baseTime := time.Unix(0, 0) baseTime := time.Unix(0, 0)
for i, result := range results { for i, result := range results {
t.Logf("case %d", i) t.Logf("case %d", i)
evalTime := baseTime.Add(time.Duration(i) * time.Minute) evalTime := baseTime.Add(time.Duration(i) * time.Minute)
result[0].T = timestamp.FromTime(evalTime) result[0].T = timestamp.FromTime(evalTime)
res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
@ -247,7 +252,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
testutil.RequireEqual(t, result, filteredRes) testutil.RequireEqual(t, result, filteredRes)
} }
evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute) evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute)
res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, res) require.Empty(t, res)
} }
@ -309,13 +314,15 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
}, },
} }
ng := testEngine(t)
evalTime := time.Unix(0, 0) evalTime := time.Unix(0, 0)
result[0].T = timestamp.FromTime(evalTime) result[0].T = timestamp.FromTime(evalTime)
result[1].T = timestamp.FromTime(evalTime) result[1].T = timestamp.FromTime(evalTime)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
res, err := ruleWithoutExternalLabels.Eval( res, err := ruleWithoutExternalLabels.Eval(
context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0, context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0,
) )
require.NoError(t, err) require.NoError(t, err)
for _, smpl := range res { for _, smpl := range res {
@ -329,7 +336,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
} }
res, err = ruleWithExternalLabels.Eval( res, err = ruleWithExternalLabels.Eval(
context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0, context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0,
) )
require.NoError(t, err) require.NoError(t, err)
for _, smpl := range res { for _, smpl := range res {
@ -406,9 +413,11 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
result[0].T = timestamp.FromTime(evalTime) result[0].T = timestamp.FromTime(evalTime)
result[1].T = timestamp.FromTime(evalTime) result[1].T = timestamp.FromTime(evalTime)
ng := testEngine(t)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
res, err := ruleWithoutExternalURL.Eval( res, err := ruleWithoutExternalURL.Eval(
context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0, context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0,
) )
require.NoError(t, err) require.NoError(t, err)
for _, smpl := range res { for _, smpl := range res {
@ -422,7 +431,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
} }
res, err = ruleWithExternalURL.Eval( res, err = ruleWithExternalURL.Eval(
context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0, context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0,
) )
require.NoError(t, err) require.NoError(t, err)
for _, smpl := range res { for _, smpl := range res {
@ -475,9 +484,11 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
evalTime := time.Unix(0, 0) evalTime := time.Unix(0, 0)
result[0].T = timestamp.FromTime(evalTime) result[0].T = timestamp.FromTime(evalTime)
ng := testEngine(t)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
res, err := rule.Eval( res, err := rule.Eval(
context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0, context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0,
) )
require.NoError(t, err) require.NoError(t, err)
for _, smpl := range res { for _, smpl := range res {
@ -520,6 +531,8 @@ instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }};
) )
evalTime := time.Unix(0, 0) evalTime := time.Unix(0, 0)
ng := testEngine(t)
startQueryCh := make(chan struct{}) startQueryCh := make(chan struct{})
getDoneCh := make(chan struct{}) getDoneCh := make(chan struct{})
slowQueryFunc := func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { slowQueryFunc := func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) {
@ -533,7 +546,7 @@ instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }};
require.Fail(t, "unexpected blocking when template expanding.") require.Fail(t, "unexpected blocking when template expanding.")
} }
} }
return EngineQueryFunc(testEngine, storage)(ctx, q, ts) return EngineQueryFunc(ng, storage)(ctx, q, ts)
} }
go func() { go func() {
<-startQueryCh <-startQueryCh
@ -578,7 +591,7 @@ func TestAlertingRuleDuplicate(t *testing.T) {
Timeout: 10 * time.Second, Timeout: 10 * time.Second,
} }
engine := promql.NewEngine(opts) engine := promqltest.NewTestEngineWithOpts(t, opts)
ctx, cancelCtx := context.WithCancel(context.Background()) ctx, cancelCtx := context.WithCancel(context.Background())
defer cancelCtx() defer cancelCtx()
@ -642,13 +655,13 @@ func TestAlertingRuleLimit(t *testing.T) {
) )
evalTime := time.Unix(0, 0) evalTime := time.Unix(0, 0)
ng := testEngine(t)
for _, test := range tests { for _, test := range tests {
switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); { switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, test.limit); {
case err != nil: case err != nil:
require.EqualError(t, err, test.err) require.EqualError(t, err, test.err)
case test.err != "": case test.err != "":
t.Errorf("Expected errror %s, got none", test.err) t.Errorf("Expected error %s, got none", test.err)
} }
} }
} }
@ -866,12 +879,13 @@ func TestKeepFiringFor(t *testing.T) {
}, },
} }
ng := testEngine(t)
baseTime := time.Unix(0, 0) baseTime := time.Unix(0, 0)
for i, result := range results { for i, result := range results {
t.Logf("case %d", i) t.Logf("case %d", i)
evalTime := baseTime.Add(time.Duration(i) * time.Minute) evalTime := baseTime.Add(time.Duration(i) * time.Minute)
result[0].T = timestamp.FromTime(evalTime) result[0].T = timestamp.FromTime(evalTime)
res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
@ -888,7 +902,7 @@ func TestKeepFiringFor(t *testing.T) {
testutil.RequireEqual(t, result, filteredRes) testutil.RequireEqual(t, result, filteredRes)
} }
evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute) evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute)
res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, res) require.Empty(t, res)
} }
@ -923,9 +937,10 @@ func TestPendingAndKeepFiringFor(t *testing.T) {
F: 1, F: 1,
} }
ng := testEngine(t)
baseTime := time.Unix(0, 0) baseTime := time.Unix(0, 0)
result.T = timestamp.FromTime(baseTime) result.T = timestamp.FromTime(baseTime)
res, err := rule.Eval(context.TODO(), 0, baseTime, EngineQueryFunc(testEngine, storage), nil, 0) res, err := rule.Eval(context.TODO(), 0, baseTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, res, 2) require.Len(t, res, 2)
@ -940,7 +955,7 @@ func TestPendingAndKeepFiringFor(t *testing.T) {
} }
evalTime := baseTime.Add(time.Minute) evalTime := baseTime.Add(time.Minute)
res, err = rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) res, err = rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, res) require.Empty(t, res)
} }

View file

@ -158,12 +158,13 @@ func TestAlertingRule(t *testing.T) {
}, },
} }
ng := testEngine(t)
for i, test := range tests { for i, test := range tests {
t.Logf("case %d", i) t.Logf("case %d", i)
evalTime := baseTime.Add(test.time) evalTime := baseTime.Add(test.time)
res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
@ -299,6 +300,7 @@ func TestForStateAddSamples(t *testing.T) {
}, },
} }
ng := testEngine(t)
var forState float64 var forState float64
for i, test := range tests { for i, test := range tests {
t.Logf("case %d", i) t.Logf("case %d", i)
@ -311,7 +313,7 @@ func TestForStateAddSamples(t *testing.T) {
forState = float64(value.StaleNaN) forState = float64(value.StaleNaN)
} }
res, err := rule.Eval(context.TODO(), queryOffset, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) res, err := rule.Eval(context.TODO(), queryOffset, evalTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS' samples. var filteredRes promql.Vector // After removing 'ALERTS' samples.
@ -366,8 +368,9 @@ func TestForStateRestore(t *testing.T) {
expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`) expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
require.NoError(t, err) require.NoError(t, err)
ng := testEngine(t)
opts := &ManagerOptions{ opts := &ManagerOptions{
QueryFunc: EngineQueryFunc(testEngine, storage), QueryFunc: EngineQueryFunc(ng, storage),
Appendable: storage, Appendable: storage,
Queryable: storage, Queryable: storage,
Context: context.Background(), Context: context.Background(),
@ -538,7 +541,7 @@ func TestStaleness(t *testing.T) {
MaxSamples: 10, MaxSamples: 10,
Timeout: 10 * time.Second, Timeout: 10 * time.Second,
} }
engine := promql.NewEngine(engineOpts) engine := promqltest.NewTestEngineWithOpts(t, engineOpts)
opts := &ManagerOptions{ opts := &ManagerOptions{
QueryFunc: EngineQueryFunc(engine, st), QueryFunc: EngineQueryFunc(engine, st),
Appendable: st, Appendable: st,
@ -772,7 +775,7 @@ func TestUpdate(t *testing.T) {
MaxSamples: 10, MaxSamples: 10,
Timeout: 10 * time.Second, Timeout: 10 * time.Second,
} }
engine := promql.NewEngine(opts) engine := promqltest.NewTestEngineWithOpts(t, opts)
ruleManager := NewManager(&ManagerOptions{ ruleManager := NewManager(&ManagerOptions{
Appendable: st, Appendable: st,
Queryable: st, Queryable: st,
@ -910,7 +913,7 @@ func TestNotify(t *testing.T) {
MaxSamples: 10, MaxSamples: 10,
Timeout: 10 * time.Second, Timeout: 10 * time.Second,
} }
engine := promql.NewEngine(engineOpts) engine := promqltest.NewTestEngineWithOpts(t, engineOpts)
var lastNotified []*Alert var lastNotified []*Alert
notifyFunc := func(ctx context.Context, expr string, alerts ...*Alert) { notifyFunc := func(ctx context.Context, expr string, alerts ...*Alert) {
lastNotified = alerts lastNotified = alerts
@ -985,7 +988,7 @@ func TestMetricsUpdate(t *testing.T) {
MaxSamples: 10, MaxSamples: 10,
Timeout: 10 * time.Second, Timeout: 10 * time.Second,
} }
engine := promql.NewEngine(opts) engine := promqltest.NewTestEngineWithOpts(t, opts)
ruleManager := NewManager(&ManagerOptions{ ruleManager := NewManager(&ManagerOptions{
Appendable: storage, Appendable: storage,
Queryable: storage, Queryable: storage,
@ -1059,7 +1062,7 @@ func TestGroupStalenessOnRemoval(t *testing.T) {
MaxSamples: 10, MaxSamples: 10,
Timeout: 10 * time.Second, Timeout: 10 * time.Second,
} }
engine := promql.NewEngine(opts) engine := promqltest.NewTestEngineWithOpts(t, opts)
ruleManager := NewManager(&ManagerOptions{ ruleManager := NewManager(&ManagerOptions{
Appendable: storage, Appendable: storage,
Queryable: storage, Queryable: storage,
@ -1136,7 +1139,7 @@ func TestMetricsStalenessOnManagerShutdown(t *testing.T) {
MaxSamples: 10, MaxSamples: 10,
Timeout: 10 * time.Second, Timeout: 10 * time.Second,
} }
engine := promql.NewEngine(opts) engine := promqltest.NewTestEngineWithOpts(t, opts)
ruleManager := NewManager(&ManagerOptions{ ruleManager := NewManager(&ManagerOptions{
Appendable: storage, Appendable: storage,
Queryable: storage, Queryable: storage,
@ -1238,7 +1241,7 @@ func TestRuleHealthUpdates(t *testing.T) {
MaxSamples: 10, MaxSamples: 10,
Timeout: 10 * time.Second, Timeout: 10 * time.Second,
} }
engine := promql.NewEngine(engineOpts) engine := promqltest.NewTestEngineWithOpts(t, engineOpts)
opts := &ManagerOptions{ opts := &ManagerOptions{
QueryFunc: EngineQueryFunc(engine, st), QueryFunc: EngineQueryFunc(engine, st),
Appendable: st, Appendable: st,
@ -1335,9 +1338,10 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) {
}, },
} }
ng := testEngine(t)
testFunc := func(tst testInput) { testFunc := func(tst testInput) {
opts := &ManagerOptions{ opts := &ManagerOptions{
QueryFunc: EngineQueryFunc(testEngine, storage), QueryFunc: EngineQueryFunc(ng, storage),
Appendable: storage, Appendable: storage,
Queryable: storage, Queryable: storage,
Context: context.Background(), Context: context.Background(),
@ -1421,8 +1425,9 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {
} }
require.NoError(t, app.Commit()) require.NoError(t, app.Commit())
ng := testEngine(t)
opts := &ManagerOptions{ opts := &ManagerOptions{
QueryFunc: EngineQueryFunc(testEngine, storage), QueryFunc: EngineQueryFunc(ng, storage),
Appendable: storage, Appendable: storage,
Queryable: storage, Queryable: storage,
Context: context.Background(), Context: context.Background(),

View file

@ -123,10 +123,11 @@ func TestRuleEval(t *testing.T) {
storage := setUpRuleEvalTest(t) storage := setUpRuleEvalTest(t)
t.Cleanup(func() { storage.Close() }) t.Cleanup(func() { storage.Close() })
ng := testEngine(t)
for _, scenario := range ruleEvalTestScenarios { for _, scenario := range ruleEvalTestScenarios {
t.Run(scenario.name, func(t *testing.T) { t.Run(scenario.name, func(t *testing.T) {
rule := NewRecordingRule("test_rule", scenario.expr, scenario.ruleLabels) rule := NewRecordingRule("test_rule", scenario.expr, scenario.ruleLabels)
result, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0) result, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(ng, storage), nil, 0)
require.NoError(t, err) require.NoError(t, err)
testutil.RequireEqual(t, scenario.expected, result) testutil.RequireEqual(t, scenario.expected, result)
}) })
@ -137,6 +138,7 @@ func BenchmarkRuleEval(b *testing.B) {
storage := setUpRuleEvalTest(b) storage := setUpRuleEvalTest(b)
b.Cleanup(func() { storage.Close() }) b.Cleanup(func() { storage.Close() })
ng := testEngine(b)
for _, scenario := range ruleEvalTestScenarios { for _, scenario := range ruleEvalTestScenarios {
b.Run(scenario.name, func(b *testing.B) { b.Run(scenario.name, func(b *testing.B) {
rule := NewRecordingRule("test_rule", scenario.expr, scenario.ruleLabels) rule := NewRecordingRule("test_rule", scenario.expr, scenario.ruleLabels)
@ -144,7 +146,7 @@ func BenchmarkRuleEval(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
_, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0) _, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(ng, storage), nil, 0)
if err != nil { if err != nil {
require.NoError(b, err) require.NoError(b, err)
} }
@ -165,7 +167,7 @@ func TestRuleEvalDuplicate(t *testing.T) {
Timeout: 10 * time.Second, Timeout: 10 * time.Second,
} }
engine := promql.NewEngine(opts) engine := promqltest.NewTestEngineWithOpts(t, opts)
ctx, cancelCtx := context.WithCancel(context.Background()) ctx, cancelCtx := context.WithCancel(context.Background())
defer cancelCtx() defer cancelCtx()
@ -212,10 +214,11 @@ func TestRecordingRuleLimit(t *testing.T) {
labels.FromStrings("test", "test"), labels.FromStrings("test", "test"),
) )
ng := testEngine(t)
evalTime := time.Unix(0, 0) evalTime := time.Unix(0, 0)
for _, test := range tests { for _, test := range tests {
switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); { switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, test.limit); {
case err != nil: case err != nil:
require.EqualError(t, err, test.err) require.EqualError(t, err, test.err)
case test.err != "": case test.err != "":

View file

@ -23,7 +23,7 @@ import (
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
) )
// Write a MetricFamily into a protobuf. // MetricFamilyToProtobuf writes a MetricFamily into a protobuf.
// This function is intended for testing scraping by providing protobuf serialized input. // This function is intended for testing scraping by providing protobuf serialized input.
func MetricFamilyToProtobuf(metricFamily *dto.MetricFamily) ([]byte, error) { func MetricFamilyToProtobuf(metricFamily *dto.MetricFamily) ([]byte, error) {
buffer := &bytes.Buffer{} buffer := &bytes.Buffer{}
@ -34,7 +34,7 @@ func MetricFamilyToProtobuf(metricFamily *dto.MetricFamily) ([]byte, error) {
return buffer.Bytes(), nil return buffer.Bytes(), nil
} }
// Append a MetricFamily protobuf representation to a buffer. // AddMetricFamilyToProtobuf appends a MetricFamily protobuf representation to a buffer.
// This function is intended for testing scraping by providing protobuf serialized input. // This function is intended for testing scraping by providing protobuf serialized input.
func AddMetricFamilyToProtobuf(buffer *bytes.Buffer, metricFamily *dto.MetricFamily) error { func AddMetricFamilyToProtobuf(buffer *bytes.Buffer, metricFamily *dto.MetricFamily) error {
protoBuf, err := proto.Marshal(metricFamily) protoBuf, err := proto.Marshal(metricFamily)

View file

@ -93,6 +93,8 @@ type Options struct {
skipOffsetting bool skipOffsetting bool
} }
const DefaultNameEscapingScheme = model.ValueEncodingEscaping
// Manager maintains a set of scrape pools and manages start/stop cycles // Manager maintains a set of scrape pools and manages start/stop cycles
// when receiving new target groups from the discovery manager. // when receiving new target groups from the discovery manager.
type Manager struct { type Manager struct {
@ -140,7 +142,7 @@ func (m *Manager) UnregisterMetrics() {
func (m *Manager) reloader() { func (m *Manager) reloader() {
reloadIntervalDuration := m.opts.DiscoveryReloadInterval reloadIntervalDuration := m.opts.DiscoveryReloadInterval
if reloadIntervalDuration < model.Duration(5*time.Second) { if reloadIntervalDuration == model.Duration(0) {
reloadIntervalDuration = model.Duration(5 * time.Second) reloadIntervalDuration = model.Duration(5 * time.Second)
} }
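With the guard relaxed from "less than 5s" to "unset", sub-second reload intervals are now honored; the new tests in this commit rely on exactly that, mirroring the call in runManagers further down:
scrapeManager, err := NewManager(
	&Options{DiscoveryReloadInterval: model.Duration(100 * time.Millisecond)},
	nil,
	nopAppendable{},
	prometheus.NewRegistry(),
)
require.NoError(t, err)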

View file

@ -20,6 +20,7 @@ import (
"net/http/httptest" "net/http/httptest"
"net/url" "net/url"
"os" "os"
"sort"
"strconv" "strconv"
"sync" "sync"
"testing" "testing"
@ -36,6 +37,7 @@ import (
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
_ "github.com/prometheus/prometheus/discovery/file"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/model/relabel"
@ -722,8 +724,6 @@ func TestManagerCTZeroIngestion(t *testing.T) {
name string name string
counterSample *dto.Counter counterSample *dto.Counter
enableCTZeroIngestion bool enableCTZeroIngestion bool
expectedValues []float64
}{ }{
{ {
name: "disabled with CT on counter", name: "disabled with CT on counter",
@ -732,7 +732,6 @@ func TestManagerCTZeroIngestion(t *testing.T) {
// Timestamp does not matter as long as it exists in this test. // Timestamp does not matter as long as it exists in this test.
CreatedTimestamp: timestamppb.Now(), CreatedTimestamp: timestamppb.Now(),
}, },
expectedValues: []float64{1.0},
}, },
{ {
name: "enabled with CT on counter", name: "enabled with CT on counter",
@ -742,7 +741,6 @@ func TestManagerCTZeroIngestion(t *testing.T) {
CreatedTimestamp: timestamppb.Now(), CreatedTimestamp: timestamppb.Now(),
}, },
enableCTZeroIngestion: true, enableCTZeroIngestion: true,
expectedValues: []float64{0.0, 1.0},
}, },
{ {
name: "enabled without CT on counter", name: "enabled without CT on counter",
@ -750,7 +748,6 @@ func TestManagerCTZeroIngestion(t *testing.T) {
Value: proto.Float64(1.0), Value: proto.Float64(1.0),
}, },
enableCTZeroIngestion: true, enableCTZeroIngestion: true,
expectedValues: []float64{1.0},
}, },
} { } {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
@ -817,44 +814,42 @@ func TestManagerCTZeroIngestion(t *testing.T) {
}) })
scrapeManager.reload() scrapeManager.reload()
var got []float64
// Wait for one scrape. // Wait for one scrape.
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel() defer cancel()
require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error {
if countFloatSamples(app, mName) != len(tc.expectedValues) {
return fmt.Errorf("expected %v samples", tc.expectedValues)
}
return nil
}), "after 1 minute")
scrapeManager.Stop()
require.Equal(t, tc.expectedValues, getResultFloats(app, mName))
})
}
}
func countFloatSamples(a *collectResultAppender, expectedMetricName string) (count int) {
a.mtx.Lock()
defer a.mtx.Unlock()
for _, f := range a.resultFloats {
if f.metric.Get(model.MetricNameLabel) == expectedMetricName {
count++
}
}
return count
}
func getResultFloats(app *collectResultAppender, expectedMetricName string) (result []float64) {
app.mtx.Lock() app.mtx.Lock()
defer app.mtx.Unlock() defer app.mtx.Unlock()
// Check whether the scrape happened and grab the relevant samples; they
// have to be there, or it's a bug and it's not worth waiting.
for _, f := range app.resultFloats { for _, f := range app.resultFloats {
if f.metric.Get(model.MetricNameLabel) == expectedMetricName { if f.metric.Get(model.MetricNameLabel) == mName {
result = append(result, f.f) got = append(got, f.f)
} }
} }
return result if len(app.resultFloats) > 0 {
return nil
}
return fmt.Errorf("expected some samples, got none")
}), "after 1 minute")
scrapeManager.Stop()
// Check for the zero sample, assuming we always injected exactly one sample.
// Did it contain CT to inject? If yes, was CT zero enabled?
if tc.counterSample.CreatedTimestamp.IsValid() && tc.enableCTZeroIngestion {
require.Len(t, got, 2)
require.Equal(t, 0.0, got[0])
require.Equal(t, tc.counterSample.GetValue(), got[1])
return
}
// Expect only one, valid sample.
require.Len(t, got, 1)
require.Equal(t, tc.counterSample.GetValue(), got[0])
})
}
} }
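For context: with created-timestamp (CT) zero ingestion enabled, a counter that exposes a created timestamp gets a synthetic zero sample appended at that timestamp before the scraped value, which is why the enabled-with-CT case expects two samples (0.0, then the counter value) and the other cases expect one. A simplified sketch of that decision, reassembled from the scrape-loop hunk later in this diff:

if sl.enableCTZeroIngestion {
	if ctMs := p.CreatedTimestamp(); ctMs != nil {
		// Inject a synthetic zero sample at the created timestamp;
		// an out-of-order CT is a common case and is ignored.
		ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs)
	}
}
// ...followed by the regular append of the scraped sample.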
func TestUnregisterMetrics(t *testing.T) { func TestUnregisterMetrics(t *testing.T) {
@ -869,3 +864,414 @@ func TestUnregisterMetrics(t *testing.T) {
manager.UnregisterMetrics() manager.UnregisterMetrics()
} }
} }
func applyConfig(
t *testing.T,
config string,
scrapeManager *Manager,
discoveryManager *discovery.Manager,
) {
t.Helper()
cfg := loadConfiguration(t, config)
require.NoError(t, scrapeManager.ApplyConfig(cfg))
c := make(map[string]discovery.Configs)
scfgs, err := cfg.GetScrapeConfigs()
require.NoError(t, err)
for _, v := range scfgs {
c[v.JobName] = v.ServiceDiscoveryConfigs
}
require.NoError(t, discoveryManager.ApplyConfig(c))
}
func runManagers(t *testing.T, ctx context.Context) (*discovery.Manager, *Manager) {
t.Helper()
reg := prometheus.NewRegistry()
sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg))
require.NoError(t, err)
discoveryManager := discovery.NewManager(
ctx,
log.NewNopLogger(),
reg,
sdMetrics,
discovery.Updatert(100*time.Millisecond),
)
scrapeManager, err := NewManager(
&Options{DiscoveryReloadInterval: model.Duration(100 * time.Millisecond)},
nil,
nopAppendable{},
prometheus.NewRegistry(),
)
require.NoError(t, err)
go discoveryManager.Run()
go scrapeManager.Run(discoveryManager.SyncCh())
return discoveryManager, scrapeManager
}
func writeIntoFile(t *testing.T, content, filePattern string) *os.File {
t.Helper()
file, err := os.CreateTemp("", filePattern)
require.NoError(t, err)
_, err = file.WriteString(content)
require.NoError(t, err)
return file
}
func requireTargets(
t *testing.T,
scrapeManager *Manager,
jobName string,
waitToAppear bool,
expectedTargets []string,
) {
t.Helper()
require.Eventually(t, func() bool {
targets, ok := scrapeManager.TargetsActive()[jobName]
if !ok {
if waitToAppear {
return false
}
t.Fatalf("job %s shouldn't be dropped", jobName)
}
if expectedTargets == nil {
return targets == nil
}
if len(targets) != len(expectedTargets) {
return false
}
sTargets := []string{}
for _, t := range targets {
sTargets = append(sTargets, t.String())
}
sort.Strings(expectedTargets)
sort.Strings(sTargets)
for i, t := range sTargets {
if t != expectedTargets[i] {
return false
}
}
return true
}, 1*time.Second, 100*time.Millisecond)
}
// TestTargetDisappearsAfterProviderRemoved makes sure that when a provider is dropped, (only) its targets are dropped.
func TestTargetDisappearsAfterProviderRemoved(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
myJob := "my-job"
myJobSDTargetURL := "my:9876"
myJobStaticTargetURL := "my:5432"
sdFileContent := fmt.Sprintf(`[{"targets": ["%s"]}]`, myJobSDTargetURL)
sDFile := writeIntoFile(t, sdFileContent, "*targets.json")
baseConfig := `
scrape_configs:
- job_name: %s
static_configs:
- targets: ['%s']
file_sd_configs:
- files: ['%s']
`
discoveryManager, scrapeManager := runManagers(t, ctx)
defer scrapeManager.Stop()
applyConfig(
t,
fmt.Sprintf(
baseConfig,
myJob,
myJobStaticTargetURL,
sDFile.Name(),
),
scrapeManager,
discoveryManager,
)
// Make sure the job's targets are taken into account
requireTargets(
t,
scrapeManager,
myJob,
true,
[]string{
fmt.Sprintf("http://%s/metrics", myJobSDTargetURL),
fmt.Sprintf("http://%s/metrics", myJobStaticTargetURL),
},
)
// Apply a new config where a provider is removed
baseConfig = `
scrape_configs:
- job_name: %s
static_configs:
- targets: ['%s']
`
applyConfig(
t,
fmt.Sprintf(
baseConfig,
myJob,
myJobStaticTargetURL,
),
scrapeManager,
discoveryManager,
)
// Make sure the corresponding target was dropped
requireTargets(
t,
scrapeManager,
myJob,
false,
[]string{
fmt.Sprintf("http://%s/metrics", myJobStaticTargetURL),
},
)
// Apply a new config with no providers
baseConfig = `
scrape_configs:
- job_name: %s
`
applyConfig(
t,
fmt.Sprintf(
baseConfig,
myJob,
),
scrapeManager,
discoveryManager,
)
// Make sure the corresponding target was dropped
requireTargets(
t,
scrapeManager,
myJob,
false,
nil,
)
}
// TestOnlyProviderStaleTargetsAreDropped makes sure that when a job has only one provider with multiple targets
// and the provider can no longer discover some of those targets, only the stale targets are dropped.
func TestOnlyProviderStaleTargetsAreDropped(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
jobName := "my-job"
jobTarget1URL := "foo:9876"
jobTarget2URL := "foo:5432"
sdFile1Content := fmt.Sprintf(`[{"targets": ["%s"]}]`, jobTarget1URL)
sdFile2Content := fmt.Sprintf(`[{"targets": ["%s"]}]`, jobTarget2URL)
sDFile1 := writeIntoFile(t, sdFile1Content, "*targets.json")
sDFile2 := writeIntoFile(t, sdFile2Content, "*targets.json")
baseConfig := `
scrape_configs:
- job_name: %s
file_sd_configs:
- files: ['%s', '%s']
`
discoveryManager, scrapeManager := runManagers(t, ctx)
defer scrapeManager.Stop()
applyConfig(
t,
fmt.Sprintf(baseConfig, jobName, sDFile1.Name(), sDFile2.Name()),
scrapeManager,
discoveryManager,
)
// Make sure the job's targets are taken into account
requireTargets(
t,
scrapeManager,
jobName,
true,
[]string{
fmt.Sprintf("http://%s/metrics", jobTarget1URL),
fmt.Sprintf("http://%s/metrics", jobTarget2URL),
},
)
// Apply the same config for the same job but with a nonexistent file to make the provider
// unable to discover some targets
applyConfig(
t,
fmt.Sprintf(baseConfig, jobName, sDFile1.Name(), "/idontexistdoi.json"),
scrapeManager,
discoveryManager,
)
// The old target should get dropped
requireTargets(
t,
scrapeManager,
jobName,
false,
[]string{fmt.Sprintf("http://%s/metrics", jobTarget1URL)},
)
}
// TestProviderStaleTargetsAreDropped makes sure that when a job has only one provider and that provider
// should no longer discover targets, the targets of that provider are dropped.
// See: https://github.com/prometheus/prometheus/issues/12858
func TestProviderStaleTargetsAreDropped(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
jobName := "my-job"
jobTargetURL := "foo:9876"
sdFileContent := fmt.Sprintf(`[{"targets": ["%s"]}]`, jobTargetURL)
sDFile := writeIntoFile(t, sdFileContent, "*targets.json")
baseConfig := `
scrape_configs:
- job_name: %s
file_sd_configs:
- files: ['%s']
`
discoveryManager, scrapeManager := runManagers(t, ctx)
defer scrapeManager.Stop()
applyConfig(
t,
fmt.Sprintf(baseConfig, jobName, sDFile.Name()),
scrapeManager,
discoveryManager,
)
// Make sure the job's targets are taken into account
requireTargets(
t,
scrapeManager,
jobName,
true,
[]string{
fmt.Sprintf("http://%s/metrics", jobTargetURL),
},
)
// Apply the same config for the same job but with a nonexistent file to make the provider
// unable to discover some targets
applyConfig(
t,
fmt.Sprintf(baseConfig, jobName, "/idontexistdoi.json"),
scrapeManager,
discoveryManager,
)
// The old target should get dropped
requireTargets(
t,
scrapeManager,
jobName,
false,
nil,
)
}
// TestOnlyStaleTargetsAreDropped makes sure that when a job has multiple providers and one of them should no
// longer discover targets, only the stale targets of that provider are dropped.
func TestOnlyStaleTargetsAreDropped(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
myJob := "my-job"
myJobSDTargetURL := "my:9876"
myJobStaticTargetURL := "my:5432"
otherJob := "other-job"
otherJobTargetURL := "other:1234"
sdFileContent := fmt.Sprintf(`[{"targets": ["%s"]}]`, myJobSDTargetURL)
sDFile := writeIntoFile(t, sdFileContent, "*targets.json")
baseConfig := `
scrape_configs:
- job_name: %s
static_configs:
- targets: ['%s']
file_sd_configs:
- files: ['%s']
- job_name: %s
static_configs:
- targets: ['%s']
`
discoveryManager, scrapeManager := runManagers(t, ctx)
defer scrapeManager.Stop()
// Apply the initial config with an existing file
applyConfig(
t,
fmt.Sprintf(
baseConfig,
myJob,
myJobStaticTargetURL,
sDFile.Name(),
otherJob,
otherJobTargetURL,
),
scrapeManager,
discoveryManager,
)
// Make sure the job's targets are taken into account
requireTargets(
t,
scrapeManager,
myJob,
true,
[]string{
fmt.Sprintf("http://%s/metrics", myJobSDTargetURL),
fmt.Sprintf("http://%s/metrics", myJobStaticTargetURL),
},
)
requireTargets(
t,
scrapeManager,
otherJob,
true,
[]string{fmt.Sprintf("http://%s/metrics", otherJobTargetURL)},
)
// Apply the same config with a nonexistent file for myJob
applyConfig(
t,
fmt.Sprintf(
baseConfig,
myJob,
myJobStaticTargetURL,
"/idontexistdoi.json",
otherJob,
otherJobTargetURL,
),
scrapeManager,
discoveryManager,
)
// Only the SD target should get dropped for myJob
requireTargets(
t,
scrapeManager,
myJob,
false,
[]string{
fmt.Sprintf("http://%s/metrics", myJobStaticTargetURL),
},
)
// The otherJob should keep its target
requireTargets(
t,
scrapeManager,
otherJob,
false,
[]string{fmt.Sprintf("http://%s/metrics", otherJobTargetURL)},
)
}

View file

@ -111,6 +111,7 @@ type scrapeLoopOptions struct {
interval time.Duration interval time.Duration
timeout time.Duration timeout time.Duration
scrapeClassicHistograms bool scrapeClassicHistograms bool
validationScheme model.ValidationScheme
mrc []*relabel.Config mrc []*relabel.Config
cache *scrapeCache cache *scrapeCache
@ -186,6 +187,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
options.PassMetadataInContext, options.PassMetadataInContext,
metrics, metrics,
options.skipOffsetting, options.skipOffsetting,
opts.validationScheme,
) )
} }
sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))
@ -303,6 +305,11 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
mrc = sp.config.MetricRelabelConfigs mrc = sp.config.MetricRelabelConfigs
) )
validationScheme := model.LegacyValidation
if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig {
validationScheme = model.UTF8Validation
}
sp.targetMtx.Lock() sp.targetMtx.Lock()
forcedErr := sp.refreshTargetLimitErr() forcedErr := sp.refreshTargetLimitErr()
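This hunk, and its twin in sync() below, translate the scrape config's MetricNameValidationScheme into the common model's scheme. A hedged sketch of a job opting in, assuming the YAML key `metric_name_validation_scheme` that the config field is unmarshalled from, written as a Go raw string the way the tests in this diff embed config:

// Hypothetical scrape config fragment (the key name is an assumption):
const utf8Cfg = `
scrape_configs:
  - job_name: utf8-job
    metric_name_validation_scheme: utf8
    static_configs:
      - targets: ['localhost:9090']
`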
@ -323,7 +330,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
client: sp.client, client: sp.client,
timeout: timeout, timeout: timeout,
bodySizeLimit: bodySizeLimit, bodySizeLimit: bodySizeLimit,
acceptHeader: acceptHeader(sp.config.ScrapeProtocols), acceptHeader: acceptHeader(sp.config.ScrapeProtocols, validationScheme),
acceptEncodingHeader: acceptEncodingHeader(enableCompression), acceptEncodingHeader: acceptEncodingHeader(enableCompression),
} }
newLoop = sp.newLoop(scrapeLoopOptions{ newLoop = sp.newLoop(scrapeLoopOptions{
@ -341,6 +348,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
cache: cache, cache: cache,
interval: interval, interval: interval,
timeout: timeout, timeout: timeout,
validationScheme: validationScheme,
}) })
) )
if err != nil { if err != nil {
@ -452,6 +460,11 @@ func (sp *scrapePool) sync(targets []*Target) {
scrapeClassicHistograms = sp.config.ScrapeClassicHistograms scrapeClassicHistograms = sp.config.ScrapeClassicHistograms
) )
validationScheme := model.LegacyValidation
if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig {
validationScheme = model.UTF8Validation
}
sp.targetMtx.Lock() sp.targetMtx.Lock()
for _, t := range targets { for _, t := range targets {
hash := t.hash() hash := t.hash()
@ -467,7 +480,7 @@ func (sp *scrapePool) sync(targets []*Target) {
client: sp.client, client: sp.client,
timeout: timeout, timeout: timeout,
bodySizeLimit: bodySizeLimit, bodySizeLimit: bodySizeLimit,
acceptHeader: acceptHeader(sp.config.ScrapeProtocols), acceptHeader: acceptHeader(sp.config.ScrapeProtocols, validationScheme),
acceptEncodingHeader: acceptEncodingHeader(enableCompression), acceptEncodingHeader: acceptEncodingHeader(enableCompression),
metrics: sp.metrics, metrics: sp.metrics,
} }
@ -714,11 +727,16 @@ var errBodySizeLimit = errors.New("body size limit exceeded")
// acceptHeader transforms preference from the options into specific header values as // acceptHeader transforms preference from the options into specific header values as
// https://www.rfc-editor.org/rfc/rfc9110.html#name-accept defines. // https://www.rfc-editor.org/rfc/rfc9110.html#name-accept defines.
// No validation is here, we expect scrape protocols to be validated already. // No validation is here, we expect scrape protocols to be validated already.
func acceptHeader(sps []config.ScrapeProtocol) string { func acceptHeader(sps []config.ScrapeProtocol, scheme model.ValidationScheme) string {
var vals []string var vals []string
weight := len(config.ScrapeProtocolsHeaders) + 1 weight := len(config.ScrapeProtocolsHeaders) + 1
for _, sp := range sps { for _, sp := range sps {
vals = append(vals, fmt.Sprintf("%s;q=0.%d", config.ScrapeProtocolsHeaders[sp], weight)) val := config.ScrapeProtocolsHeaders[sp]
if scheme == model.UTF8Validation {
val += ";" + config.UTF8NamesHeader
}
val += fmt.Sprintf(";q=0.%d", weight)
vals = append(vals, val)
weight-- weight--
} }
// Default match anything. // Default match anything.
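Under UTF-8 validation each advertised media type gains the escaping parameter before its quality weight, telling the target that the scraper accepts unescaped UTF-8 names. An illustrative call; the exact media types and weights come from config.ScrapeProtocolsHeaders, so the value shown is approximate:

h := acceptHeader(config.DefaultScrapeProtocols, model.UTF8Validation)
// h looks roughly like:
//	application/openmetrics-text;version=1.0.0;escaping=allow-utf-8;q=0.5,
//	application/openmetrics-text;version=0.0.1;escaping=allow-utf-8;q=0.4,
//	text/plain;version=0.0.4;escaping=allow-utf-8;q=0.3,*/*;q=0.2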
@ -838,6 +856,7 @@ type scrapeLoop struct {
interval time.Duration interval time.Duration
timeout time.Duration timeout time.Duration
scrapeClassicHistograms bool scrapeClassicHistograms bool
validationScheme model.ValidationScheme
// Feature flagged options. // Feature flagged options.
enableNativeHistogramIngestion bool enableNativeHistogramIngestion bool
@ -1145,6 +1164,7 @@ func newScrapeLoop(ctx context.Context,
passMetadataInContext bool, passMetadataInContext bool,
metrics *scrapeMetrics, metrics *scrapeMetrics,
skipOffsetting bool, skipOffsetting bool,
validationScheme model.ValidationScheme,
) *scrapeLoop { ) *scrapeLoop {
if l == nil { if l == nil {
l = log.NewNopLogger() l = log.NewNopLogger()
@ -1196,6 +1216,7 @@ func newScrapeLoop(ctx context.Context,
appendMetadataToWAL: appendMetadataToWAL, appendMetadataToWAL: appendMetadataToWAL,
metrics: metrics, metrics: metrics,
skipOffsetting: skipOffsetting, skipOffsetting: skipOffsetting,
validationScheme: validationScheme,
} }
sl.ctx, sl.cancel = context.WithCancel(ctx) sl.ctx, sl.cancel = context.WithCancel(ctx)
@ -1616,7 +1637,7 @@ loop:
err = errNameLabelMandatory err = errNameLabelMandatory
break loop break loop
} }
if !lset.IsValid() { if !lset.IsValid(sl.validationScheme) {
err = fmt.Errorf("invalid metric name or label names: %s", lset.String()) err = fmt.Errorf("invalid metric name or label names: %s", lset.String())
break loop break loop
} }
@ -1631,10 +1652,11 @@ loop:
updateMetadata(lset, true) updateMetadata(lset, true)
} }
if seriesAlreadyScraped { if seriesAlreadyScraped && parsedTimestamp == nil {
err = storage.ErrDuplicateSampleForTimestamp err = storage.ErrDuplicateSampleForTimestamp
} else { } else {
if ctMs := p.CreatedTimestamp(); sl.enableCTZeroIngestion && ctMs != nil { if sl.enableCTZeroIngestion {
if ctMs := p.CreatedTimestamp(); ctMs != nil {
ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs)
if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now.
// CT is an experimental feature. For now, we don't need to fail the // CT is an experimental feature. For now, we don't need to fail the
@ -1642,6 +1664,7 @@ loop:
level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err)
} }
} }
}
if isHistogram && sl.enableNativeHistogramIngestion { if isHistogram && sl.enableNativeHistogramIngestion {
if h != nil { if h != nil {

View file

@ -35,6 +35,7 @@ import (
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
config_util "github.com/prometheus/common/config" config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -683,6 +684,7 @@ func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app
false, false,
newTestScrapeMetrics(t), newTestScrapeMetrics(t),
false, false,
model.LegacyValidation,
) )
} }
@ -825,6 +827,7 @@ func TestScrapeLoopRun(t *testing.T) {
false, false,
scrapeMetrics, scrapeMetrics,
false, false,
model.LegacyValidation,
) )
// The loop must terminate during the initial offset if the context // The loop must terminate during the initial offset if the context
@ -969,6 +972,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
false, false,
scrapeMetrics, scrapeMetrics,
false, false,
model.LegacyValidation,
) )
defer cancel() defer cancel()
@ -1064,6 +1068,40 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
require.Equal(t, 0, seriesAdded) require.Equal(t, 0, seriesAdded)
} }
func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) {
// Test that scrapes fail when default validation is utf8 but scrape config is
// legacy.
model.NameValidationScheme = model.UTF8Validation
defer func() {
model.NameValidationScheme = model.LegacyValidation
}()
s := teststorage.New(t)
defer s.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0)
sl.validationScheme = model.LegacyValidation
slApp := sl.appender(ctx)
total, added, seriesAdded, err := sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "", time.Time{})
require.ErrorContains(t, err, "invalid metric name or label names")
require.NoError(t, slApp.Rollback())
require.Equal(t, 1, total)
require.Equal(t, 0, added)
require.Equal(t, 0, seriesAdded)
// When scrapeloop has validation set to UTF-8, the metric is allowed.
sl.validationScheme = model.UTF8Validation
slApp = sl.appender(ctx)
total, added, seriesAdded, err = sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "", time.Time{})
require.NoError(t, err)
require.Equal(t, 1, total)
require.Equal(t, 1, added)
require.Equal(t, 1, seriesAdded)
}
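The payload above uses the quoted syntax for UTF-8 metric names. For reference, a hedged sketch of that exposition form; the labeled variant is an assumption based on the UTF-8 names proposal, not something this test exercises:

// Quoted UTF-8 metric name, no labels (as used above):
//	{"test.metric"} 1
// Assumed form with labels:
//	{"test.metric",owner="team-a"} 1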
func makeTestMetrics(n int) []byte { func makeTestMetrics(n int) []byte {
// Construct a metrics string to parse // Construct a metrics string to parse
sb := bytes.Buffer{} sb := bytes.Buffer{}
@ -2339,11 +2377,15 @@ func TestTargetScraperScrapeOK(t *testing.T) {
) )
var protobufParsing bool var protobufParsing bool
var allowUTF8 bool
server := httptest.NewServer( server := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if protobufParsing {
accept := r.Header.Get("Accept") accept := r.Header.Get("Accept")
if allowUTF8 {
require.Truef(t, strings.Contains(accept, "escaping=allow-utf-8"), "Expected Accept header to allow utf8, got %q", accept)
}
if protobufParsing {
require.True(t, strings.HasPrefix(accept, "application/vnd.google.protobuf;"), require.True(t, strings.HasPrefix(accept, "application/vnd.google.protobuf;"),
"Expected Accept header to prefer application/vnd.google.protobuf.") "Expected Accept header to prefer application/vnd.google.protobuf.")
} }
@ -2351,7 +2393,11 @@ func TestTargetScraperScrapeOK(t *testing.T) {
timeout := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds") timeout := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds")
require.Equal(t, expectedTimeout, timeout, "Expected scrape timeout header.") require.Equal(t, expectedTimeout, timeout, "Expected scrape timeout header.")
if allowUTF8 {
w.Header().Set("Content-Type", `text/plain; version=1.0.0; escaping=allow-utf-8`)
} else {
w.Header().Set("Content-Type", `text/plain; version=0.0.4`) w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
}
w.Write([]byte("metric_a 1\nmetric_b 2\n")) w.Write([]byte("metric_a 1\nmetric_b 2\n"))
}), }),
) )
@ -2380,13 +2426,22 @@ func TestTargetScraperScrapeOK(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
contentType, err := ts.readResponse(context.Background(), resp, &buf) contentType, err := ts.readResponse(context.Background(), resp, &buf)
require.NoError(t, err) require.NoError(t, err)
if allowUTF8 {
require.Equal(t, "text/plain; version=1.0.0; escaping=allow-utf-8", contentType)
} else {
require.Equal(t, "text/plain; version=0.0.4", contentType) require.Equal(t, "text/plain; version=0.0.4", contentType)
}
require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String()) require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String())
} }
runTest(acceptHeader(config.DefaultScrapeProtocols)) runTest(acceptHeader(config.DefaultScrapeProtocols, model.LegacyValidation))
protobufParsing = true protobufParsing = true
runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols)) runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols, model.LegacyValidation))
protobufParsing = false
allowUTF8 = true
runTest(acceptHeader(config.DefaultScrapeProtocols, model.UTF8Validation))
protobufParsing = true
runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols, model.UTF8Validation))
} }
func TestTargetScrapeScrapeCancel(t *testing.T) { func TestTargetScrapeScrapeCancel(t *testing.T) {
@ -2412,7 +2467,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
), ),
}, },
client: http.DefaultClient, client: http.DefaultClient,
acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols), acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation),
} }
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@ -2467,7 +2522,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) {
), ),
}, },
client: http.DefaultClient, client: http.DefaultClient,
acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols), acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation),
} }
resp, err := ts.scrape(context.Background()) resp, err := ts.scrape(context.Background())
@ -2511,7 +2566,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) {
}, },
client: http.DefaultClient, client: http.DefaultClient,
bodySizeLimit: bodySizeLimit, bodySizeLimit: bodySizeLimit,
acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols), acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation),
metrics: newTestScrapeMetrics(t), metrics: newTestScrapeMetrics(t),
} }
var buf bytes.Buffer var buf bytes.Buffer
@ -3627,6 +3682,7 @@ func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) {
require.Equal(t, 3, total) require.Equal(t, 3, total)
require.Equal(t, 3, added) require.Equal(t, 3, added)
require.Equal(t, 1, seriesAdded) require.Equal(t, 1, seriesAdded)
require.Equal(t, 2.0, prom_testutil.ToFloat64(sl.metrics.targetScrapeSampleDuplicate))
slApp = sl.appender(ctx) slApp = sl.appender(ctx)
total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\ntest_metric 1\ntest_metric 1\n"), "", time.Time{}) total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\ntest_metric 1\ntest_metric 1\n"), "", time.Time{})
@ -3635,12 +3691,18 @@ func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) {
require.Equal(t, 3, total) require.Equal(t, 3, total)
require.Equal(t, 3, added) require.Equal(t, 3, added)
require.Equal(t, 0, seriesAdded) require.Equal(t, 0, seriesAdded)
require.Equal(t, 4.0, prom_testutil.ToFloat64(sl.metrics.targetScrapeSampleDuplicate))
metric := dto.Metric{} // When different timestamps are supplied, multiple samples are accepted.
err = sl.metrics.targetScrapeSampleDuplicate.Write(&metric) slApp = sl.appender(ctx)
total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1 1001\ntest_metric 1 1002\ntest_metric 1 1003\n"), "", time.Time{})
require.NoError(t, err) require.NoError(t, err)
value := metric.GetCounter().GetValue() require.NoError(t, slApp.Commit())
require.Equal(t, 4.0, value) require.Equal(t, 3, total)
require.Equal(t, 3, added)
require.Equal(t, 0, seriesAdded)
// The duplicate-sample metric has not increased since the previous append.
require.Equal(t, 4.0, prom_testutil.ToFloat64(sl.metrics.targetScrapeSampleDuplicate))
} }
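The new final sub-case exercises the scrape-loop change earlier in this diff (duplicate suppression now also requires parsedTimestamp == nil): explicitly timestamped repeats are appended rather than counted as duplicates. In exposition terms:

// Counted as duplicates (same series, no explicit timestamp, one scrape):
//	test_metric 1
//	test_metric 1
// Accepted (explicit, distinct timestamps):
//	test_metric 1 1001
//	test_metric 1 1002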
// This tests running a full scrape loop and checking that the scrape option // This tests running a full scrape loop and checking that the scrape option

View file

@ -1,8 +1,66 @@
Certificate:
Data:
Version: 3 (0x2)
Serial Number:
93:6c:9e:29:8d:37:7b:66
Signature Algorithm: sha256WithRSAEncryption
Issuer: C = XX, L = Default City, O = Default Company Ltd, CN = Prometheus Test CA
Validity
Not Before: Aug 20 11:51:23 2024 GMT
Not After : Dec 5 11:51:23 2044 GMT
Subject: C = XX, L = Default City, O = Default Company Ltd, CN = Prometheus Test CA
Subject Public Key Info:
Public Key Algorithm: rsaEncryption
Public-Key: (2048 bit)
Modulus:
00:e9:52:05:4d:f2:5a:95:04:2d:b8:73:8b:3c:e7:
47:48:67:00:be:dd:6c:41:f3:7c:4b:44:73:a3:77:
3e:84:af:30:d7:2a:ad:45:6a:b7:89:23:05:15:b3:
aa:46:79:b8:95:64:cc:13:c4:44:a1:01:a0:e2:3d:
a5:67:2b:aa:d3:13:06:43:33:1c:96:36:12:9e:c6:
1d:36:9b:d7:47:bd:28:2d:88:15:04:fa:14:a3:ff:
8c:26:22:c5:a2:15:c7:76:b3:11:f6:a3:44:9a:28:
0f:ca:fb:f4:51:a8:6a:05:94:7c:77:47:c8:21:56:
25:bf:e2:2e:df:33:f3:e4:bd:d6:47:a5:49:13:12:
c8:1f:4c:d7:2a:56:a2:6c:c1:cf:55:05:5d:9a:75:
a2:23:4e:e6:8e:ff:76:05:d6:e0:c8:0b:51:f0:b6:
a1:b2:7d:8f:9c:6a:a5:ce:86:92:15:8c:5b:86:45:
c0:4a:ff:54:b8:ee:cf:11:bd:07:cb:4b:7d:0b:a1:
9d:72:86:9f:55:bc:f9:6c:d9:55:60:96:30:3f:ec:
2d:f6:5f:9a:32:9a:5a:5c:1c:5f:32:f9:d6:0f:04:
f8:81:08:04:9a:95:c3:9d:5a:30:8e:a5:0e:47:2f:
00:ce:e0:2e:ad:5a:b8:b6:4c:55:7c:8a:59:22:b0:
ed:73
Exponent: 65537 (0x10001)
X509v3 extensions:
X509v3 Subject Key Identifier:
CC:F5:05:99:E5:AB:12:69:D8:78:89:4A:31:CA:F0:8B:0B:AD:66:1B
X509v3 Authority Key Identifier:
CC:F5:05:99:E5:AB:12:69:D8:78:89:4A:31:CA:F0:8B:0B:AD:66:1B
X509v3 Basic Constraints:
CA:TRUE
Signature Algorithm: sha256WithRSAEncryption
Signature Value:
4a:a1:b0:bc:c8:87:4f:7c:96:62:e5:09:29:ae:3a:2e:68:d0:
d2:c5:68:ed:ea:83:36:b1:86:f3:b9:e9:19:2b:b6:73:10:6f:
df:7f:bb:f1:76:81:03:c1:a1:5a:ee:6c:44:b8:7c:10:d1:5a:
d7:c1:92:64:59:35:a6:e0:aa:08:41:37:6e:e7:c8:b6:bd:0c:
4b:47:78:ec:c4:b4:15:a3:62:76:4a:39:8e:6e:19:ff:f0:c0:
8a:7e:1c:cd:87:e5:00:6c:f1:ce:27:26:ff:b8:e9:eb:f7:2f:
bd:c2:4b:9c:d6:57:de:74:74:b3:4f:03:98:9a:b5:08:2d:16:
ca:7f:b6:c8:76:62:86:1b:7c:f2:3e:6c:78:cc:2c:95:9a:bb:
77:25:e8:80:ff:9b:e8:f8:9a:85:3b:85:b7:17:4e:77:a1:cf:
4d:b9:d0:25:e8:5d:8c:e6:7c:f1:d9:52:30:3d:ec:2b:37:91:
bc:e2:e8:39:31:6f:3d:e9:98:70:80:7c:41:dd:19:13:05:21:
94:7b:16:cf:d8:ee:4e:38:34:5e:6a:ff:cd:85:ac:8f:94:9a:
dd:4e:77:05:13:a6:b4:80:52:b2:97:64:76:88:f4:dd:42:0a:
50:1c:80:fd:4b:6e:a9:62:10:aa:ef:2e:c1:2f:be:0e:c2:2e:
b5:28:5f:83
-----BEGIN CERTIFICATE----- -----BEGIN CERTIFICATE-----
MIIDkTCCAnmgAwIBAgIJAJNsnimNN3tmMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV MIIDkTCCAnmgAwIBAgIJAJNsnimNN3tmMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV
BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0xNTA4 Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0yNDA4
MDQxNDA5MjFaFw0yNTA4MDExNDA5MjFaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH MjAxMTUxMjNaFw00NDEyMDUxMTUxMjNaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH
DAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxGzAZ DAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxGzAZ
BgNVBAMMElByb21ldGhldXMgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP BgNVBAMMElByb21ldGhldXMgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBAOlSBU3yWpUELbhzizznR0hnAL7dbEHzfEtEc6N3PoSvMNcqrUVq ADCCAQoCggEBAOlSBU3yWpUELbhzizznR0hnAL7dbEHzfEtEc6N3PoSvMNcqrUVq
@ -12,11 +70,11 @@ yB9M1ypWomzBz1UFXZp1oiNO5o7/dgXW4MgLUfC2obJ9j5xqpc6GkhWMW4ZFwEr/
VLjuzxG9B8tLfQuhnXKGn1W8+WzZVWCWMD/sLfZfmjKaWlwcXzL51g8E+IEIBJqV VLjuzxG9B8tLfQuhnXKGn1W8+WzZVWCWMD/sLfZfmjKaWlwcXzL51g8E+IEIBJqV
w51aMI6lDkcvAM7gLq1auLZMVXyKWSKw7XMCAwEAAaNQME4wHQYDVR0OBBYEFMz1 w51aMI6lDkcvAM7gLq1auLZMVXyKWSKw7XMCAwEAAaNQME4wHQYDVR0OBBYEFMz1
BZnlqxJp2HiJSjHK8IsLrWYbMB8GA1UdIwQYMBaAFMz1BZnlqxJp2HiJSjHK8IsL BZnlqxJp2HiJSjHK8IsLrWYbMB8GA1UdIwQYMBaAFMz1BZnlqxJp2HiJSjHK8IsL
rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAI2iA3w3TK5J15Pu rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAEqhsLzIh098lmLl
e4fPFB4jxQqsbUwuyXbCCv/jKLeFNCD4BjM181WZEYjPMumeTBVzU3aF45LWQIG1 CSmuOi5o0NLFaO3qgzaxhvO56RkrtnMQb99/u/F2gQPBoVrubES4fBDRWtfBkmRZ
0DJcrCL4mjMz9qgAoGqA7aDDXiJGbukMgYYsn7vrnVmrZH8T3E8ySlltr7+W578k NabgqghBN27nyLa9DEtHeOzEtBWjYnZKOY5uGf/wwIp+HM2H5QBs8c4nJv+46ev3
pJ5FxnbCroQwn0zLyVB3sFbS8E3vpBr3L8oy8PwPHhIScexcNVc3V6/m4vTZsXTH L73CS5zWV950dLNPA5iatQgtFsp/tsh2YoYbfPI+bHjMLJWau3cl6ID/m+j4moU7
U+vUm1XhDgpDcFMTg2QQiJbfpOYUkwIgnRDAT7t282t2KQWtnlqc3zwPQ1F/6Cpx hbcXTnehz0250CXoXYzmfPHZUjA97Cs3kbzi6Dkxbz3pmHCAfEHdGRMFIZR7Fs/Y
j19JeNsaF1DArkD7YlyKj/GhZLtHwFHG5cxznH0mLDJTW7bQvqqh2iQTeXmBk1lU 7k44NF5q/82FrI+Umt1OdwUTprSAUrKXZHaI9N1CClAcgP1LbqliEKrvLsEvvg7C
mM5lH/s= LrUoX4M=
-----END CERTIFICATE----- -----END CERTIFICATE-----

View file

@ -26,14 +26,14 @@ jobs:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Install Go - name: Install Go
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with: with:
go-version: 1.22.x go-version: 1.22.x
- name: Install snmp_exporter/generator dependencies - name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter' if: github.repository == 'prometheus/snmp_exporter'
- name: Lint - name: Lint
uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
with: with:
args: --verbose args: --verbose
version: v1.59.1 version: v1.60.2

View file

@ -37,7 +37,7 @@ if [ -z "${GITHUB_TOKEN}" ]; then
fi fi
# List of files that should be synced. # List of files that should be synced.
SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint scripts/golangci-lint.yml .github/workflows/scorecards.yml .github/workflows/container_description.yml" SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint scripts/golangci-lint.yml .github/workflows/scorecards.yml .github/workflows/container_description.yml .github/workflows/stale.yml"
# Go to the root of the repo # Go to the root of the repo
cd "$(git rev-parse --show-cdup)" || exit 1 cd "$(git rev-parse --show-cdup)" || exit 1

View file

@ -96,10 +96,10 @@ func TestSampleRingMixed(t *testing.T) {
// With ValNone as the preferred type, nothing should be initialized. // With ValNone as the preferred type, nothing should be initialized.
r := newSampleRing(10, 2, chunkenc.ValNone) r := newSampleRing(10, 2, chunkenc.ValNone)
require.Zero(t, len(r.fBuf)) require.Empty(t, r.fBuf)
require.Zero(t, len(r.hBuf)) require.Empty(t, r.hBuf)
require.Zero(t, len(r.fhBuf)) require.Empty(t, r.fhBuf)
require.Zero(t, len(r.iBuf)) require.Empty(t, r.iBuf)
// But then mixed adds should work as expected. // But then mixed adds should work as expected.
r.addF(fSample{t: 1, f: 3.14}) r.addF(fSample{t: 1, f: 3.14})
@ -146,10 +146,10 @@ func TestSampleRingAtFloatHistogram(t *testing.T) {
// With ValNone as the preferred type, nothing should be initialized. // With ValNone as the preferred type, nothing should be initialized.
r := newSampleRing(10, 2, chunkenc.ValNone) r := newSampleRing(10, 2, chunkenc.ValNone)
require.Zero(t, len(r.fBuf)) require.Empty(t, r.fBuf)
require.Zero(t, len(r.hBuf)) require.Empty(t, r.hBuf)
require.Zero(t, len(r.fhBuf)) require.Empty(t, r.fhBuf)
require.Zero(t, len(r.iBuf)) require.Empty(t, r.iBuf)
var ( var (
h *histogram.Histogram h *histogram.Histogram

View file

@ -18,6 +18,7 @@ import "fmt"
type errDuplicateSampleForTimestamp struct { type errDuplicateSampleForTimestamp struct {
timestamp int64 timestamp int64
existing float64 existing float64
existingIsHistogram bool
newValue float64 newValue float64
} }
@ -29,13 +30,26 @@ func NewDuplicateFloatErr(t int64, existing, newValue float64) error {
} }
} }
// NewDuplicateHistogramToFloatErr describes an error where a new float sample is sent for same timestamp as previous histogram.
func NewDuplicateHistogramToFloatErr(t int64, newValue float64) error {
return errDuplicateSampleForTimestamp{
timestamp: t,
existingIsHistogram: true,
newValue: newValue,
}
}
func (e errDuplicateSampleForTimestamp) Error() string { func (e errDuplicateSampleForTimestamp) Error() string {
if e.timestamp == 0 { if e.timestamp == 0 {
return "duplicate sample for timestamp" return "duplicate sample for timestamp"
} }
if e.existingIsHistogram {
return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing is a histogram, new value %g", e.timestamp, e.newValue)
}
return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing %g, new value %g", e.timestamp, e.existing, e.newValue) return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing %g, new value %g", e.timestamp, e.existing, e.newValue)
} }
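Because the concrete type implements Is (below), any duplicate error matches the exported sentinel, even when wrapped; the new errors_test.go further down exercises exactly this. A minimal usage sketch:

err := NewDuplicateHistogramToFloatErr(1_000, 2.5)
// true: matches the sentinel via the Is method below, wrapped or not.
errors.Is(fmt.Errorf("append failed: %w", err), ErrDuplicateSampleForTimestamp)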
// Is implements the anonymous interface checked by errors.Is.
// Every errDuplicateSampleForTimestamp compares equal to the global ErrDuplicateSampleForTimestamp. // Every errDuplicateSampleForTimestamp compares equal to the global ErrDuplicateSampleForTimestamp.
func (e errDuplicateSampleForTimestamp) Is(t error) bool { func (e errDuplicateSampleForTimestamp) Is(t error) bool {
if t == ErrDuplicateSampleForTimestamp { if t == ErrDuplicateSampleForTimestamp {

38
storage/errors_test.go Normal file
View file

@ -0,0 +1,38 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
func TestErrDuplicateSampleForTimestamp(t *testing.T) {
// All errDuplicateSampleForTimestamp are ErrDuplicateSampleForTimestamp
require.ErrorIs(t, ErrDuplicateSampleForTimestamp, errDuplicateSampleForTimestamp{})
// Errors of the same type compare equal only if they have the same properties.
err := NewDuplicateFloatErr(1_000, 10, 20)
sameErr := NewDuplicateFloatErr(1_000, 10, 20)
differentErr := NewDuplicateFloatErr(1_001, 30, 40)
require.ErrorIs(t, err, sameErr)
require.NotErrorIs(t, err, differentErr)
// Also works when err is wrapped.
require.ErrorIs(t, fmt.Errorf("failed: %w", err), sameErr)
require.NotErrorIs(t, fmt.Errorf("failed: %w", err), differentErr)
}

Some files were not shown because too many files have changed in this diff.