Mirror of https://github.com/prometheus/prometheus.git, synced 2024-12-25 13:44:05 -08:00

commit c3793f2b32

Merge remote-tracking branch 'prometheus/main' into arve/wlog-histograms

Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
.github/dependabot.yml (vendored, 2 changed lines)

@@ -11,6 +11,7 @@ updates:
       go.opentelemetry.io:
         patterns:
           - "go.opentelemetry.io/*"
+    open-pull-requests-limit: 20
   - package-ecosystem: "gomod"
     directory: "/documentation/examples/remote_storage"
     schedule:
@@ -19,6 +20,7 @@ updates:
     directory: "/web/ui"
     schedule:
       interval: "monthly"
+    open-pull-requests-limit: 20
   - package-ecosystem: "github-actions"
     directory: "/"
     schedule:
.github/workflows/buf-lint.yml (vendored, 4 changed lines)

@@ -12,8 +12,8 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
-      - uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
.github/workflows/buf.yml (vendored, 4 changed lines)

@@ -12,8 +12,8 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'prometheus'
     steps:
-      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
-      - uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
.github/workflows/ci.yml (vendored, 32 changed lines)

@@ -13,7 +13,7 @@ jobs:
       # should also be updated.
       image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/setup_environment
       - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
@@ -27,7 +27,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/setup_environment
       - run: go test --tags=dedupelabels ./...
@@ -43,7 +43,7 @@ jobs:
       # The go version in this image should be N-1 wrt test_go.
       image: quay.io/prometheus/golang-builder:1.21-base
     steps:
-      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - run: make build
       # Don't run NPM build; don't run race-detector.
       - run: make test GO_ONLY=1 test-flags=""
@@ -57,7 +57,7 @@ jobs:
       image: quay.io/prometheus/golang-builder:1.22-base

     steps:
-      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/setup_environment
         with:
@@ -74,8 +74,8 @@ jobs:
     name: Go tests on Windows
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
-      - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
         with:
           go-version: 1.22.x
       - run: |
@@ -91,7 +91,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - run: go install ./cmd/promtool/.
       - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
       - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
@@ -114,7 +114,7 @@ jobs:
     matrix:
       thread: [ 0, 1, 2 ]
     steps:
-      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/build
         with:
@@ -137,7 +137,7 @@ jobs:
       # Whenever the Go version is updated here, .promu.yml
       # should also be updated.
     steps:
-      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/build
         with:
@@ -148,9 +148,9 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - name: Install Go
-        uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
+        uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
         with:
           cache: false
           go-version: 1.22.x
@@ -163,7 +163,7 @@ jobs:
       - name: Checkout repository
        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - name: Install Go
-        uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
+        uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
         with:
           go-version: 1.22.x
       - name: Install snmp_exporter/generator dependencies
@@ -174,7 +174,7 @@ jobs:
         with:
           args: --verbose
           # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v1.59.0
+          version: v1.59.1
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'
@@ -187,7 +187,7 @@ jobs:
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
-      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/publish_main
         with:
@@ -201,7 +201,7 @@ jobs:
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
     steps:
-      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/publish_release
         with:
@@ -216,7 +216,7 @@ jobs:
     needs: [test_ui, codeql]
     steps:
       - name: Checkout
-        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - name: Install nodejs
         uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2
.github/workflows/codeql-analysis.yml (vendored, 8 changed lines)

@@ -24,15 +24,15 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6

       - name: Initialize CodeQL
-        uses: github/codeql-action/init@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
+        uses: github/codeql-action/init@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
         with:
           languages: ${{ matrix.language }}

       - name: Autobuild
-        uses: github/codeql-action/autobuild@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
+        uses: github/codeql-action/autobuild@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
+        uses: github/codeql-action/analyze@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
.github/workflows/container_description.yml (vendored, 13 changed lines)

@@ -4,6 +4,7 @@ on:
   push:
     paths:
       - "README.md"
      - "README-containers.md"
+      - ".github/workflows/container_description.yml"
     branches: [ main, master ]

@@ -17,7 +18,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - name: Set docker hub repo name
         run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
       - name: Push README to Dockerhub
@@ -29,7 +30,9 @@ jobs:
           destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
           provider: dockerhub
           short_description: ${{ env.DOCKER_REPO_NAME }}
-          readme_file: 'README.md'
+          # Empty string results in README-containers.md being pushed if it
+          # exists. Otherwise, README.md is pushed.
+          readme_file: ''

   PushQuayIoReadme:
     runs-on: ubuntu-latest
@@ -37,7 +40,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - name: Set quay.io org name
         run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
       - name: Set quay.io repo name
@@ -49,4 +52,6 @@ jobs:
         with:
           destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
           provider: quay
-          readme_file: 'README.md'
+          # Empty string results in README-containers.md being pushed if it
+          # exists. Otherwise, README.md is pushed.
+          readme_file: ''
.github/workflows/repo_sync.yml (vendored, 2 changed lines)

@@ -13,7 +13,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder
     steps:
-      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - run: ./scripts/sync_repo_files.sh
         env:
           GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}
.github/workflows/scorecards.yml (vendored, 6 changed lines)

@@ -21,12 +21,12 @@ jobs:

     steps:
       - name: "Checkout code"
-        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # tag=v4.1.4
+        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6
         with:
           persist-credentials: false

       - name: "Run analysis"
-        uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # tag=v2.3.1
+        uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # tag=v2.3.3
         with:
           results_file: results.sarif
           results_format: sarif
@@ -45,6 +45,6 @@ jobs:

       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # tag=v3.22.12
+        uses: github/codeql-action/upload-sarif@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # tag=v3.25.8
         with:
           sarif_file: results.sarif
@@ -29,6 +29,7 @@ linters:
   - unused
   - usestdlibvars
   - whitespace
+  - loggercheck

 issues:
   max-same-issues: 0
CHANGELOG.md (30 changed lines)

@@ -2,19 +2,31 @@

 ## unreleased

-* [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980
-* [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` to measure the time it takes to restore a rule group. #13974
-* [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991
-* [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620
-* [BUGFIX] OTLP: Don't generate target_info unless at least one identifying label is defined. #13991
-* [BUGFIX] OTLP: Don't generate target_info unless there are metrics. #13991
-* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042
+## 2.53.0 / 2024-06-16

-## 2.52.0-rc.1 / 2024-05-03
+This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 75.

-* [BUGFIX] API: Fix missing comma during JSON encoding of API results. #14047
-
-## 2.52.0-rc.0 / 2024-04-22
+* [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980 #14048
+* [CHANGE] Runtime: Change GOGC threshold from 100 to 75 #14176 #14285
+* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061 #14216 #14273
+* [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` metric to measure the time it takes to restore a rule group. #13974
+* [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991
+* [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620
+* [BUGFIX] OTLP: Don't generate target_info unless there are metrics and at least one identifying label is defined. #13991
+* [BUGFIX] Scrape: Do no try to ingest native histograms when the native histograms feature is turned off. This happened when protobuf scrape was enabled by for example the created time feature. #13987
+* [BUGFIX] Scaleway SD: Use the instance's public IP if no private IP is available as the `__address__` meta label. #13941
+* [BUGFIX] Query logger: Do not leak file descriptors on error. #13948
+* [BUGFIX] TSDB: Let queries with heavy regex matches be cancelled and not use up the CPU. #14096 #14103 #14118 #14199
+* [BUGFIX] API: Do not warn if result count is equal to the limit, only when exceeding the limit for the series, label-names and label-values APIs. #14116
+* [BUGFIX] TSDB: Fix head stats and hooks when replaying a corrupted snapshot. #14079
+
+## 2.52.1 / 2024-05-29
+
+* [BUGFIX] Linode SD: Fix partial fetch when discovery would return more than 500 elements. #14141

 ## 2.52.0 / 2024-05-07

 * [CHANGE] TSDB: Fix the predicate checking for blocks which are beyond the retention period to include the ones right at the retention boundary. #9633
 * [FEATURE] Kubernetes SD: Add a new metric `prometheus_sd_kubernetes_failures_total` to track failed requests to Kubernetes API. #13554
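Note: the new GOGC default can be overridden from the configuration file. A minimal sketch of the new runtime block, mirroring the config/testdata/conf.good.yml change later in this commit (values are illustrative; omitting the block yields the new default of 75, or the value of a set GOGC environment variable):

# prometheus.yml
global:
  scrape_interval: 1m   # illustrative
runtime:
  gogc: 42              # Go GC target percentage; default is 75 when absent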
@@ -16,7 +16,7 @@ Maintainers for specific parts of the codebase:
 * `model/histogram` and other code related to native histograms: Björn Rabenstein (<beorn@grafana.com> / @beorn7),
   George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
 * `storage`
-  * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie)
+  * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( <npazosmendez@gmail.com> / @npazosmendez), Alex Greenbank ( <alex.greenbank@grafana.com> / @alexgreenbank)
 * `otlptranslator`: Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto)
Makefile (2 changed lines)

@@ -91,7 +91,7 @@ endif

 promql/parser/generated_parser.y.go: promql/parser/generated_parser.y
 	@echo ">> running goyacc to generate the .go file."
-	@goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
+	@$(FIRST_GOPATH)/bin/goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y

 .PHONY: clean-parser
 clean-parser:
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.59.0
+GOLANGCI_LINT_VERSION ?= v1.59.1
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -1,8 +1,8 @@
 <h1 align="center" style="border-bottom: none">
-    <a href="//prometheus.io" target="_blank"><img alt="Prometheus" src="/documentation/images/prometheus-logo.svg"></a><br>Prometheus
+    <a href="https://prometheus.io" target="_blank"><img alt="Prometheus" src="/documentation/images/prometheus-logo.svg"></a><br>Prometheus
 </h1>

-<p align="center">Visit <a href="//prometheus.io" target="_blank">prometheus.io</a> for the full documentation,
+<p align="center">Visit <a href="https://prometheus.io" target="_blank">prometheus.io</a> for the full documentation,
 examples and guides.</p>

 <div align="center">
@@ -149,6 +149,8 @@ Changes for a patch release or release candidate should be merged into the previ

 Bump the version in the `VERSION` file and update `CHANGELOG.md`. Do this in a proper PR pointing to the release branch as this gives others the opportunity to chime in on the release in general and on the addition to the changelog in particular. For a release candidate, append something like `-rc.0` to the version (with the corresponding changes to the tag name, the release name etc.).

+When updating the `CHANGELOG.md` look at all PRs included in the release since the last release and verify if they need a changelog entry.
+
 Note that `CHANGELOG.md` should only document changes relevant to users of Prometheus, including external API changes, performance improvements, and new features. Do not document changes of internal interfaces, code refactorings and clean-ups, changes to the build process, etc. People interested in these are asked to refer to the git history.

 For release candidates still update `CHANGELOG.md`, but when you cut the final release later, merge all the changes from the pre-releases into the one final update.
@@ -28,6 +28,8 @@ import (
 	"os/signal"
 	"path/filepath"
 	"runtime"
+	"runtime/debug"
+	"strconv"
 	"strings"
 	"sync"
 	"syscall"
@@ -443,6 +445,9 @@ func main() {
 	serverOnlyFlag(a, "alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications.").
 		Default("10000").IntVar(&cfg.notifier.QueueCapacity)

+	serverOnlyFlag(a, "alertmanager.drain-notification-queue-on-shutdown", "Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down.").
+		Default("true").BoolVar(&cfg.notifier.DrainOnShutdown)
+
 	// TODO: Remove in Prometheus 3.0.
 	alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String()

@@ -785,6 +790,9 @@ func main() {
 			ResendDelay:            time.Duration(cfg.resendDelay),
 			MaxConcurrentEvals:     cfg.maxConcurrentEvals,
 			ConcurrentEvalsEnabled: cfg.enableConcurrentRuleEval,
+			DefaultRuleQueryOffset: func() time.Duration {
+				return time.Duration(cfgFile.GlobalConfig.RuleQueryOffset)
+			},
 		})
 	}

@@ -1192,7 +1200,7 @@ func main() {
 	}
 	if agentMode {
 		// WAL storage.
-		opts := cfg.agent.ToAgentOptions()
+		opts := cfg.agent.ToAgentOptions(cfg.tsdb.OutOfOrderTimeWindow)
 		cancel := make(chan struct{})
 		g.Add(
 			func() error {
@@ -1228,6 +1236,7 @@ func main() {
 					"TruncateFrequency", cfg.agent.TruncateFrequency,
 					"MinWALTime", cfg.agent.MinWALTime,
 					"MaxWALTime", cfg.agent.MaxWALTime,
+					"OutOfOrderTimeWindow", cfg.agent.OutOfOrderTimeWindow,
 				)

 				localStorage.Set(db, 0)
@@ -1381,6 +1390,17 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
 		return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
 	}

+	oldGoGC := debug.SetGCPercent(conf.Runtime.GoGC)
+	if oldGoGC != conf.Runtime.GoGC {
+		level.Info(logger).Log("msg", "updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC)
+	}
+	// Write the new setting out to the ENV var for runtime API output.
+	if conf.Runtime.GoGC >= 0 {
+		os.Setenv("GOGC", strconv.Itoa(conf.Runtime.GoGC))
+	} else {
+		os.Setenv("GOGC", "off")
+	}
+
 	noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
 	l := []interface{}{"msg", "Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)}
 	level.Info(logger).Log(append(l, timings...)...)
@@ -1720,17 +1740,22 @@ type agentOptions struct {
 	TruncateFrequency      model.Duration
 	MinWALTime, MaxWALTime model.Duration
 	NoLockfile             bool
+	OutOfOrderTimeWindow   int64
 }

-func (opts agentOptions) ToAgentOptions() agent.Options {
+func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Options {
+	if outOfOrderTimeWindow < 0 {
+		outOfOrderTimeWindow = 0
+	}
 	return agent.Options{
-		WALSegmentSize:    int(opts.WALSegmentSize),
-		WALCompression:    wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType),
-		StripeSize:        opts.StripeSize,
-		TruncateFrequency: time.Duration(opts.TruncateFrequency),
-		MinWALTime:        durationToInt64Millis(time.Duration(opts.MinWALTime)),
-		MaxWALTime:        durationToInt64Millis(time.Duration(opts.MaxWALTime)),
-		NoLockfile:        opts.NoLockfile,
+		WALSegmentSize:       int(opts.WALSegmentSize),
+		WALCompression:       wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType),
+		StripeSize:           opts.StripeSize,
+		TruncateFrequency:    time.Duration(opts.TruncateFrequency),
+		MinWALTime:           durationToInt64Millis(time.Duration(opts.MinWALTime)),
+		MaxWALTime:           durationToInt64Millis(time.Duration(opts.MaxWALTime)),
+		NoLockfile:           opts.NoLockfile,
+		OutOfOrderTimeWindow: outOfOrderTimeWindow,
 	}
 }
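Note: agent mode now threads an out-of-order time window into agent.Options, clamping negative values to zero. A sketch of enabling it, assuming cfg.tsdb.OutOfOrderTimeWindow is populated from the usual storage.tsdb block:

# prometheus.yml, run with --enable-feature=agent (assumed setup)
storage:
  tsdb:
    out_of_order_time_window: 10m   # accept samples up to 10m behind the newest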
@@ -72,7 +72,7 @@ func (p *queryLogTest) waitForPrometheus() error {
 	var err error
 	for x := 0; x < 20; x++ {
 		var r *http.Response
-		if r, err = http.Get(fmt.Sprintf("http://%s:%d%s/-/ready", p.host, p.port, p.prefix)); err == nil && r.StatusCode == 200 {
+		if r, err = http.Get(fmt.Sprintf("http://%s:%d%s/-/ready", p.host, p.port, p.prefix)); err == nil && r.StatusCode == http.StatusOK {
 			break
 		}
 		time.Sleep(500 * time.Millisecond)
@@ -22,6 +22,7 @@ import (
 	"time"

 	"github.com/go-kit/log"
+	"github.com/oklog/ulid"

 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/textparse"
@@ -191,6 +192,10 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 			if quiet {
 				break
 			}
+			// Empty block, don't print.
+			if block.Compare(ulid.ULID{}) == 0 {
+				break
+			}
 			blocks, err := db.Blocks()
 			if err != nil {
 				return fmt.Errorf("get blocks: %w", err)
@@ -20,6 +20,7 @@ import (
 	"os"
 	"path/filepath"
 	"sort"
+	"strconv"
 	"strings"
 	"time"

@@ -145,11 +146,17 @@ var (
 		ScrapeInterval:     model.Duration(1 * time.Minute),
 		ScrapeTimeout:      model.Duration(10 * time.Second),
 		EvaluationInterval: model.Duration(1 * time.Minute),
+		RuleQueryOffset:    model.Duration(0 * time.Minute),
 		// When native histogram feature flag is enabled, ScrapeProtocols default
 		// changes to DefaultNativeHistogramScrapeProtocols.
 		ScrapeProtocols: DefaultScrapeProtocols,
 	}

+	DefaultRuntimeConfig = RuntimeConfig{
+		// Go runtime tuning.
+		GoGC: 75,
+	}
+
 	// DefaultScrapeConfig is the default scrape configuration.
 	DefaultScrapeConfig = ScrapeConfig{
 		// ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals.
@@ -224,6 +231,7 @@ var (
 // Config is the top-level configuration for Prometheus's config files.
 type Config struct {
 	GlobalConfig      GlobalConfig    `yaml:"global"`
+	Runtime           RuntimeConfig   `yaml:"runtime,omitempty"`
 	AlertingConfig    AlertingConfig  `yaml:"alerting,omitempty"`
 	RuleFiles         []string        `yaml:"rule_files,omitempty"`
 	ScrapeConfigFiles []string        `yaml:"scrape_config_files,omitempty"`
@@ -334,6 +342,14 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		c.GlobalConfig = DefaultGlobalConfig
 	}

+	// If a runtime block was open but empty the default runtime config is overwritten.
+	// We have to restore it here.
+	if c.Runtime.isZero() {
+		c.Runtime = DefaultRuntimeConfig
+		// Use the GOGC env var value if the runtime section is empty.
+		c.Runtime.GoGC = getGoGCEnv()
+	}
+
 	for _, rf := range c.RuleFiles {
 		if !patRulePath.MatchString(rf) {
 			return fmt.Errorf("invalid rule file path %q", rf)
@@ -397,6 +413,8 @@ type GlobalConfig struct {
 	ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
 	// How frequently to evaluate rules by default.
 	EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`
+	// Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received.
+	RuleQueryOffset model.Duration `yaml:"rule_query_offset,omitempty"`
 	// File to which PromQL queries are logged.
 	QueryLogFile string `yaml:"query_log_file,omitempty"`
 	// The labels to add to any timeseries that this Prometheus instance scrapes.
@@ -556,10 +574,22 @@ func (c *GlobalConfig) isZero() bool {
 		c.ScrapeInterval == 0 &&
 		c.ScrapeTimeout == 0 &&
 		c.EvaluationInterval == 0 &&
+		c.RuleQueryOffset == 0 &&
 		c.QueryLogFile == "" &&
 		c.ScrapeProtocols == nil
 }

+// RuntimeConfig configures the values for the process behavior.
+type RuntimeConfig struct {
+	// The Go garbage collection target percentage.
+	GoGC int `yaml:"gogc,omitempty"`
+}
+
+// isZero returns true iff the global config is the zero value.
+func (c *RuntimeConfig) isZero() bool {
+	return c.GoGC == 0
+}
+
 type ScrapeConfigs struct {
 	ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
 }
@@ -1207,3 +1237,19 @@ func filePath(filename string) string {
 func fileErr(filename string, err error) error {
 	return fmt.Errorf("%q: %w", filePath(filename), err)
 }
+
+func getGoGCEnv() int {
+	goGCEnv := os.Getenv("GOGC")
+	// If the GOGC env var is set, use the same logic as upstream Go.
+	if goGCEnv != "" {
+		// Special case for GOGC=off.
+		if strings.ToLower(goGCEnv) == "off" {
+			return -1
+		}
+		i, err := strconv.Atoi(goGCEnv)
+		if err == nil {
+			return i
+		}
+	}
+	return DefaultRuntimeConfig.GoGC
+}
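Note: rule_query_offset above sets a global rule evaluation offset, and the changelog entry in this commit names a per-group query_offset override. A hedged sketch of combining the two (key names come from the diff and changelog; values are illustrative):

# prometheus.yml
global:
  rule_query_offset: 1m        # default offset applied to every rule group

# rules.yml
groups:
  - name: delayed-remote-write
    query_offset: 2m           # per-group override, per the changelog entry
    rules:
      - record: job:http_requests:rate5m
        expr: sum by (job) (rate(http_requests_total[5m]))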
@ -19,6 +19,7 @@ const ruleFilesConfigFile = "testdata/rules_abs_path.good.yml"
|
|||
|
||||
var ruleFilesExpectedConf = &Config{
|
||||
GlobalConfig: DefaultGlobalConfig,
|
||||
Runtime: DefaultRuntimeConfig,
|
||||
RuleFiles: []string{
|
||||
"testdata/first.rules",
|
||||
"testdata/rules/second.rules",
|
||||
|
|
@@ -76,6 +76,7 @@ const (
 	globLabelLimit            = 30
 	globLabelNameLengthLimit  = 200
 	globLabelValueLengthLimit = 200
+	globalGoGC                = 42
 )

 var expectedConf = &Config{
@@ -96,6 +97,10 @@ var expectedConf = &Config{
 		ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
 	},

+	Runtime: RuntimeConfig{
+		GoGC: globalGoGC,
+	},
+
 	RuleFiles: []string{
 		filepath.FromSlash("testdata/first.rules"),
 		filepath.FromSlash("testdata/my/*.rules"),
@@ -993,6 +998,7 @@ var expectedConf = &Config{
 				HostNetworkingHost: "localhost",
 				RefreshInterval:    model.Duration(60 * time.Second),
 				HTTPClientConfig:   config.DefaultHTTPClientConfig,
+				MatchFirstNetwork:  true,
 			},
 		},
 	},
@@ -2081,6 +2087,7 @@ func TestEmptyGlobalBlock(t *testing.T) {
 	c, err := Load("global:\n", false, log.NewNopLogger())
 	require.NoError(t, err)
 	exp := DefaultConfig
+	exp.Runtime = DefaultRuntimeConfig
 	require.Equal(t, exp, *c)
 }
@@ -17,6 +17,7 @@ const ruleFilesConfigFile = "testdata/rules_abs_path_windows.good.yml"

 var ruleFilesExpectedConf = &Config{
 	GlobalConfig: DefaultGlobalConfig,
+	Runtime:      DefaultRuntimeConfig,
 	RuleFiles: []string{
 		"testdata\\first.rules",
 		"testdata\\rules\\second.rules",
config/testdata/conf.good.yml (vendored, 3 changed lines)

@@ -14,6 +14,9 @@ global:
     monitor: codelab
     foo: bar

+runtime:
+  gogc: 42
+
 rule_files:
   - "first.rules"
   - "my/*.rules"
@@ -42,28 +42,29 @@ import (
 )

 const (
-	ec2Label                  = model.MetaLabelPrefix + "ec2_"
-	ec2LabelAMI               = ec2Label + "ami"
-	ec2LabelAZ                = ec2Label + "availability_zone"
-	ec2LabelAZID              = ec2Label + "availability_zone_id"
-	ec2LabelArch              = ec2Label + "architecture"
-	ec2LabelIPv6Addresses     = ec2Label + "ipv6_addresses"
-	ec2LabelInstanceID        = ec2Label + "instance_id"
-	ec2LabelInstanceLifecycle = ec2Label + "instance_lifecycle"
-	ec2LabelInstanceState     = ec2Label + "instance_state"
-	ec2LabelInstanceType      = ec2Label + "instance_type"
-	ec2LabelOwnerID           = ec2Label + "owner_id"
-	ec2LabelPlatform          = ec2Label + "platform"
-	ec2LabelPrimarySubnetID   = ec2Label + "primary_subnet_id"
-	ec2LabelPrivateDNS        = ec2Label + "private_dns_name"
-	ec2LabelPrivateIP         = ec2Label + "private_ip"
-	ec2LabelPublicDNS         = ec2Label + "public_dns_name"
-	ec2LabelPublicIP          = ec2Label + "public_ip"
-	ec2LabelRegion            = ec2Label + "region"
-	ec2LabelSubnetID          = ec2Label + "subnet_id"
-	ec2LabelTag               = ec2Label + "tag_"
-	ec2LabelVPCID             = ec2Label + "vpc_id"
-	ec2LabelSeparator         = ","
+	ec2Label                     = model.MetaLabelPrefix + "ec2_"
+	ec2LabelAMI                  = ec2Label + "ami"
+	ec2LabelAZ                   = ec2Label + "availability_zone"
+	ec2LabelAZID                 = ec2Label + "availability_zone_id"
+	ec2LabelArch                 = ec2Label + "architecture"
+	ec2LabelIPv6Addresses        = ec2Label + "ipv6_addresses"
+	ec2LabelInstanceID           = ec2Label + "instance_id"
+	ec2LabelInstanceLifecycle    = ec2Label + "instance_lifecycle"
+	ec2LabelInstanceState        = ec2Label + "instance_state"
+	ec2LabelInstanceType         = ec2Label + "instance_type"
+	ec2LabelOwnerID              = ec2Label + "owner_id"
+	ec2LabelPlatform             = ec2Label + "platform"
+	ec2LabelPrimaryIPv6Addresses = ec2Label + "primary_ipv6_addresses"
+	ec2LabelPrimarySubnetID      = ec2Label + "primary_subnet_id"
+	ec2LabelPrivateDNS           = ec2Label + "private_dns_name"
+	ec2LabelPrivateIP            = ec2Label + "private_ip"
+	ec2LabelPublicDNS            = ec2Label + "public_dns_name"
+	ec2LabelPublicIP             = ec2Label + "public_ip"
+	ec2LabelRegion               = ec2Label + "region"
+	ec2LabelSubnetID             = ec2Label + "subnet_id"
+	ec2LabelTag                  = ec2Label + "tag_"
+	ec2LabelVPCID                = ec2Label + "vpc_id"
+	ec2LabelSeparator            = ","
 )

 // DefaultEC2SDConfig is the default EC2 SD configuration.
@@ -317,6 +318,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error

 		var subnets []string
 		var ipv6addrs []string
+		var primaryipv6addrs []string
 		subnetsMap := make(map[string]struct{})
 		for _, eni := range inst.NetworkInterfaces {
 			if eni.SubnetId == nil {
@@ -330,6 +332,15 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error

 			for _, ipv6addr := range eni.Ipv6Addresses {
 				ipv6addrs = append(ipv6addrs, *ipv6addr.Ipv6Address)
+				if *ipv6addr.IsPrimaryIpv6 {
+					// we might have to extend the slice with more than one element
+					// that could leave empty strings in the list which is intentional
+					// to keep the position/device index information
+					for int64(len(primaryipv6addrs)) <= *eni.Attachment.DeviceIndex {
+						primaryipv6addrs = append(primaryipv6addrs, "")
+					}
+					primaryipv6addrs[*eni.Attachment.DeviceIndex] = *ipv6addr.Ipv6Address
+				}
 			}
 		}
 		labels[ec2LabelSubnetID] = model.LabelValue(
@@ -342,6 +353,12 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
 				strings.Join(ipv6addrs, ec2LabelSeparator) +
 				ec2LabelSeparator)
 		}
+		if len(primaryipv6addrs) > 0 {
+			labels[ec2LabelPrimaryIPv6Addresses] = model.LabelValue(
+				ec2LabelSeparator +
+					strings.Join(primaryipv6addrs, ec2LabelSeparator) +
+					ec2LabelSeparator)
+		}
 	}

 	for _, t := range inst.Tags {
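Note: with model.MetaLabelPrefix being __meta_, the new constant resolves to the __meta_ec2_primary_ipv6_addresses meta label, whose value is a ","-framed list positioned by ENI device index (so empty slots are possible). A hedged relabeling sketch that copies the device-0 entry onto a target label; the region value is illustrative:

scrape_configs:
  - job_name: ec2
    ec2_sd_configs:
      - region: eu-west-1                 # illustrative
    relabel_configs:
      - source_labels: [__meta_ec2_primary_ipv6_addresses]
        # value looks like ",addr0,addr1,"; the fully anchored regex captures slot 0
        regex: ",([^,]*),.*"
        target_label: primary_ipv6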
@@ -97,6 +97,7 @@ func fetchApps(ctx context.Context, server string, client *http.Client) (*Applic
 		resp.Body.Close()
 	}()

+	//nolint:usestdlibvars
 	if resp.StatusCode/100 != 2 {
 		return nil, fmt.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode)
 	}
@@ -87,6 +87,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
 		resp.Body.Close()
 	}()

+	//nolint:usestdlibvars
 	if resp.StatusCode/100 != 2 {
 		return nil, fmt.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode)
 	}
@@ -186,12 +186,12 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {

 	if d.lastResults != nil && d.eventPollingEnabled {
 		// Check to see if there have been any events. If so, refresh our data.
-		opts := linodego.ListOptions{
+		eventsOpts := linodego.ListOptions{
 			PageOptions: &linodego.PageOptions{Page: 1},
 			PageSize:    25,
 			Filter:      fmt.Sprintf(filterTemplate, d.lastRefreshTimestamp.Format("2006-01-02T15:04:05")),
 		}
-		events, err := d.client.ListEvents(ctx, &opts)
+		events, err := d.client.ListEvents(ctx, &eventsOpts)
 		if err != nil {
 			var e *linodego.Error
 			if errors.As(err, &e) && e.Code == http.StatusUnauthorized {
@@ -232,31 +232,40 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
 	tg := &targetgroup.Group{
 		Source: "Linode",
 	}
-	opts := linodego.ListOptions{
+	// We need 3 of these because Linodego writes into the structure during pagination
+	listInstancesOpts := linodego.ListOptions{
+		PageSize: 500,
+	}
+	listIPAddressesOpts := linodego.ListOptions{
+		PageSize: 500,
+	}
+	listIPv6RangesOpts := linodego.ListOptions{
 		PageSize: 500,
 	}

 	// If region filter provided, use it to constrain results.
 	if d.region != "" {
-		opts.Filter = fmt.Sprintf(regionFilterTemplate, d.region)
+		listInstancesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region)
+		listIPAddressesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region)
+		listIPv6RangesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region)
 	}

 	// Gather all linode instances.
-	instances, err := d.client.ListInstances(ctx, &opts)
+	instances, err := d.client.ListInstances(ctx, &listInstancesOpts)
 	if err != nil {
 		d.metrics.failuresCount.Inc()
 		return nil, err
 	}

 	// Gather detailed IP address info for all IPs on all linode instances.
-	detailedIPs, err := d.client.ListIPAddresses(ctx, &opts)
+	detailedIPs, err := d.client.ListIPAddresses(ctx, &listIPAddressesOpts)
 	if err != nil {
 		d.metrics.failuresCount.Inc()
 		return nil, err
 	}

 	// Gather detailed IPv6 Range info for all linode instances.
-	ipv6RangeList, err := d.client.ListIPv6Ranges(ctx, &opts)
+	ipv6RangeList, err := d.client.ListIPv6Ranges(ctx, &listIPv6RangesOpts)
 	if err != nil {
 		d.metrics.failuresCount.Inc()
 		return nil, err
@@ -120,6 +120,16 @@ func Name(n string) func(*Manager) {
 	}
 }

+// Updatert sets the updatert of the manager.
+// Used to speed up tests.
+func Updatert(u time.Duration) func(*Manager) {
+	return func(m *Manager) {
+		m.mtx.Lock()
+		defer m.mtx.Unlock()
+		m.updatert = u
+	}
+}
+
 // HTTPClientOptions sets the list of HTTP client options to expose to
 // Discoverers. It is up to Discoverers to choose to use the options provided.
 func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) {
@@ -22,8 +22,10 @@ import (
 	"strconv"
 	"time"

 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/client"
 	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
@@ -58,6 +60,7 @@ var DefaultDockerSDConfig = DockerSDConfig{
 	Filters:            []Filter{},
 	HostNetworkingHost: "localhost",
 	HTTPClientConfig:   config.DefaultHTTPClientConfig,
+	MatchFirstNetwork:  true,
 }

 func init() {
@@ -73,7 +76,8 @@ type DockerSDConfig struct {
 	Filters            []Filter `yaml:"filters"`
 	HostNetworkingHost string   `yaml:"host_networking_host"`

-	RefreshInterval model.Duration `yaml:"refresh_interval"`
+	RefreshInterval   model.Duration `yaml:"refresh_interval"`
+	MatchFirstNetwork bool           `yaml:"match_first_network"`
 }

 // NewDiscovererMetrics implements discovery.Config.
@@ -119,6 +123,7 @@ type DockerDiscovery struct {
 	port               int
 	hostNetworkingHost string
 	filters            filters.Args
+	matchFirstNetwork  bool
 }

 // NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets.
@@ -131,6 +136,7 @@ func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, metrics discove
 	d := &DockerDiscovery{
 		port:               conf.Port,
 		hostNetworkingHost: conf.HostNetworkingHost,
+		matchFirstNetwork:  conf.MatchFirstNetwork,
 	}

 	hostURL, err := url.Parse(conf.Host)
@@ -202,6 +208,11 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 		return nil, fmt.Errorf("error while computing network labels: %w", err)
 	}

+	allContainers := make(map[string]types.Container)
+	for _, c := range containers {
+		allContainers[c.ID] = c
+	}
+
 	for _, c := range containers {
 		if len(c.Names) == 0 {
 			continue
@@ -218,7 +229,50 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 			commonLabels[dockerLabelContainerLabelPrefix+ln] = v
 		}

-		for _, n := range c.NetworkSettings.Networks {
+		networks := c.NetworkSettings.Networks
+		containerNetworkMode := container.NetworkMode(c.HostConfig.NetworkMode)
+		if len(networks) == 0 {
+			// Try to lookup shared networks
+			for {
+				if containerNetworkMode.IsContainer() {
+					tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()]
+					if !exists {
+						break
+					}
+					networks = tmpContainer.NetworkSettings.Networks
+					containerNetworkMode = container.NetworkMode(tmpContainer.HostConfig.NetworkMode)
+					if len(networks) > 0 {
+						break
+					}
+				} else {
+					break
+				}
+			}
+		}
+
+		if d.matchFirstNetwork && len(networks) > 1 {
+			// Match user defined network
+			if containerNetworkMode.IsUserDefined() {
+				networkMode := string(containerNetworkMode)
+				networks = map[string]*network.EndpointSettings{networkMode: networks[networkMode]}
+			} else {
+				// Get first network if container network mode has "none" value.
+				// This case appears under certain condition:
+				// 1. Container created with network set to "--net=none".
+				// 2. Disconnect network "none".
+				// 3. Reconnect network with user defined networks.
+				var first string
+				for k, n := range networks {
+					if n != nil {
+						first = k
+						break
+					}
+				}
+				networks = map[string]*network.EndpointSettings{first: networks[first]}
+			}
+		}
+
+		for _, n := range networks {
 			var added bool

 			for _, p := range c.Ports {
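Note: match_first_network defaults to true, preserving the previous behavior of keeping a single network per container; setting it to false emits one target per container network. A minimal sketch, assuming the standard local Docker socket:

scrape_configs:
  - job_name: docker
    docker_sd_configs:
      - host: unix:///var/run/docker.sock   # assumed local daemon socket
        match_first_network: false          # new option; default is true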
@@ -16,6 +16,7 @@ package moby
 import (
 	"context"
 	"fmt"
+	"sort"
 	"testing"

 	"github.com/go-kit/log"
@@ -59,7 +60,7 @@ host: %s
 	tg := tgs[0]
 	require.NotNil(t, tg)
 	require.NotNil(t, tg.Targets)
-	require.Len(t, tg.Targets, 3)
+	require.Len(t, tg.Targets, 6)

 	for i, lbls := range []model.LabelSet{
 		{
@@ -113,9 +114,259 @@ host: %s
 			"__meta_docker_container_network_mode": "host",
 			"__meta_docker_network_ip": "",
 		},
+		{
+			"__address__": "172.20.0.2:3306",
+			"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysql",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_name": "/dockersd_mysql",
+			"__meta_docker_container_network_mode": "dockersd_private",
+			"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.20.0.2",
+			"__meta_docker_network_name": "dockersd_private",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "3306",
+		},
+		{
+			"__address__": "172.20.0.2:33060",
+			"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysql",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_name": "/dockersd_mysql",
+			"__meta_docker_container_network_mode": "dockersd_private",
+			"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.20.0.2",
+			"__meta_docker_network_name": "dockersd_private",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "33060",
+		},
+		{
+			"__address__": "172.20.0.2:9104",
+			"__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysqlexporter",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
+			"__meta_docker_container_name": "/dockersd_mysql_exporter",
+			"__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.20.0.2",
+			"__meta_docker_network_name": "dockersd_private",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "9104",
+		},
 	} {
 		t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
 			require.Equal(t, lbls, tg.Targets[i])
 		})
 	}
 }
+
+func TestDockerSDRefreshMatchAllNetworks(t *testing.T) {
+	sdmock := NewSDMock(t, "dockerprom")
+	sdmock.Setup()
+
+	e := sdmock.Endpoint()
+	url := e[:len(e)-1]
+	cfgString := fmt.Sprintf(`
+---
+host: %s
+`, url)
+	var cfg DockerSDConfig
+	require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
+
+	cfg.MatchFirstNetwork = false
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+	defer metrics.Unregister()
+	defer refreshMetrics.Unregister()
+	d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics)
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	tgs, err := d.refresh(ctx)
+	require.NoError(t, err)
+
+	require.Len(t, tgs, 1)
+
+	tg := tgs[0]
+	require.NotNil(t, tg)
+	require.NotNil(t, tg.Targets)
+	require.Len(t, tg.Targets, 9)
+
+	sortFunc := func(labelSets []model.LabelSet) {
+		sort.Slice(labelSets, func(i, j int) bool {
+			return labelSets[i]["__address__"] < labelSets[j]["__address__"]
+		})
+	}
+	expected := []model.LabelSet{
+		{
+			"__address__": "172.19.0.2:9100",
+			"__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "node",
+			"__meta_docker_container_label_com_docker_compose_version": "1.25.0",
+			"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
+			"__meta_docker_container_label_prometheus_job": "node",
+			"__meta_docker_container_name": "/dockersd_node_1",
+			"__meta_docker_container_network_mode": "dockersd_default",
+			"__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.19.0.2",
+			"__meta_docker_network_label_com_docker_compose_network": "default",
+			"__meta_docker_network_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_network_label_com_docker_compose_version": "1.25.0",
+			"__meta_docker_network_name": "dockersd_default",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "9100",
+		},
+		{
+			"__address__": "172.19.0.3:80",
+			"__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "noport",
+			"__meta_docker_container_label_com_docker_compose_version": "1.25.0",
+			"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
+			"__meta_docker_container_label_prometheus_job": "noport",
+			"__meta_docker_container_name": "/dockersd_noport_1",
+			"__meta_docker_container_network_mode": "dockersd_default",
+			"__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.19.0.3",
+			"__meta_docker_network_label_com_docker_compose_network": "default",
+			"__meta_docker_network_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_network_label_com_docker_compose_version": "1.25.0",
+			"__meta_docker_network_name": "dockersd_default",
+			"__meta_docker_network_scope": "local",
+		},
+		{
+			"__address__": "localhost",
+			"__meta_docker_container_id": "54ed6cc5c0988260436cb0e739b7b6c9cad6c439a93b4c4fdbe9753e1c94b189",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "host_networking",
+			"__meta_docker_container_label_com_docker_compose_version": "1.25.0",
+			"__meta_docker_container_name": "/dockersd_host_networking_1",
+			"__meta_docker_container_network_mode": "host",
+			"__meta_docker_network_ip": "",
+		},
+		{
+			"__address__": "172.20.0.2:3306",
+			"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysql",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_name": "/dockersd_mysql",
+			"__meta_docker_container_network_mode": "dockersd_private",
+			"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.20.0.2",
+			"__meta_docker_network_name": "dockersd_private",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "3306",
+		},
+		{
+			"__address__": "172.20.0.2:33060",
+			"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysql",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_name": "/dockersd_mysql",
+			"__meta_docker_container_network_mode": "dockersd_private",
+			"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.20.0.2",
+			"__meta_docker_network_name": "dockersd_private",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "33060",
+		},
+		{
+			"__address__": "172.21.0.2:3306",
+			"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysql",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_name": "/dockersd_mysql",
+			"__meta_docker_container_network_mode": "dockersd_private",
+			"__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.21.0.2",
+			"__meta_docker_network_name": "dockersd_private1",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "3306",
+		},
+		{
+			"__address__": "172.21.0.2:33060",
+			"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysql",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_name": "/dockersd_mysql",
+			"__meta_docker_container_network_mode": "dockersd_private",
+			"__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.21.0.2",
+			"__meta_docker_network_name": "dockersd_private1",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "33060",
+		},
+		{
+			"__address__": "172.21.0.2:9104",
+			"__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysqlexporter",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
+			"__meta_docker_container_name": "/dockersd_mysql_exporter",
+			"__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.21.0.2",
+			"__meta_docker_network_name": "dockersd_private1",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "9104",
+		},
+		{
+			"__address__": "172.20.0.2:9104",
+			"__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
+			"__meta_docker_container_label_com_docker_compose_project": "dockersd",
+			"__meta_docker_container_label_com_docker_compose_service": "mysqlexporter",
+			"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
+			"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
+			"__meta_docker_container_name": "/dockersd_mysql_exporter",
+			"__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
+			"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
+			"__meta_docker_network_ingress": "false",
+			"__meta_docker_network_internal": "false",
+			"__meta_docker_network_ip": "172.20.0.2",
+			"__meta_docker_network_name": "dockersd_private",
+			"__meta_docker_network_scope": "local",
+			"__meta_docker_port_private": "9104",
+		},
+	}
+
+	sortFunc(expected)
+	sortFunc(tg.Targets)
+
+	for i, lbls := range expected {
+		t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
+			require.Equal(t, lbls, tg.Targets[i])
+		})
+	}
+}
|
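The test above normalizes the order of expected and discovered targets with a `sortFunc` helper defined outside this hunk. A hypothetical sketch of such a helper, assuming it orders label sets by their `__address__` label (an assumption, not the repository's actual code):

```go
package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/common/model"
)

func main() {
	// Hypothetical sketch of the sortFunc helper referenced above, assuming
	// it sorts label sets by __address__ so expected and discovered targets
	// can be compared index by index.
	sortFunc := func(tgs []model.LabelSet) {
		sort.Slice(tgs, func(i, j int) bool {
			return tgs[i]["__address__"] < tgs[j]["__address__"]
		})
	}

	tgs := []model.LabelSet{
		{"__address__": "172.21.0.2:9104"},
		{"__address__": "172.20.0.2:3306"},
	}
	sortFunc(tgs)
	fmt.Println(tgs[0]["__address__"]) // 172.20.0.2:3306
}
```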
@ -128,5 +128,105 @@
}
},
"Mounts": []
},
{
"Id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
"Names": [
"/dockersd_mysql"
],
"Image": "mysql:5.7.29",
"ImageID": "sha256:5d9483f9a7b21c87e0f5b9776c3e06567603c28c0062013eda127c968175f5e8",
"Command": "mysqld",
"Created": 1616273136,
"Ports": [
{
"PrivatePort": 3306,
"Type": "tcp"
},
{
"PrivatePort": 33060,
"Type": "tcp"
}
],
"Labels": {
"com.docker.compose.project": "dockersd",
"com.docker.compose.service": "mysql",
"com.docker.compose.version": "2.2.2"
},
"State": "running",
"Status": "Up 40 seconds",
"HostConfig": {
"NetworkMode": "dockersd_private"
},
"NetworkSettings": {
"Networks": {
"dockersd_private": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
"EndpointID": "80f8a61b37701a9991bb98c75ddd23fd9b7c16b5575ca81343f6b44ff4a2a9d9",
"Gateway": "172.20.0.1",
"IPAddress": "172.20.0.2",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:14:00:0a",
"DriverOpts": null
},
"dockersd_private1": {
"IPAMConfig": {},
"Links": null,
"Aliases": [
"mysql",
"mysql",
"f9ade4b83199"
],
"NetworkID": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
"EndpointID": "f80921d10e78c99a5907705aae75befea40c3d3e9f820e66ab392f7274be16b8",
"Gateway": "172.21.0.1",
"IPAddress": "172.21.0.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:15:00:02",
"DriverOpts": null
}
}
},
"Mounts": []
},
{
"Id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
"Names": [
"/dockersd_mysql_exporter"
],
"Image": "prom/mysqld-exporter:latest",
"ImageID": "sha256:121b8a7cd0525dd89aaec58ad7d34c3bb3714740e5a67daf6510ccf71ab219a9",
"Command": "/bin/mysqld_exporter",
"Created": 1616273136,
"Ports": [
{
"PrivatePort": 9104,
"Type": "tcp"
}
],
"Labels": {
"com.docker.compose.project": "dockersd",
"com.docker.compose.service": "mysqlexporter",
"com.docker.compose.version": "2.2.2",
"maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>"
},
"State": "running",
"Status": "Up 40 seconds",
"HostConfig": {
"NetworkMode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8"
},
"NetworkSettings": {
"Networks": {}
},
"Mounts": []
}
]

54
discovery/moby/testdata/dockerprom/networks.json
vendored
@ -111,5 +111,59 @@
"Containers": {},
"Options": {},
"Labels": {}
},
{
"Name": "dockersd_private",
"Id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
"Created": "2022-03-25T09:21:17.718370976+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.20.0.1/16"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {},
"Labels": {}
},
{
"Name": "dockersd_private1",
"Id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
"Created": "2022-03-25T09:21:17.718370976+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.21.0.1/16"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {},
"Labels": {}
}
]

@ -146,12 +146,18 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
openstackLabelUserID: model.LabelValue(s.UserID),
}

flavorID, ok := s.Flavor["id"].(string)
if !ok {
level.Warn(i.logger).Log("msg", "Invalid type for flavor id, expected string")
continue
flavorName, nameOk := s.Flavor["original_name"].(string)
// "original_name" is only available for microversion >= 2.47. It was added in favor of "id".
if !nameOk {
flavorID, idOk := s.Flavor["id"].(string)
if !idOk {
level.Warn(i.logger).Log("msg", "Invalid type for both flavor original_name and flavor id, expected string")
continue
}
labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID)
} else {
labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorName)
}
labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID)

imageID, ok := s.Image["id"].(string)
if ok {

@ -84,7 +84,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
},
{
"__address__": model.LabelValue("10.0.0.31:0"),
"__meta_openstack_instance_flavor": model.LabelValue("1"),
"__meta_openstack_instance_flavor": model.LabelValue("m1.medium"),
"__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682ba"),
"__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"),
"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),

@ -96,7 +96,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
},
{
"__address__": model.LabelValue("10.0.0.33:0"),
"__meta_openstack_instance_flavor": model.LabelValue("4"),
"__meta_openstack_instance_flavor": model.LabelValue("m1.small"),
"__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"),
"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),
"__meta_openstack_instance_name": model.LabelValue("merp"),

@ -108,7 +108,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
},
{
"__address__": model.LabelValue("10.0.0.34:0"),
"__meta_openstack_instance_flavor": model.LabelValue("4"),
"__meta_openstack_instance_flavor": model.LabelValue("m1.small"),
"__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"),
"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),
"__meta_openstack_instance_name": model.LabelValue("merp"),

@ -427,13 +427,17 @@ const serverListBody = `
"OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack",
"flavor": {
"id": "1",
"links": [
{
"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1",
"rel": "bookmark"
}
]
"vcpus": 2,
"ram": 4096,
"disk": 0,
"ephemeral": 0,
"swap": 0,
"original_name": "m1.medium",
"extra_specs": {
"aggregate_instance_extra_specs:general": "true",
"hw:mem_page_size": "large",
"hw:vif_multiqueue_enabled": "true"
}
},
"id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba",
"security_groups": [

@ -498,13 +502,17 @@ const serverListBody = `
"OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack",
"flavor": {
"id": "4",
"links": [
{
"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1",
"rel": "bookmark"
}
]
"vcpus": 2,
"ram": 4096,
"disk": 0,
"ephemeral": 0,
"swap": 0,
"original_name": "m1.small",
"extra_specs": {
"aggregate_instance_extra_specs:general": "true",
"hw:mem_page_size": "large",
"hw:vif_multiqueue_enabled": "true"
}
},
"id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb",
"security_groups": [

@ -66,7 +66,7 @@ endpoint: %s

_, err := createClient(&conf)

require.ErrorContains(t, err, "missing application key")
require.ErrorContains(t, err, "missing authentication information")
}

func TestParseIPs(t *testing.T) {

@ -175,14 +175,14 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
}

addr := ""
if server.IPv6 != nil {
labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String())
addr = server.IPv6.Address.String()
if server.IPv6 != nil { //nolint:staticcheck
labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String()) //nolint:staticcheck
addr = server.IPv6.Address.String() //nolint:staticcheck
}

if server.PublicIP != nil {
labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String())
addr = server.PublicIP.Address.String()
if server.PublicIP != nil { //nolint:staticcheck
labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String()) //nolint:staticcheck
addr = server.PublicIP.Address.String() //nolint:staticcheck
}

if server.PrivateIP != nil {

@ -50,6 +50,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--rules.alert.resend-delay</code> | Minimum amount of time to wait before resending an alert to Alertmanager. Use with server mode only. | `1m` |
| <code class="text-nowrap">--rules.max-concurrent-evals</code> | Global concurrency limit for independent rules that can run concurrently. When set, "query.max-concurrency" may need to be adjusted accordingly. Use with server mode only. | `4` |
| <code class="text-nowrap">--alertmanager.notification-queue-capacity</code> | The capacity of the queue for pending Alertmanager notifications. Use with server mode only. | `10000` |
| <code class="text-nowrap">--alertmanager.drain-notification-queue-on-shutdown</code> | Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down. Use with server mode only. | `true` |
| <code class="text-nowrap">--query.lookback-delta</code> | The maximum lookback duration for retrieving metrics during expression evaluations and federation. Use with server mode only. | `5m` |
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |

@ -70,6 +70,10 @@ global:

# How frequently to evaluate rules.
[ evaluation_interval: <duration> | default = 1m ]

# Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received.
# Metric availability delays are more likely to occur when Prometheus is running as a remote write target, but can also occur when there are anomalies with scraping.
[ rule_query_offset: <duration> | default = 0s ]

# The labels to add to any time series or alerts when communicating with
# external systems (federation, remote storage, Alertmanager).

@ -117,6 +121,12 @@ global:
# that will be kept in memory. 0 means no limit.
[ keep_dropped_targets: <int> | default = 0 ]

runtime:
# Configure the Go garbage collector GOGC parameter.
# See: https://tip.golang.org/doc/gc-guide#GOGC
# Lowering this number increases CPU usage.
[ gogc: <int> | default = 75 ]
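For intuition, the new `runtime.gogc` setting corresponds to Go's GOGC knob. A minimal sketch of the equivalent runtime call, assuming the server applies the configured value via `runtime/debug` (the wiring is not shown in this diff):

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// Setting gogc: 75 behaves like GOGC=75: a GC cycle is triggered when
	// the live heap grows by 75% since the previous collection, trading
	// some extra CPU for a smaller heap than the Go default of 100.
	old := debug.SetGCPercent(75)
	fmt.Printf("GOGC changed from %d to 75\n", old)
}
```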

# Rule files specifies a list of globs. Rules and alerts are read from
# all matching files.
rule_files:

@ -931,6 +941,9 @@ tls_config:
# The host to use if the container is in host networking mode.
[ host_networking_host: <string> | default = "localhost" ]

# Match the first network if the container has multiple networks defined, thus avoiding collecting duplicate targets.
[ match_first_network: <boolean> | default = true ]
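A toy sketch of the deduplication idea behind `match_first_network` (illustrative only; the names and the selection rule here are assumptions, not the discoverer's actual code):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// When a container reports several networks, keep just one (here:
	// lexicographically first by network name) so each container yields a
	// single target per exposed port instead of one per network.
	networks := map[string]string{ // network name -> container IP
		"dockersd_private":  "172.20.0.2",
		"dockersd_private1": "172.21.0.2",
	}
	names := make([]string, 0, len(networks))
	for n := range networks {
		names = append(names, n)
	}
	sort.Strings(names)
	fmt.Println("target IP:", networks[names[0]]) // 172.20.0.2
}
```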

# Optional filters to limit the discovery process to a subset of available
# resources.
# The available filters are listed in the upstream documentation:

@ -1219,6 +1232,7 @@ The following meta labels are available on targets during [relabeling](#relabel_config):
* `__meta_ec2_ipv6_addresses`: comma separated list of IPv6 addresses assigned to the instance's network interfaces, if present
* `__meta_ec2_owner_id`: the ID of the AWS account that owns the EC2 instance
* `__meta_ec2_platform`: the Operating System platform, set to 'windows' on Windows servers, absent otherwise
* `__meta_ec2_primary_ipv6_addresses`: comma separated list of the Primary IPv6 addresses of the instance, if present. The list is ordered based on the position of each corresponding network interface in the attachment order.
* `__meta_ec2_primary_subnet_id`: the subnet ID of the primary network interface, if available
* `__meta_ec2_private_dns_name`: the private DNS name of the instance, if available
* `__meta_ec2_private_ip`: the private IP address of the instance, if present

@ -1349,7 +1363,7 @@ interface.
The following meta labels are available on targets during [relabeling](#relabel_config):

* `__meta_openstack_address_pool`: the pool of the private IP.
* `__meta_openstack_instance_flavor`: the flavor ID of the OpenStack instance.
* `__meta_openstack_instance_flavor`: the flavor name of the OpenStack instance, or the flavor ID if the flavor name isn't available.
* `__meta_openstack_instance_id`: the OpenStack instance ID.
* `__meta_openstack_instance_image`: the ID of the image the OpenStack instance is using.
* `__meta_openstack_instance_name`: the OpenStack instance name.

@ -1598,7 +1612,16 @@ and serves as an interface to plug in custom service discovery mechanisms.

It reads a set of files containing a list of zero or more
`<static_config>`s. Changes to all defined files are detected via disk watches
and applied immediately. Files may be provided in YAML or JSON format. Only
and applied immediately.

While those individual files are watched for changes,
the parent directory is also watched implicitly. This is to handle [atomic
renaming](https://github.com/fsnotify/fsnotify/blob/c1467c02fba575afdb5f4201072ab8403bbf00f4/README.md?plain=1#L128) efficiently and to detect new files that match the configured globs.
This may cause issues if the parent directory contains a large number of other files,
as each of these files will be watched too, even though the events related
to them are not relevant.

Files may be provided in YAML or JSON format. Only
changes resulting in well-formed target groups are applied.
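A minimal sketch of the directory-watching pattern described above, using the fsnotify library that the linked README belongs to (the watched path is a hypothetical example):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// Watching the parent directory (not just individual files) also
	// surfaces events for atomically renamed files and for newly created
	// files that match a glob.
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Every file inside the directory now generates events, including
	// files unrelated to service discovery.
	if err := watcher.Add("/etc/prometheus/file_sd"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev := <-watcher.Events:
			log.Println("event:", ev) // e.g. CREATE after an atomic rename
		case err := <-watcher.Errors:
			log.Println("error:", err)
		}
	}
}
```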

Files must contain a list of static configs, using these formats:

@ -3803,6 +3826,10 @@ NOTE: Out-of-order ingestion is an experimental feature, but you do not need any
# into the TSDB, i.e. it is an in-order sample or an out-of-order/out-of-bounds sample
# that is within the out-of-order window, or (b) too-old, i.e. not in-order
# and before the out-of-order window.
#
# When out_of_order_time_window is greater than 0, it also affects the experimental agent. It allows
# the agent's WAL to accept out-of-order samples that fall within the specified time window relative
# to the timestamp of the last appended sample for the same series.
[ out_of_order_time_window: <duration> | default = 0s ]
```
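For intuition, a toy sketch of the acceptance rule the comment above describes, for a single series (timestamps in milliseconds; not Prometheus's actual implementation):

```go
package main

import "fmt"

// acceptable reports whether a sample timestamp can be appended, given the
// newest timestamp already seen for the series and the configured window.
func acceptable(ts, newest, windowMs int64) bool {
	if ts >= newest {
		return true // in-order
	}
	return ts >= newest-windowMs // out-of-order but inside the window
}

func main() {
	// With a 10-minute window, a sample 5 minutes older than the newest one
	// is accepted; a sample 15 minutes older is rejected as too old.
	const newest = 1_700_000_000_000
	fmt.Println(acceptable(newest-5*60*1000, newest, 10*60*1000))  // true
	fmt.Println(acceptable(newest-15*60*1000, newest, 10*60*1000)) // false
}
```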
@ -86,6 +86,9 @@ name: <string>
# rule can produce. 0 is no limit.
[ limit: <int> | default = 0 ]

# Offset the rule evaluation timestamp of this particular group by the specified duration into the past.
[ query_offset: <duration> | default = global.rule_query_offset ]

rules:
[ - <rule> ... ]
```
@ -148,6 +151,9 @@ the rule, active, pending, or inactive, are cleared as well. The event will be
recorded as an error in the evaluation, and as such no stale markers are
written.

# Rule query offset
This is useful to ensure the underlying metrics have been received and stored in Prometheus. Metric availability delays are more likely to occur when Prometheus is running as a remote write target due to the nature of distributed systems, but can also occur when there are anomalies with scraping and/or short evaluation intervals.

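A small sketch of what a query offset does to a group's evaluation timestamp (illustrative only; the variable names are assumptions):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()
	queryOffset := 1 * time.Minute // e.g. query_offset: 1m
	evalTS := now.Add(-queryOffset)
	// Rules are evaluated as if at evalTS, so samples that arrive up to one
	// minute late (remote write lag, slow scrapes) are still visible.
	fmt.Println("evaluating rules at", evalTS.Format(time.RFC3339))
}
```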
# Failed rule evaluations due to slow evaluation

If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group.

@ -31,11 +31,19 @@ production deployments it is highly recommended to use a
[named volume](https://docs.docker.com/storage/volumes/)
to ease managing the data on Prometheus upgrades.

To provide your own configuration, there are several options. Here are
two examples.
### Setting command line parameters

The Docker image is started with a number of default command line parameters, which
can be found in the [Dockerfile](https://github.com/prometheus/prometheus/blob/main/Dockerfile) (adjust the link to correspond with the version in use).

If you want to add extra command line parameters to the `docker run` command,
you will need to re-add these yourself as they will be overwritten.

### Volumes & bind-mount

To provide your own configuration, there are several options. Here are
two examples.

Bind-mount your `prometheus.yml` from the host by running:

```bash

@ -473,6 +473,9 @@ Range vectors are returned as result type `matrix`. The corresponding
Each series could have the `"values"` key, or the `"histograms"` key, or both.
For a given timestamp, there will only be one sample of either float or histogram type.

Series are returned sorted by `metric`. Functions such as [`sort`](functions.md#sort)
and [`sort_by_label`](functions.md#sort_by_label) have no effect for range vectors.

### Instant vectors

Instant vectors are returned as result type `vector`. The corresponding

@ -491,6 +494,10 @@ Instant vectors are returned as result type `vector`. The corresponding

Each series could have the `"value"` key, or the `"histogram"` key, but not both.

Series are not guaranteed to be returned in any particular order unless a function
such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label)
is used.

### Scalars

Scalar results are returned as result type `scalar`. The corresponding

@ -79,7 +79,12 @@ labels of the 1-element output vector from the input vector.
## `ceil()`

`ceil(v instant-vector)` rounds the sample values of all elements in `v` up to
the nearest integer.
the nearest integer value greater than or equal to v.

* `ceil(+Inf) = +Inf`
* `ceil(±0) = ±0`
* `ceil(1.49) = 2.0`
* `ceil(1.78) = 2.0`

## `changes()`

@ -173,7 +178,12 @@ Special cases are:
## `floor()`

`floor(v instant-vector)` rounds the sample values of all elements in `v` down
to the nearest integer.
to the nearest integer value smaller than or equal to v.

* `floor(+Inf) = +Inf`
* `floor(±0) = ±0`
* `floor(1.49) = 1.0`
* `floor(1.78) = 1.0`

## `histogram_avg()`
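For intuition, the rounding semantics and special cases documented in the two hunks above match Go's `math` package; a runnable check:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// ceil() rounds up, floor() rounds down, to the nearest integer.
	fmt.Println(math.Ceil(1.49), math.Ceil(1.78))   // 2 2
	fmt.Println(math.Floor(1.49), math.Floor(1.78)) // 1 1
	// Special cases: infinities and signed zero pass through unchanged.
	fmt.Println(math.Ceil(math.Inf(1)))           // +Inf
	fmt.Println(math.Floor(math.Copysign(0, -1))) // -0
}
```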
@ -596,10 +606,14 @@ have exactly one element, `scalar` will return `NaN`.
`sort(v instant-vector)` returns vector elements sorted by their sample values,
in ascending order. Native histograms are sorted by their sum of observations.

Please note that `sort` only affects the results of instant queries, as range query results always have a fixed output ordering.

## `sort_desc()`

Same as `sort`, but sorts in descending order.

Like `sort`, `sort_desc` only affects the results of instant queries, as range query results always have a fixed output ordering.

## `sort_by_label()`

**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.**
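A rough Go analogue of the `sort()`/`sort_desc()` ordering described above (illustrative only, operating on plain float values rather than PromQL vectors):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	samples := []float64{3, 1, 2}

	asc := append([]float64(nil), samples...)
	sort.Float64s(asc) // like sort()

	desc := append([]float64(nil), samples...)
	sort.Sort(sort.Reverse(sort.Float64Slice(desc))) // like sort_desc()

	fmt.Println(asc, desc) // [1 2 3] [3 2 1]
}
```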
@ -61,8 +61,11 @@ A Prometheus server's data directory looks something like this:
Note that a limitation of local storage is that it is not clustered or
replicated. Thus, it is not arbitrarily scalable or durable in the face of
drive or node outages and should be managed like any other single node
database. The use of RAID is suggested for storage availability, and
[snapshots](querying/api.md#snapshot) are recommended for backups. With proper
database.

[Snapshots](querying/api.md#snapshot) are recommended for backups. Backups
made without snapshots run the risk of losing data that was recorded since
the last WAL sync, which typically happens every two hours. With proper
architecture, it is possible to retain years of data in local storage.

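A minimal sketch of taking such a snapshot programmatically via the TSDB admin API (the server must be started with `--web.enable-admin-api`; host and port here are assumptions):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Trigger a snapshot; the response names a directory under
	// <data-dir>/snapshots that can then be copied off-host as a backup.
	resp, err := http.Post("http://localhost:9090/api/v1/admin/tsdb/snapshot", "", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // e.g. {"status":"success","data":{"name":"..."}}
}
```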
Alternatively, external storage may be used via the

@ -8,36 +8,35 @@ require (
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.11.5
github.com/prometheus/client_golang v1.19.0
github.com/prometheus/common v0.53.0
github.com/prometheus/prometheus v0.51.2
github.com/prometheus/client_golang v1.19.1
github.com/prometheus/common v0.54.0
github.com/prometheus/prometheus v0.52.1
github.com/stretchr/testify v1.9.0
)

require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
github.com/aws/aws-sdk-go v1.50.32 // indirect
github.com/aws/aws-sdk-go v1.51.25 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dennwc/varint v1.0.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.7 // indirect
github.com/klauspost/compress v1.17.8 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect

@ -45,33 +44,32 @@ require (
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.opentelemetry.io/collector/featuregate v1.3.0 // indirect
go.opentelemetry.io/collector/pdata v1.3.0 // indirect
go.opentelemetry.io/collector/semconv v0.96.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
go.opentelemetry.io/otel v1.24.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.opentelemetry.io/collector/featuregate v1.5.0 // indirect
go.opentelemetry.io/collector/pdata v1.5.0 // indirect
go.opentelemetry.io/collector/semconv v0.98.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect
go.opentelemetry.io/otel v1.25.0 // indirect
go.opentelemetry.io/otel/metric v1.25.0 // indirect
go.opentelemetry.io/otel/trace v1.25.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/net v0.22.0 // indirect
golang.org/x/oauth2 v0.18.0 // indirect
golang.org/x/sys v0.18.0 // indirect
golang.org/x/crypto v0.22.0 // indirect
golang.org/x/net v0.24.0 // indirect
golang.org/x/oauth2 v0.19.0 // indirect
golang.org/x/sys v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect
google.golang.org/grpc v1.62.1 // indirect
google.golang.org/protobuf v1.33.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect
google.golang.org/grpc v1.63.2 // indirect
google.golang.org/protobuf v1.34.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apimachinery v0.29.2 // indirect
k8s.io/client-go v0.29.2 // indirect
k8s.io/apimachinery v0.29.3 // indirect
k8s.io/client-go v0.29.3 // indirect
k8s.io/klog/v2 v2.120.1 // indirect
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
)

@ -1,17 +1,17 @@
|
|||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 h1:MxA59PGoCFb+vCwRQi3PhQEwHj4+r2dhuv9HG+vM7iM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g=
|
||||
github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
|
||||
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
|
||||
|
@ -26,8 +26,8 @@ github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8V
|
|||
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
|
||||
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.50.32 h1:POt81DvegnpQKM4DMDLlHz1CO6OBnEoQ1gRhYFd7QRY=
|
||||
github.com/aws/aws-sdk-go v1.50.32/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
|
||||
github.com/aws/aws-sdk-go v1.51.25 h1:DjTT8mtmsachhV6yrXR8+yhnG6120dazr720nopRsls=
|
||||
github.com/aws/aws-sdk-go v1.51.25/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
|
||||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
|
||||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
|
@ -35,8 +35,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
|
|||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
|
@ -46,14 +46,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
|||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
|
||||
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
|
||||
github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqYI5SU=
|
||||
github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs=
|
||||
github.com/digitalocean/godo v1.113.0 h1:CLtCxlP4wDAjKIQ+Hshht/UNbgAp8/J/XBH1ZtDCF9Y=
|
||||
github.com/digitalocean/godo v1.113.0/go.mod h1:Z2mTP848Vi3IXXl5YbPekUgr4j4tOePomA+OE1Ag98w=
|
||||
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
|
||||
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
|
||||
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
|
||||
github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=
|
||||
github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v26.0.1+incompatible h1:t39Hm6lpXuXtgkF0dm1t9a5HkbUfdGy6XbWexmGr+hA=
|
||||
github.com/docker/docker v26.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
|
@ -95,16 +95,16 @@ github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdX
|
|||
github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
|
||||
github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
|
||||
github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
|
||||
github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8=
|
||||
github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A=
|
||||
github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA=
|
||||
github.com/go-resty/resty/v2 v2.12.0/go.mod h1:o0yGPrkS3lOe1+eFajk6kBW8ScXzwU3hD69/gt2yB/0=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
|
||||
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
|
@ -115,10 +115,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
|
|||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
|
@ -137,8 +135,8 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
|||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk=
|
||||
github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
|
||||
github.com/gophercloud/gophercloud v1.11.0 h1:ls0O747DIq1D8SUHc7r2vI8BFbMLeLFuENaAIfEx7OM=
|
||||
github.com/gophercloud/gophercloud v1.11.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
|
||||
|
@ -165,12 +163,12 @@ github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mO
|
|||
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
|
||||
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 h1:fI1LXuBaS1d9z1kmb++Og6YD8uMRwadXorCwE+xgOFA=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702/go.mod h1:z71gkJdrkAt/Rl6C7Q79VE7AwJ5lUF+M+fzFTyIHYB0=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7 h1:pjE59CS2C9Bg+Xby0ROrnZSSBWtKwx3Sf9gqsrvIFSA=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
|
||||
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
|
||||
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.6.0 h1:RJOA2hHZ7rD1pScA4O1NF6qhkHyUdbbxjHgFNot8928=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.7.2 h1:UlE7n1GQZacCfyjv9tDVUN7HZfOXErPIfM/M039u9A0=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.7.2/go.mod h1:49tIV+pXRJTUC7fbFZ03s45LKqSQdOPP5y91eOnJo/k=
|
||||
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||
github.com/influxdata/influxdb v1.11.5 h1:+em5VOl6lhAZubXj5o6SobCwvrRs3XDlBx/MUI4schI=
|
||||
|
@ -194,8 +192,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
|
|||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
|
||||
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
|
||||
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
|
||||
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
|
@ -210,8 +208,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
|||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/linode/linodego v1.29.0 h1:gDSQWAbKMAQX8db9FDCXHhodQPrJmLcmthjx6m+PyV4=
|
||||
github.com/linode/linodego v1.29.0/go.mod h1:3k6WvCM10gillgYcnoLqIL23ST27BD9HhMsCJWb3Bpk=
|
||||
github.com/linode/linodego v1.32.0 h1:OmZzB3iON6uu84VtLFf64uKmAQqJJarvmsVguroioPI=
|
||||
github.com/linode/linodego v1.32.0/go.mod h1:y8GDP9uLVH4jTB9qyrgw79qfKdYJmNCGUOJmfuiOcmI=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
|
@ -219,12 +217,14 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
|
|||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
|
||||
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
|
||||
github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs=
|
||||
github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
|
@ -258,19 +258,19 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
|
|||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
|
||||
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
|
||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
|
||||
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
|
||||
github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
|
||||
github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8=
|
||||
github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ=
|
||||
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
|
||||
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
|
@ -279,12 +279,12 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
|
|||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/prometheus/prometheus v0.51.2 h1:U0faf1nT4CB9DkBW87XLJCBi2s8nwWXdTbyzRUAkX0w=
|
||||
github.com/prometheus/prometheus v0.51.2/go.mod h1:yv4MwOn3yHMQ6MZGHPg/U7Fcyqf+rxqiZfSur6myVtc=
|
||||
github.com/prometheus/prometheus v0.52.1 h1:BrQ29YG+mzdGh8DgHPirHbeMGNqtL+INe0rqg7ttBJ4=
|
||||
github.com/prometheus/prometheus v0.52.1/go.mod h1:3z74cVsmVH0iXOR5QBjB7Pa6A0KJeEAK5A6UsmAFb1g=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 h1:/8rfZAdFfafRXOgz+ZpMZZWZ5pYggCY9t7e/BvjaBHM=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26 h1:F+GIVtGqCFxPxO46ujf8cEOP574MBoRm3gNbPXECbxs=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
|
@ -306,21 +306,20 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8
|
|||
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.opentelemetry.io/collector/featuregate v1.3.0 h1:nrFSx+zfjdisjE9oCx25Aep3nJ9RaUjeE1qFL6eovoU=
|
||||
go.opentelemetry.io/collector/featuregate v1.3.0/go.mod h1:mm8+xyQfgDmqhyegZRNIQmoKsNnDTwWKFLsdMoXAb7A=
|
||||
go.opentelemetry.io/collector/pdata v1.3.0 h1:JRYN7tVHYFwmtQhIYbxWeiKSa2L1nCohyAs8sYqKFZo=
|
||||
go.opentelemetry.io/collector/pdata v1.3.0/go.mod h1:t7W0Undtes53HODPdSujPLTnfSR5fzT+WpL+RTaaayo=
|
||||
go.opentelemetry.io/collector/semconv v0.96.0 h1:DrZy8BpzJDnN2zFxXRj6BhfGYxNlqpFHBqyuS9fVHRY=
|
||||
go.opentelemetry.io/collector/semconv v0.96.0/go.mod h1:zOm/U3pgMIWcvrcnPbR9Xx2HinoXj46ERMK8PUV9wrs=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
|
||||
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
|
||||
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
|
||||
go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
|
||||
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
|
||||
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
|
||||
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
|
||||
go.opentelemetry.io/collector/featuregate v1.5.0 h1:uK8qnYQKz1TMkK+FDTFsywg/EybW/gbnOUaPNUkRznM=
|
||||
go.opentelemetry.io/collector/featuregate v1.5.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w=
|
||||
go.opentelemetry.io/collector/pdata v1.5.0 h1:1fKTmUpr0xCOhP/B0VEvtz7bYPQ45luQ8XFyA07j8LE=
|
||||
go.opentelemetry.io/collector/pdata v1.5.0/go.mod h1:TYj8aKRWZyT/KuKQXKyqSEvK/GV+slFaDMEI+Ke64Yw=
|
||||
go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY=
|
||||
go.opentelemetry.io/collector/semconv v0.98.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8=
|
||||
go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k=
|
||||
go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg=
|
||||
go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA=
|
||||
go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s=
|
||||
go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM=
|
||||
go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
|
@@ -331,16 +330,14 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -353,24 +350,21 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -385,22 +379,16 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
@@ -409,24 +397,21 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8 h1:8eadJkXbwDEMNwcB5O0s5Y5eCfyuCLdvaiOIaGTrWmQ=
google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 h1:Xs9lu+tLXxLIfuci70nG4cpwaRC+mRQPUL7LoIeDJC4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be h1:Zz7rLWqp0ApfsR/l7+zSHhY3PMiH2xqgxlfYfAfNpoU=
google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -434,9 +419,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4=
google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -457,16 +441,16 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A=
k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0=
k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8=
k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU=
k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg=
k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA=
k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw=
k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80=
k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
@@ -34,6 +34,20 @@
            description: 'Prometheus %(prometheusName)s has failed to refresh SD with mechanism {{$labels.mechanism}}.' % $._config,
          },
        },
        {
          alert: 'PrometheusKubernetesListWatchFailures',
          expr: |||
            increase(prometheus_sd_kubernetes_failures_total{%(prometheusSelector)s}[5m]) > 0
          ||| % $._config,
          'for': '15m',
          labels: {
            severity: 'warning',
          },
          annotations: {
            summary: 'Requests in Kubernetes SD are failing.',
            description: 'Kubernetes service discovery of Prometheus %(prometheusName)s is experiencing {{ printf "%%.0f" $value }} failures with LIST/WATCH requests to the Kubernetes API in the last 5 minutes.' % $._config,
          },
        },
        {
          alert: 'PrometheusNotificationQueueRunningFull',
          expr: |||
100
go.mod
@@ -5,18 +5,18 @@ go 1.21
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
github.com/Code-Hex/go-generics-cache v1.5.1
github.com/KimMachineGun/automemlimit v0.6.0
github.com/KimMachineGun/automemlimit v0.6.1
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
github.com/aws/aws-sdk-go v1.51.25
github.com/aws/aws-sdk-go v1.53.16
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
github.com/digitalocean/godo v1.113.0
github.com/docker/docker v26.0.1+incompatible
github.com/digitalocean/godo v1.117.0
github.com/docker/docker v26.1.3+incompatible
github.com/edsrzf/mmap-go v1.1.0
github.com/envoyproxy/go-control-plane v0.12.0
github.com/envoyproxy/protoc-gen-validate v1.0.4
@@ -29,60 +29,61 @@ require (
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/google/go-cmp v0.6.0
github.com/google/pprof v0.0.0-20240416155748-26353dc0451f
github.com/google/pprof v0.0.0-20240528025155-186aa0362fba
github.com/google/uuid v1.6.0
github.com/gophercloud/gophercloud v1.11.0
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd
github.com/gophercloud/gophercloud v1.12.0
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.28.2
github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7
github.com/hetznercloud/hcloud-go/v2 v2.7.2
github.com/hashicorp/consul/api v1.29.1
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d
github.com/hetznercloud/hcloud-go/v2 v2.9.0
github.com/ionos-cloud/sdk-go/v6 v6.1.11
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.8
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
github.com/linode/linodego v1.33.0
github.com/linode/linodego v1.35.0
github.com/miekg/dns v1.1.59
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
github.com/oklog/run v1.1.0
github.com/oklog/ulid v1.3.1
github.com/ovh/go-ovh v1.4.3
github.com/ovh/go-ovh v1.5.1
github.com/prometheus/alertmanager v0.27.0
github.com/prometheus/client_golang v1.19.0
github.com/prometheus/client_golang v1.19.1
github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.53.0
github.com/prometheus/common v0.54.0
github.com/prometheus/common/assets v0.2.0
github.com/prometheus/common/sigv4 v0.1.0
github.com/prometheus/exporter-toolkit v0.11.0
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
github.com/stretchr/testify v1.9.0
github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/collector/pdata v1.5.0
go.opentelemetry.io/collector/semconv v0.98.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0
go.opentelemetry.io/otel v1.25.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.25.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0
go.opentelemetry.io/otel/sdk v1.25.0
go.opentelemetry.io/otel/trace v1.25.0
go.opentelemetry.io/collector/pdata v1.8.0
go.opentelemetry.io/collector/semconv v0.101.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0
go.opentelemetry.io/otel v1.27.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
go.opentelemetry.io/otel/sdk v1.27.0
go.opentelemetry.io/otel/trace v1.27.0
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.5.3
go.uber.org/goleak v1.3.0
go.uber.org/multierr v1.11.0
golang.org/x/net v0.24.0
golang.org/x/oauth2 v0.19.0
golang.org/x/net v0.26.0
golang.org/x/oauth2 v0.21.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.19.0
golang.org/x/sys v0.21.0
golang.org/x/text v0.16.0
golang.org/x/time v0.5.0
golang.org/x/tools v0.20.0
google.golang.org/api v0.177.0
google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be
google.golang.org/grpc v1.63.2
google.golang.org/protobuf v1.34.0
golang.org/x/tools v0.22.0
google.golang.org/api v0.183.0
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157
google.golang.org/grpc v1.64.0
google.golang.org/protobuf v1.34.1
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.29.3
@@ -93,10 +94,10 @@ require (
)

require (
cloud.google.com/go/auth v0.3.0 // indirect
cloud.google.com/go/auth v0.5.1 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
cloud.google.com/go/compute/metadata v0.3.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
@@ -104,7 +105,7 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cilium/ebpf v0.11.0 // indirect
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect
github.com/containerd/cgroups/v3 v3.0.3 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -114,7 +115,7 @@ require (
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/fatih/color v1.15.0 // indirect
github.com/fatih/color v1.16.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-kit/kit v0.12.0 // indirect
@@ -128,7 +129,7 @@ require (
github.com/go-openapi/spec v0.20.14 // indirect
github.com/go-openapi/swag v0.22.9 // indirect
github.com/go-openapi/validate v0.23.0 // indirect
github.com/go-resty/resty/v2 v2.12.0 // indirect
github.com/go-resty/resty/v2 v2.13.1 // indirect
github.com/godbus/dbus/v5 v5.0.4 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang/glog v1.2.0 // indirect
@@ -139,16 +140,16 @@ require (
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.3 // indirect
github.com/googleapis/gax-go/v2 v2.12.4 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v1.5.0 // indirect
github.com/hashicorp/go-hclog v1.6.3 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/hashicorp/serf v0.10.1 // indirect
@@ -160,7 +161,7 @@ require (
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
@@ -182,14 +183,13 @@ require (
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/metric v1.25.0 // indirect
go.opentelemetry.io/proto/otlp v1.1.0 // indirect
golang.org/x/crypto v0.22.0 // indirect
go.opentelemetry.io/otel/metric v1.27.0 // indirect
go.opentelemetry.io/proto/otlp v1.2.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/term v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 // indirect
golang.org/x/mod v0.18.0 // indirect
golang.org/x/term v0.21.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gotest.tools/v3 v3.0.3 // indirect
219
go.sum
@@ -12,8 +12,8 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/auth v0.3.0 h1:PRyzEpGfx/Z9e8+lHsbkoUVXD0gnu4MNmm7Gp8TQNIs=
cloud.google.com/go/auth v0.3.0/go.mod h1:lBv6NKTWp8E3LPzmO1TbiiRKc4drLOfHsgmlH9ogv5w=
cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
@@ -40,10 +40,10 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqb
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
@@ -59,8 +59,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/KimMachineGun/automemlimit v0.6.0 h1:p/BXkH+K40Hax+PuWWPQ478hPjsp9h1CPDhLlA3Z37E=
github.com/KimMachineGun/automemlimit v0.6.0/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY=
github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8=
github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
@@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.51.25 h1:DjTT8mtmsachhV6yrXR8+yhnG6120dazr720nopRsls=
github.com/aws/aws-sdk-go v1.51.25/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc=
github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
@@ -120,8 +120,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
@@ -143,14 +143,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/digitalocean/godo v1.113.0 h1:CLtCxlP4wDAjKIQ+Hshht/UNbgAp8/J/XBH1ZtDCF9Y=
github.com/digitalocean/godo v1.113.0/go.mod h1:Z2mTP848Vi3IXXl5YbPekUgr4j4tOePomA+OE1Ag98w=
github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw=
github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v26.0.1+incompatible h1:t39Hm6lpXuXtgkF0dm1t9a5HkbUfdGy6XbWexmGr+hA=
github.com/docker/docker v26.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -180,8 +180,8 @@ github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
@@ -232,8 +232,8 @@ github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZC
github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw=
github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE=
github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA=
github.com/go-resty/resty/v2 v2.12.0/go.mod h1:o0yGPrkS3lOe1+eFajk6kBW8ScXzwU3hD69/gt2yB/0=
github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
@@ -319,8 +319,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20240416155748-26353dc0451f h1:WpZiq8iqvGjJ3m3wzAVKL6+0vz7VkE79iSy9GII00II=
github.com/google/pprof v0.0.0-20240416155748-26353dc0451f/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
github.com/google/pprof v0.0.0-20240528025155-186aa0362fba h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g=
github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
@@ -332,10 +332,10 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA=
github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4=
github.com/gophercloud/gophercloud v1.11.0 h1:ls0O747DIq1D8SUHc7r2vI8BFbMLeLFuENaAIfEx7OM=
github.com/gophercloud/gophercloud v1.11.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@@ -343,21 +343,23 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8=
github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE=
github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc=
github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI=
github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg=
github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8=
github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -367,9 +369,8 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
@@ -381,8 +382,8 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
@@ -408,13 +409,13 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7 h1:pjE59CS2C9Bg+Xby0ROrnZSSBWtKwx3Sf9gqsrvIFSA=
github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc=
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.7.2 h1:UlE7n1GQZacCfyjv9tDVUN7HZfOXErPIfM/M039u9A0=
github.com/hetznercloud/hcloud-go/v2 v2.7.2/go.mod h1:49tIV+pXRJTUC7fbFZ03s45LKqSQdOPP5y91eOnJo/k=
github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY=
github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -471,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linode/linodego v1.33.0 h1:cX2FYry7r6CA1ujBMsdqiM4VhvIQtnWsOuVblzfBhCw=
github.com/linode/linodego v1.33.0/go.mod h1:dSJJgIwqZCF5wnpuC6w5cyIbRtcexAm7uVvuJopGB40=
github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do=
github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -490,8 +491,8 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g=
@@ -572,8 +573,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0=
github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY=
github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI=
github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
@@ -607,8 +608,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -624,8 +625,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8=
github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
@@ -644,13 +645,13 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26 h1:F+GIVtGqCFxPxO46ujf8cEOP574MBoRm3gNbPXECbxs=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY=
@@ -722,28 +723,28 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/pdata v1.5.0 h1:1fKTmUpr0xCOhP/B0VEvtz7bYPQ45luQ8XFyA07j8LE=
go.opentelemetry.io/collector/pdata v1.5.0/go.mod h1:TYj8aKRWZyT/KuKQXKyqSEvK/GV+slFaDMEI+Ke64Yw=
go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY=
go.opentelemetry.io/collector/semconv v0.98.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8=
go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k=
go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0 h1:dT33yIHtmsqpixFsSQPwNeY5drM9wTcoL8h0FWF4oGM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0/go.mod h1:h95q0LBGh7hlAC08X2DhSeyIG02YQ0UyioTCVAqRPmc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.25.0 h1:vOL89uRfOCCNIjkisd0r7SEdJF3ZJFyCNY34fdZs8eU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.25.0/go.mod h1:8GlBGcDk8KKi7n+2S4BT/CPZQYH3erLu0/k64r1MYgo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 h1:Mbi5PKN7u322woPa85d7ebZ+SOvEoPvoiBu+ryHWgfA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0/go.mod h1:e7ciERRhZaOZXVjx5MiL8TK5+Xv7G5Gv5PA2ZDEJdL8=
go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA=
go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s=
go.opentelemetry.io/otel/sdk v1.25.0 h1:PDryEJPC8YJZQSyLY5eqLeafHtG+X7FWnf3aXMtxbqo=
go.opentelemetry.io/otel/sdk v1.25.0/go.mod h1:oFgzCM2zdsxKzz6zwpTZYLLQsFwc+K0daArPdIhuxkw=
go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM=
go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I=
go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764=
go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY=
go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q=
go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY=
go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI=
go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@ -771,9 +772,9 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
|
|||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
|
||||
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
|
||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
@ -808,8 +809,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
|
||||
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -854,17 +855,17 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
|
|||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
|
||||
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
|
||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
|
||||
golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
|
||||
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
|
||||
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -944,17 +945,17 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
|
||||
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
|
||||
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -964,8 +965,10 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -1022,8 +1025,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc
|
|||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
|
||||
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
|
||||
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
|
||||
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -1043,8 +1046,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
|
|||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.177.0 h1:8a0p/BbPa65GlqGWtUKxot4p0TV8OGOfyTjtmkXNXmk=
|
||||
google.golang.org/api v0.177.0/go.mod h1:srbhue4MLjkjbkux5p3dw/ocYOSZTaIEvf7bCOnFQDw=
|
||||
google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE=
|
||||
google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -1081,10 +1084,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
|
|||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be h1:Zz7rLWqp0ApfsR/l7+zSHhY3PMiH2xqgxlfYfAfNpoU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 h1:DujSIu+2tC9Ht0aPNA7jgj23Iq8Ewi5sgkQ++wdvonE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
|
@ -1103,8 +1106,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
|
|||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
|
||||
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
|
||||
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
|
||||
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
@ -1116,8 +1119,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
|
|||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4=
|
||||
google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
|
||||
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
|
|
@@ -30,11 +30,12 @@ import (
type FloatHistogram struct {
    // Counter reset information.
    CounterResetHint CounterResetHint
    // Currently valid schema numbers are -4 <= n <= 8. They are all for
    // base-2 bucket schemas, where 1 is a bucket boundary in each case, and
    // then each power of two is divided into 2^n logarithmic buckets. Or
    // in other words, each bucket boundary is the previous boundary times
    // 2^(2^-n).
    // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets.
    // They are all for base-2 bucket schemas, where 1 is a bucket boundary in
    // each case, and then each power of two is divided into 2^n logarithmic buckets.
    // Or in other words, each bucket boundary is the previous boundary times
    // 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by
    // the CustomValues field.
    Schema int32
    // Width of the zero bucket.
    ZeroThreshold float64
@@ -49,6 +50,16 @@ type FloatHistogram struct {
    // Observation counts in buckets. Each represents an absolute count and
    // must be zero or positive.
    PositiveBuckets, NegativeBuckets []float64
    // Holds the custom (usually upper) bounds for bucket definitions, otherwise nil.
    // This slice is interned, to be treated as immutable and copied by reference.
    // These numbers should be strictly increasing. This field is only used when the
    // schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans
    // and NegativeBuckets fields are not used in that case.
    CustomValues []float64
}

func (h *FloatHistogram) UsesCustomBuckets() bool {
    return IsCustomBucketsSchema(h.Schema)
}
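To make the new field concrete, here is a hedged sketch (not part of the commit itself) of a FloatHistogram that models a classic histogram with bounds 0.1 and 0.5; all concrete values are invented for illustration:

package main

import (
    "fmt"

    "github.com/prometheus/prometheus/model/histogram"
)

func main() {
    h := &histogram.FloatHistogram{
        Schema:        histogram.CustomBucketsSchema, // -53
        Count:         12,
        Sum:           41.5,
        PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}},
        // Absolute counts for the buckets [-Inf, 0.1], (0.1, 0.5] and (0.5, +Inf].
        PositiveBuckets: []float64{5, 4, 3},
        // Two custom bounds define three buckets; the +Inf bound stays implicit.
        CustomValues: []float64{0.1, 0.5},
    }
    fmt.Println(h.UsesCustomBuckets()) // true
}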
// Copy returns a deep copy of the Histogram.
@@ -56,28 +67,37 @@ func (h *FloatHistogram) Copy() *FloatHistogram {
    c := FloatHistogram{
        CounterResetHint: h.CounterResetHint,
        Schema: h.Schema,
        ZeroThreshold: h.ZeroThreshold,
        ZeroCount: h.ZeroCount,
        Count: h.Count,
        Sum: h.Sum,
    }

    if h.UsesCustomBuckets() {
        if len(h.CustomValues) != 0 {
            c.CustomValues = make([]float64, len(h.CustomValues))
            copy(c.CustomValues, h.CustomValues)
        }
    } else {
        c.ZeroThreshold = h.ZeroThreshold
        c.ZeroCount = h.ZeroCount

        if len(h.NegativeSpans) != 0 {
            c.NegativeSpans = make([]Span, len(h.NegativeSpans))
            copy(c.NegativeSpans, h.NegativeSpans)
        }
        if len(h.NegativeBuckets) != 0 {
            c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
            copy(c.NegativeBuckets, h.NegativeBuckets)
        }
    }

    if len(h.PositiveSpans) != 0 {
        c.PositiveSpans = make([]Span, len(h.PositiveSpans))
        copy(c.PositiveSpans, h.PositiveSpans)
    }
    if len(h.NegativeSpans) != 0 {
        c.NegativeSpans = make([]Span, len(h.NegativeSpans))
        copy(c.NegativeSpans, h.NegativeSpans)
    }
    if len(h.PositiveBuckets) != 0 {
        c.PositiveBuckets = make([]float64, len(h.PositiveBuckets))
        copy(c.PositiveBuckets, h.PositiveBuckets)
    }
    if len(h.NegativeBuckets) != 0 {
        c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
        copy(c.NegativeBuckets, h.NegativeBuckets)
    }

    return &c
}
@@ -87,32 +107,53 @@ func (h *FloatHistogram) Copy() *FloatHistogram {
func (h *FloatHistogram) CopyTo(to *FloatHistogram) {
    to.CounterResetHint = h.CounterResetHint
    to.Schema = h.Schema
    to.ZeroThreshold = h.ZeroThreshold
    to.ZeroCount = h.ZeroCount
    to.Count = h.Count
    to.Sum = h.Sum

    if h.UsesCustomBuckets() {
        to.ZeroThreshold = 0
        to.ZeroCount = 0

        to.NegativeSpans = clearIfNotNil(to.NegativeSpans)
        to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets)

        to.CustomValues = resize(to.CustomValues, len(h.CustomValues))
        copy(to.CustomValues, h.CustomValues)
    } else {
        to.ZeroThreshold = h.ZeroThreshold
        to.ZeroCount = h.ZeroCount

        to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
        copy(to.NegativeSpans, h.NegativeSpans)

        to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
        copy(to.NegativeBuckets, h.NegativeBuckets)

        to.CustomValues = clearIfNotNil(to.CustomValues)
    }

    to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
    copy(to.PositiveSpans, h.PositiveSpans)

    to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
    copy(to.NegativeSpans, h.NegativeSpans)

    to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
    copy(to.PositiveBuckets, h.PositiveBuckets)

    to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
    copy(to.NegativeBuckets, h.NegativeBuckets)
}

// CopyToSchema works like Copy, but the returned deep copy has the provided
// target schema, which must be ≤ the original schema (i.e. it must have a lower
// resolution).
// resolution). This method panics if a custom buckets schema is used in the
// receiving FloatHistogram or as the provided targetSchema.
func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram {
    if targetSchema == h.Schema {
        // Fast path.
        return h.Copy()
    }
    if h.UsesCustomBuckets() {
        panic(fmt.Errorf("cannot reduce resolution to %d when there are custom buckets", targetSchema))
    }
    if IsCustomBucketsSchema(targetSchema) {
        panic("cannot reduce resolution to custom buckets schema")
    }
    if targetSchema > h.Schema {
        panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema))
    }
@@ -185,6 +226,9 @@ func (h *FloatHistogram) TestExpression() string {
    if m.ZeroThreshold != 0 {
        res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold))
    }
    if m.UsesCustomBuckets() {
        res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues))
    }

    addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string {
        if len(spans) > 1 {
@@ -210,14 +254,18 @@ func (h *FloatHistogram) TestExpression() string {
    return "{{" + strings.Join(res, " ") + "}}"
}

// ZeroBucket returns the zero bucket.
// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets.
func (h *FloatHistogram) ZeroBucket() Bucket[float64] {
    if h.UsesCustomBuckets() {
        panic("histograms with custom buckets have no zero bucket")
    }
    return Bucket[float64]{
        Lower: -h.ZeroThreshold,
        Upper: h.ZeroThreshold,
        LowerInclusive: true,
        UpperInclusive: true,
        Count: h.ZeroCount,
        // Index is irrelevant for the zero bucket.
    }
}

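A hedged usage note on the panic documented above: callers that may encounter custom-buckets histograms should guard ZeroBucket. In this sketch, fh is assumed to be a *FloatHistogram inside some caller function:

// Sketch only: the zero bucket exists only for exponential schemas.
if fh.UsesCustomBuckets() {
    // fh.ZeroBucket() would panic here with
    // "histograms with custom buckets have no zero bucket".
    return
}
zb := fh.ZeroBucket()
fmt.Printf("zero bucket [%g, %g] holds %g observations\n", zb.Lower, zb.Upper, zb.Count)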
@@ -263,9 +311,18 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram {
//
// The method reconciles differences in the zero threshold and in the schema, and
// changes them if needed. The other histogram will not be modified in any case.
// Adding is currently only supported between 2 exponential histograms, or between
// 2 custom buckets histograms with the exact same custom bounds.
//
// This method returns a pointer to the receiving histogram for convenience.
func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
    if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
        return nil, ErrHistogramsIncompatibleSchema
    }
    if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) {
        return nil, ErrHistogramsIncompatibleBounds
    }

    switch {
    case other.CounterResetHint == h.CounterResetHint:
        // Adding apples to apples, all good. No need to change anything.
@@ -290,19 +347,28 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
        // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
    }

    otherZeroCount := h.reconcileZeroBuckets(other)
    h.ZeroCount += otherZeroCount
    if !h.UsesCustomBuckets() {
        otherZeroCount := h.reconcileZeroBuckets(other)
        h.ZeroCount += otherZeroCount
    }
    h.Count += other.Count
    h.Sum += other.Sum

    var (
        hPositiveSpans = h.PositiveSpans
        hPositiveBuckets = h.PositiveBuckets
        hNegativeSpans = h.NegativeSpans
        hNegativeBuckets = h.NegativeBuckets

        hPositiveSpans = h.PositiveSpans
        hPositiveBuckets = h.PositiveBuckets
        otherPositiveSpans = other.PositiveSpans
        otherPositiveBuckets = other.PositiveBuckets
    )

    if h.UsesCustomBuckets() {
        h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
        return h, nil
    }

    var (
        hNegativeSpans = h.NegativeSpans
        hNegativeBuckets = h.NegativeBuckets
        otherNegativeSpans = other.NegativeSpans
        otherNegativeBuckets = other.NegativeBuckets
    )
@@ -321,24 +387,40 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
    h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
    h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)

    return h
    return h, nil
}

// Sub works like Add but subtracts the other histogram.
func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
    otherZeroCount := h.reconcileZeroBuckets(other)
    h.ZeroCount -= otherZeroCount
func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) {
    if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
        return nil, ErrHistogramsIncompatibleSchema
    }
    if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) {
        return nil, ErrHistogramsIncompatibleBounds
    }

    if !h.UsesCustomBuckets() {
        otherZeroCount := h.reconcileZeroBuckets(other)
        h.ZeroCount -= otherZeroCount
    }
    h.Count -= other.Count
    h.Sum -= other.Sum

    var (
        hPositiveSpans = h.PositiveSpans
        hPositiveBuckets = h.PositiveBuckets
        hNegativeSpans = h.NegativeSpans
        hNegativeBuckets = h.NegativeBuckets

        hPositiveSpans = h.PositiveSpans
        hPositiveBuckets = h.PositiveBuckets
        otherPositiveSpans = other.PositiveSpans
        otherPositiveBuckets = other.PositiveBuckets
    )

    if h.UsesCustomBuckets() {
        h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
        return h, nil
    }

    var (
        hNegativeSpans = h.NegativeSpans
        hNegativeBuckets = h.NegativeBuckets
        otherNegativeSpans = other.NegativeSpans
        otherNegativeBuckets = other.NegativeBuckets
    )
@@ -356,7 +438,7 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
    h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
    h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)

    return h
    return h, nil
}

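Because Add and Sub now return an error instead of panicking on incompatible operands, call sites need an error check. A minimal hedged sketch (the wrapper function name is invented):

func addHistograms(h1, h2 *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
    // Add mutates h1 and also returns it; it fails when the operands mix
    // exponential and custom bucket schemas, or when two custom-buckets
    // histograms carry different bounds.
    sum, err := h1.Add(h2)
    if err != nil {
        // err is ErrHistogramsIncompatibleSchema or ErrHistogramsIncompatibleBounds.
        return nil, err
    }
    return sum, nil
}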
// Equals returns true if the given float histogram matches exactly.
@@ -365,29 +447,42 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
// but they must represent the same bucket layout to match.
// Sum, Count, ZeroCount and bucket values are compared based on their bit patterns
// because this method is about data equality rather than mathematical equality.
// We ignore fields that are not used based on the exponential / custom buckets schema,
// but check fields where differences may cause unintended behaviour even if they are not
// supposed to be used according to the schema.
func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
    if h2 == nil {
        return false
    }

    if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
        math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) ||
    if h.Schema != h2.Schema ||
        math.Float64bits(h.Count) != math.Float64bits(h2.Count) ||
        math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
        return false
    }

    if h.UsesCustomBuckets() {
        if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) {
            return false
        }
    }

    if h.ZeroThreshold != h2.ZeroThreshold ||
        math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) {
        return false
    }

    if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
        return false
    }
    if !FloatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
        return false
    }

    if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
        return false
    }
    if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
        return false
    }

    if !floatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
        return false
    }
    if !floatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
    if !FloatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
        return false
    }

@@ -403,6 +498,7 @@ func (h *FloatHistogram) Size() int {
    negSpanSize := len(h.NegativeSpans) * 8 // 8 bytes (int32 + uint32).
    posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64).
    negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64).
    customBoundSize := len(h.CustomValues) * 8 // 8 bytes (float64).

    // Total size of the struct.

@@ -417,9 +513,10 @@ func (h *FloatHistogram) Size() int {
    // fh.NegativeSpans is 24 bytes.
    // fh.PositiveBuckets is 24 bytes.
    // fh.NegativeBuckets is 24 bytes.
    structSize := 144
    // fh.CustomValues is 24 bytes.
    structSize := 168

    return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize
    return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize + customBoundSize
}

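The constant grows from 144 to 168 because the new CustomValues slice header adds 24 bytes (pointer, length and capacity on a 64-bit architecture). A hedged worked example of the accounting above, with invented slice lengths:

// For a custom-buckets histogram with 1 positive span, 3 positive buckets,
// 2 custom bounds and no negative side:
//   structSize              = 168
//   posSpanSize     = 1 * 8 =   8
//   negSpanSize     = 0 * 8 =   0
//   posBucketSize   = 3 * 8 =  24
//   negBucketSize   = 0 * 8 =   0
//   customBoundSize = 2 * 8 =  16
// Size() = 168 + 8 + 0 + 24 + 0 + 16 = 216 bytes.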
// Compact eliminates empty buckets at the beginning and end of each span, then
@@ -504,6 +601,12 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
    if h.Count < previous.Count {
        return true
    }
    if h.UsesCustomBuckets() != previous.UsesCustomBuckets() || (h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, previous.CustomValues)) {
        // Mark that something has changed or that the application has been restarted. However, this does
        // not matter so much since the change in schema will be handled directly in the chunks and PromQL
        // functions.
        return true
    }
    if h.Schema > previous.Schema {
        return true
    }
@@ -609,7 +712,7 @@ func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] {
// positive buckets in descending order (starting at the highest bucket and
// going down towards the zero bucket).
func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] {
    it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true)
    it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues)
    return &it
}

@@ -617,7 +720,7 @@ func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64]
// negative buckets in ascending order (starting at the lowest bucket and going
// up towards the zero bucket).
func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] {
    it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false)
    it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil)
    return &it
}

@@ -629,7 +732,7 @@ func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64]
func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
    return &allFloatBucketIterator{
        h: h,
        leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false),
        leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil),
        rightIter: h.floatBucketIterator(true, 0, h.Schema),
        state: -1,
    }
@@ -643,30 +746,52 @@ func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] {
    return &allFloatBucketIterator{
        h: h,
        leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true),
        leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues),
        rightIter: h.floatBucketIterator(false, 0, h.Schema),
        state: -1,
    }
}

// Validate validates consistency between span and bucket slices. Also, buckets are checked
// against negative values.
// against negative values. We check to make sure there are no unexpected fields or field values
// based on the exponential / custom buckets schema.
// We do not check for h.Count being at least as large as the sum of the
// counts in the buckets because floating point precision issues can
// create false positives here.
func (h *FloatHistogram) Validate() error {
    if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
        return fmt.Errorf("negative side: %w", err)
    }
    if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
        return fmt.Errorf("positive side: %w", err)
    }
    var nCount, pCount float64
    err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false)
    if err != nil {
        return fmt.Errorf("negative side: %w", err)
    if h.UsesCustomBuckets() {
        if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
            return fmt.Errorf("custom buckets: %w", err)
        }
        if h.ZeroCount != 0 {
            return fmt.Errorf("custom buckets: must have zero count of 0")
        }
        if h.ZeroThreshold != 0 {
            return fmt.Errorf("custom buckets: must have zero threshold of 0")
        }
        if len(h.NegativeSpans) > 0 {
            return fmt.Errorf("custom buckets: must not have negative spans")
        }
        if len(h.NegativeBuckets) > 0 {
            return fmt.Errorf("custom buckets: must not have negative buckets")
        }
    } else {
        if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
            return fmt.Errorf("positive side: %w", err)
        }
        if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
            return fmt.Errorf("negative side: %w", err)
        }
        err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false)
        if err != nil {
            return fmt.Errorf("negative side: %w", err)
        }
        if h.CustomValues != nil {
            return fmt.Errorf("histogram with exponential schema must not have custom bounds")
        }
    }
    err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
    err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
    if err != nil {
        return fmt.Errorf("positive side: %w", err)
    }
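A hedged sketch of the stricter validation: with a custom-buckets schema, any leftover exponential-only field value now fails Validate. All concrete values are invented, and the usual fmt/histogram imports are assumed:

h := &histogram.FloatHistogram{
    Schema:          histogram.CustomBucketsSchema,
    ZeroCount:       1, // Not allowed together with custom buckets.
    PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
    PositiveBuckets: []float64{1, 2},
    CustomValues:    []float64{0.5},
}
if err := h.Validate(); err != nil {
    fmt.Println(err) // custom buckets: must have zero count of 0
}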
@@ -790,17 +915,25 @@ func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
// If positive is true, the returned iterator iterates through the positive
// buckets, otherwise through the negative buckets.
//
// If absoluteStartValue is < the lowest absolute value of any upper bucket
// boundary, the iterator starts with the first bucket. Otherwise, it will skip
// all buckets with an absolute value of their upper boundary ≤
// absoluteStartValue.
// Only for exponential schemas, if absoluteStartValue is < the lowest absolute
// value of any upper bucket boundary, the iterator starts with the first bucket.
// Otherwise, it will skip all buckets with an absolute value of their upper boundary ≤
// absoluteStartValue. For custom bucket schemas, absoluteStartValue is ignored and
// no buckets are skipped.
//
// targetSchema must be ≤ the schema of FloatHistogram (and of course within the
// legal values for schemas in general). The buckets are merged to match the
// targetSchema prior to iterating (without mutating FloatHistogram).
// targetSchema prior to iterating (without mutating FloatHistogram), but custom buckets
// schemas cannot be merged with other schemas.
func (h *FloatHistogram) floatBucketIterator(
    positive bool, absoluteStartValue float64, targetSchema int32,
) floatBucketIterator {
    if h.UsesCustomBuckets() && targetSchema != h.Schema {
        panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema"))
    }
    if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) {
        panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema"))
    }
    if targetSchema > h.Schema {
        panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))
    }
@@ -816,6 +949,7 @@ func (h *FloatHistogram) floatBucketIterator(
    if positive {
        i.spans = h.PositiveSpans
        i.buckets = h.PositiveBuckets
        i.customValues = h.CustomValues
    } else {
        i.spans = h.NegativeSpans
        i.buckets = h.NegativeBuckets
@@ -825,14 +959,15 @@ func (h *FloatHistogram) floatBucketIterator(

// reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators.
func newReverseFloatBucketIterator(
    spans []Span, buckets []float64, schema int32, positive bool,
    spans []Span, buckets []float64, schema int32, positive bool, customValues []float64,
) reverseFloatBucketIterator {
    r := reverseFloatBucketIterator{
        baseBucketIterator: baseBucketIterator[float64, float64]{
            schema: schema,
            spans: spans,
            buckets: buckets,
            positive: positive,
            schema: schema,
            spans: spans,
            buckets: buckets,
            positive: positive,
            customValues: customValues,
        },
    }

@@ -946,9 +1081,9 @@ func (i *floatBucketIterator) Next() bool {
        }
    }

    // Skip buckets before absoluteStartValue.
    // Skip buckets before absoluteStartValue for exponential schemas.
    // TODO(beorn7): Maybe do something more efficient than this recursive call.
    if !i.boundReachedStartValue && getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue {
    if !i.boundReachedStartValue && IsExponentialSchema(i.targetSchema) && getBoundExponential(i.currIdx, i.targetSchema) <= i.absoluteStartValue {
        return i.Next()
    }
    i.boundReachedStartValue = true
@@ -1010,14 +1145,7 @@ func (i *allFloatBucketIterator) Next() bool {
    case 0:
        i.state = 1
        if i.h.ZeroCount > 0 {
            i.currBucket = Bucket[float64]{
                Lower: -i.h.ZeroThreshold,
                Upper: i.h.ZeroThreshold,
                LowerInclusive: true,
                UpperInclusive: true,
                Count: i.h.ZeroCount,
                // Index is irrelevant for the zero bucket.
            }
            i.currBucket = i.h.ZeroBucket()
            return true
        }
        return i.Next()
@@ -1076,7 +1204,7 @@ func addBuckets(
    for _, spanB := range spansB {
        indexB += spanB.Offset
        for j := 0; j < int(spanB.Length); j++ {
            if lowerThanThreshold && getBound(indexB, schema) <= threshold {
            if lowerThanThreshold && IsExponentialSchema(schema) && getBoundExponential(indexB, schema) <= threshold {
                goto nextLoop
            }
            lowerThanThreshold = false
@@ -1177,7 +1305,7 @@ func addBuckets(
    return spansA, bucketsA
}

func floatBucketsMatch(b1, b2 []float64) bool {
func FloatBucketsMatch(b1, b2 []float64) bool {
    if len(b1) != len(b2) {
        return false
    }
@@ -1191,7 +1319,15 @@ func floatBucketsMatch(b1, b2 []float64) bool {

// ReduceResolution reduces the float histogram's spans, buckets into target schema.
// The target schema must be smaller than the current float histogram's schema.
// This will panic if the histogram has custom buckets or if the target schema is
// a custom buckets schema.
func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram {
    if h.UsesCustomBuckets() {
        panic("cannot reduce resolution when there are custom buckets")
    }
    if IsCustomBucketsSchema(targetSchema) {
        panic("cannot reduce resolution to custom buckets schema")
    }
    if targetSchema >= h.Schema {
        panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
    }

File diff suppressed because it is too large

@@ -20,14 +20,33 @@ import (
    "strings"
)

var (
    ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets")
    ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)")
    ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative")
    ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative")
    ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided")
const (
    ExponentialSchemaMax int32 = 8
    ExponentialSchemaMin int32 = -4
    CustomBucketsSchema int32 = -53
)

var (
    ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets")
    ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)")
    ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative")
    ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative")
    ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided")
    ErrHistogramCustomBucketsMismatch = errors.New("histogram custom bounds are too few")
    ErrHistogramCustomBucketsInvalid = errors.New("histogram custom bounds must be in strictly increasing order")
    ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite")
    ErrHistogramsIncompatibleSchema = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas")
    ErrHistogramsIncompatibleBounds = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds")
)

func IsCustomBucketsSchema(s int32) bool {
    return s == CustomBucketsSchema
}

func IsExponentialSchema(s int32) bool {
    return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax
}
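The two predicates split the valid schema space between the exponential resolutions and the single custom-buckets marker; a quick hedged illustration (assuming the exported histogram package identifiers above):

fmt.Println(histogram.IsExponentialSchema(3))     // true, within [-4, 8]
fmt.Println(histogram.IsExponentialSchema(-53))   // false
fmt.Println(histogram.IsCustomBucketsSchema(-53)) // true
fmt.Println(histogram.IsCustomBucketsSchema(0))   // false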
// BucketCount is a type constraint for the count in a bucket, which can be
// float64 (for type FloatHistogram) or uint64 (for type Histogram).
type BucketCount interface {
@@ -115,6 +134,8 @@ type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct {

    currCount IBC // Count in the current bucket.
    currIdx int32 // The actual bucket index.

    customValues []float64 // Bounds (usually upper) for histograms with custom buckets.
}

func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] {
@@ -128,14 +149,19 @@ func (b *baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] {
        Index: b.currIdx,
    }
    if b.positive {
        bucket.Upper = getBound(b.currIdx, schema)
        bucket.Lower = getBound(b.currIdx-1, schema)
        bucket.Upper = getBound(b.currIdx, schema, b.customValues)
        bucket.Lower = getBound(b.currIdx-1, schema, b.customValues)
    } else {
        bucket.Lower = -getBound(b.currIdx, schema)
        bucket.Upper = -getBound(b.currIdx-1, schema)
        bucket.Lower = -getBound(b.currIdx, schema, b.customValues)
        bucket.Upper = -getBound(b.currIdx-1, schema, b.customValues)
    }
    if IsCustomBucketsSchema(schema) {
        bucket.LowerInclusive = b.currIdx == 0
        bucket.UpperInclusive = true
    } else {
        bucket.LowerInclusive = bucket.Lower < 0
        bucket.UpperInclusive = bucket.Upper > 0
    }
    bucket.LowerInclusive = bucket.Lower < 0
    bucket.UpperInclusive = bucket.Upper > 0
    return bucket
}

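The inclusivity rules above give custom buckets the classic "le" semantics; a hedged trace for invented bounds 0.1 and 0.5:

// For a custom-buckets histogram with CustomValues = []float64{0.1, 0.5}:
//   idx 0: [-Inf, 0.1]  (only the first bucket is lower-inclusive)
//   idx 1: (0.1, 0.5]   (upper bounds are always inclusive)
//   idx 2: (0.5, +Inf]
// For exponential schemas the previous rule still applies: lower bounds are
// inclusive only when negative, upper bounds only when positive.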
@@ -393,7 +419,55 @@ func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IB
    return nil
}

func getBound(idx, schema int32) float64 {
func checkHistogramCustomBounds(bounds []float64, spans []Span, numBuckets int) error {
    prev := math.Inf(-1)
    for _, curr := range bounds {
        if curr <= prev {
            return fmt.Errorf("previous bound is %f and current is %f: %w", prev, curr, ErrHistogramCustomBucketsInvalid)
        }
        prev = curr
    }
    if prev == math.Inf(1) {
        return fmt.Errorf("last +Inf bound must not be explicitly defined: %w", ErrHistogramCustomBucketsInfinite)
    }

    var spanBuckets int
    var totalSpanLength int
    for n, span := range spans {
        if span.Offset < 0 {
            return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset)
        }
        spanBuckets += int(span.Length)
        totalSpanLength += int(span.Length) + int(span.Offset)
    }
    if spanBuckets != numBuckets {
        return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch)
    }
    if (len(bounds) + 1) < totalSpanLength {
        return fmt.Errorf("only %d custom bounds defined which is insufficient to cover total span length of %d: %w", len(bounds), totalSpanLength, ErrHistogramCustomBucketsMismatch)
    }

    return nil
}
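A hedged summary of what checkHistogramCustomBounds accepts and rejects, for a single span of length 3 (the +Inf bucket is implicit, hence the len(bounds)+1 in the final check); the concrete bound values are invented:

// spans = []Span{{Offset: 0, Length: 3}}, numBuckets = 3
//   bounds []float64{0.1, 0.5}         -> nil (2+1 bounds cover 3 buckets)
//   bounds []float64{0.5, 0.1}         -> ErrHistogramCustomBucketsInvalid
//   bounds []float64{0.1, math.Inf(1)} -> ErrHistogramCustomBucketsInfinite
//   bounds []float64{0.1}              -> ErrHistogramCustomBucketsMismatch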
func getBound(idx, schema int32, customValues []float64) float64 {
    if IsCustomBucketsSchema(schema) {
        length := int32(len(customValues))
        switch {
        case idx > length || idx < -1:
            panic(fmt.Errorf("index %d out of bounds for custom bounds of length %d", idx, length))
        case idx == length:
            return math.Inf(1)
        case idx == -1:
            return math.Inf(-1)
        default:
            return customValues[idx]
        }
    }
    return getBoundExponential(idx, schema)
}
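For the custom schema, getBound is a plain index into CustomValues with two sentinels; a hedged trace for invented bounds cv = []float64{0.25, 1, 4}:

// getBound(-1, CustomBucketsSchema, cv) == math.Inf(-1) // lower edge of bucket 0
// getBound(0, CustomBucketsSchema, cv)  == 0.25
// getBound(2, CustomBucketsSchema, cv)  == 4
// getBound(3, CustomBucketsSchema, cv)  == math.Inf(1)  // the implicit +Inf bucket
// getBound(4, CustomBucketsSchema, cv) panics: index out of bounds.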
func getBoundExponential(idx, schema int32) float64 {
    // Here a bit of context about the behavior for the last bucket counting
    // regular numbers (called simply "last bucket" below) and the bucket
    // counting observations of ±Inf (called "inf bucket" below, with an idx
@@ -422,7 +496,7 @@ func getBound(idx, schema int32) float64 {
    // bucket results in precisely that. It is either frac=1.0 & exp=1024
    // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
    // by the way, a power of two where the exponent itself is a power of
    // two, 2¹⁰ in fact, which coinicides with a bucket boundary in all
    // two, 2¹⁰ in fact, which coincides with a bucket boundary in all
    // schemas.) So these are the special cases we have to catch below.
    if schema < 0 {
        exp := int(idx) << -schema
@@ -703,3 +777,10 @@ func reduceResolution[IBC InternalBucketCount](

    return targetSpans, targetBuckets
}

func clearIfNotNil[T any](items []T) []T {
    if items == nil {
        return nil
    }
    return items[:0]
}

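clearIfNotNil keeps the nil/non-nil distinction while dropping all elements, so reused target slices retain their capacity; a small hedged sketch (placed inside this package, since the helper is unexported):

var a []float64         // nil input stays nil
b := []float64{1, 2, 3} // non-nil input is truncated to length 0
fmt.Println(clearIfNotNil(a) == nil) // true
c := clearIfNotNil(b)
fmt.Println(len(c), cap(c)) // 0 3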
@@ -21,7 +21,7 @@ import (
    "github.com/stretchr/testify/require"
)

func TestGetBound(t *testing.T) {
func TestGetBoundExponential(t *testing.T) {
    scenarios := []struct {
        idx int32
        schema int32
@@ -105,7 +105,7 @@ func TestGetBound(t *testing.T) {
    }

    for _, s := range scenarios {
        got := getBound(s.idx, s.schema)
        got := getBoundExponential(s.idx, s.schema)
        if s.want != got {
            require.Equal(t, s.want, got, "idx %d, schema %d", s.idx, s.schema)
        }

@@ -49,11 +49,12 @@ const (
type Histogram struct {
    // Counter reset information.
    CounterResetHint CounterResetHint
    // Currently valid schema numbers are -4 <= n <= 8. They are all for
    // base-2 bucket schemas, where 1 is a bucket boundary in each case, and
    // then each power of two is divided into 2^n logarithmic buckets. Or
    // in other words, each bucket boundary is the previous boundary times
    // 2^(2^-n).
    // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets.
    // They are all for base-2 bucket schemas, where 1 is a bucket boundary in
    // each case, and then each power of two is divided into 2^n logarithmic buckets.
    // Or in other words, each bucket boundary is the previous boundary times
    // 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by
    // the CustomValues field.
    Schema int32
    // Width of the zero bucket.
    ZeroThreshold float64
@@ -69,6 +70,12 @@ type Histogram struct {
    // count. All following ones are deltas relative to the previous
    // element.
    PositiveBuckets, NegativeBuckets []int64
    // Holds the custom (usually upper) bounds for bucket definitions, otherwise nil.
    // This slice is interned, to be treated as immutable and copied by reference.
    // These numbers should be strictly increasing. This field is only used when the
    // schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans
    // and NegativeBuckets fields are not used in that case.
    CustomValues []float64
}

// A Span defines a continuous sequence of buckets.
@@ -80,33 +87,46 @@ type Span struct {
    Length uint32
}

func (h *Histogram) UsesCustomBuckets() bool {
    return IsCustomBucketsSchema(h.Schema)
}
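Unlike FloatHistogram, the integer Histogram delta-encodes its bucket slices, as the struct comment notes; a hedged sketch of a custom-buckets Histogram whose absolute bucket counts are 5, 4 and 3 (all values invented):

h := &histogram.Histogram{
    Schema:        histogram.CustomBucketsSchema,
    Count:         12,
    Sum:           41.5,
    PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}},
    // Delta encoding: 5, then 5-1=4, then 4-1=3.
    PositiveBuckets: []int64{5, -1, -1},
    CustomValues:    []float64{0.1, 0.5},
}
fmt.Println(h.UsesCustomBuckets()) // true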
// Copy returns a deep copy of the Histogram.
|
||||
func (h *Histogram) Copy() *Histogram {
|
||||
c := Histogram{
|
||||
CounterResetHint: h.CounterResetHint,
|
||||
Schema: h.Schema,
|
||||
ZeroThreshold: h.ZeroThreshold,
|
||||
ZeroCount: h.ZeroCount,
|
||||
Count: h.Count,
|
||||
Sum: h.Sum,
|
||||
}
|
||||
|
||||
if h.UsesCustomBuckets() {
|
||||
if len(h.CustomValues) != 0 {
|
||||
c.CustomValues = make([]float64, len(h.CustomValues))
|
||||
copy(c.CustomValues, h.CustomValues)
|
||||
}
|
||||
} else {
|
||||
c.ZeroThreshold = h.ZeroThreshold
|
||||
c.ZeroCount = h.ZeroCount
|
||||
|
||||
if len(h.NegativeSpans) != 0 {
|
||||
c.NegativeSpans = make([]Span, len(h.NegativeSpans))
|
||||
copy(c.NegativeSpans, h.NegativeSpans)
|
||||
}
|
||||
if len(h.NegativeBuckets) != 0 {
|
||||
c.NegativeBuckets = make([]int64, len(h.NegativeBuckets))
|
||||
copy(c.NegativeBuckets, h.NegativeBuckets)
|
||||
}
|
||||
}
|
||||
|
||||
if len(h.PositiveSpans) != 0 {
|
||||
c.PositiveSpans = make([]Span, len(h.PositiveSpans))
|
||||
copy(c.PositiveSpans, h.PositiveSpans)
|
||||
}
|
||||
if len(h.NegativeSpans) != 0 {
|
||||
c.NegativeSpans = make([]Span, len(h.NegativeSpans))
|
||||
copy(c.NegativeSpans, h.NegativeSpans)
|
||||
}
|
||||
if len(h.PositiveBuckets) != 0 {
|
||||
c.PositiveBuckets = make([]int64, len(h.PositiveBuckets))
|
||||
copy(c.PositiveBuckets, h.PositiveBuckets)
|
||||
}
|
||||
if len(h.NegativeBuckets) != 0 {
|
||||
c.NegativeBuckets = make([]int64, len(h.NegativeBuckets))
|
||||
copy(c.NegativeBuckets, h.NegativeBuckets)
|
||||
}
|
||||
|
||||
return &c
|
||||
}
|
||||
|
@ -116,22 +136,36 @@ func (h *Histogram) Copy() *Histogram {
|
|||
func (h *Histogram) CopyTo(to *Histogram) {
|
||||
to.CounterResetHint = h.CounterResetHint
|
||||
to.Schema = h.Schema
|
||||
to.ZeroThreshold = h.ZeroThreshold
|
||||
to.ZeroCount = h.ZeroCount
|
||||
to.Count = h.Count
|
||||
to.Sum = h.Sum
|
||||
|
||||
if h.UsesCustomBuckets() {
|
||||
to.ZeroThreshold = 0
|
||||
to.ZeroCount = 0
|
||||
|
||||
to.NegativeSpans = clearIfNotNil(to.NegativeSpans)
|
||||
to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets)
|
||||
|
||||
to.CustomValues = resize(to.CustomValues, len(h.CustomValues))
|
||||
copy(to.CustomValues, h.CustomValues)
|
||||
} else {
|
||||
to.ZeroThreshold = h.ZeroThreshold
|
||||
to.ZeroCount = h.ZeroCount
|
||||
|
||||
to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
|
||||
copy(to.NegativeSpans, h.NegativeSpans)
|
||||
|
||||
to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
|
||||
copy(to.NegativeBuckets, h.NegativeBuckets)
|
||||
|
||||
to.CustomValues = clearIfNotNil(to.CustomValues)
|
||||
}
|
||||
|
||||
to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
|
||||
copy(to.PositiveSpans, h.PositiveSpans)
|
||||
|
||||
to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
|
||||
copy(to.NegativeSpans, h.NegativeSpans)
|
||||
|
||||
to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
|
||||
copy(to.PositiveBuckets, h.PositiveBuckets)
|
||||
|
||||
to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
|
||||
copy(to.NegativeBuckets, h.NegativeBuckets)
|
||||
}
|
||||
|
||||
// String returns a string representation of the Histogram.

@@ -165,8 +199,11 @@ func (h *Histogram) String() string {
	return sb.String()
}

// ZeroBucket returns the zero bucket.
// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets.
func (h *Histogram) ZeroBucket() Bucket[uint64] {
	if h.UsesCustomBuckets() {
		panic("histograms with custom buckets have no zero bucket")
	}
	return Bucket[uint64]{
		Lower: -h.ZeroThreshold,
		Upper: h.ZeroThreshold,
@@ -179,14 +216,14 @@ func (h *Histogram) ZeroBucket() Bucket[uint64] {

// PositiveBucketIterator returns a BucketIterator to iterate over all positive
// buckets in ascending order (starting next to the zero bucket and going up).
func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] {
	it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true)
	it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues)
	return &it
}

// NegativeBucketIterator returns a BucketIterator to iterate over all negative
// buckets in descending order (starting next to the zero bucket and going down).
func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] {
	it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false)
	it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil)
	return &it
}
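For orientation, a sketch of how the returned iterators are consumed; the Next/At protocol and the generic Bucket type are assumed from the call sites elsewhere in this diff, and this helper is illustrative, not part of the change.

// drainBuckets collects every bucket an iterator yields, e.g. from
// h.PositiveBucketIterator() above.
func drainBuckets(it BucketIterator[uint64]) []Bucket[uint64] {
	var out []Bucket[uint64]
	for it.Next() { // advance to the next populated bucket
		out = append(out, it.At()) // read the bucket at the current position
	}
	return out
}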
@@ -207,30 +244,42 @@ func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] {
// but they must represent the same bucket layout to match.
// Sum is compared based on its bit pattern because this method
// is about data equality rather than mathematical equality.
// We ignore fields that are not used based on the exponential / custom buckets schema,
// but check fields where differences may cause unintended behaviour even if they are not
// supposed to be used according to the schema.
func (h *Histogram) Equals(h2 *Histogram) bool {
	if h2 == nil {
		return false
	}

	if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
		h.ZeroCount != h2.ZeroCount || h.Count != h2.Count ||
	if h.Schema != h2.Schema || h.Count != h2.Count ||
		math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
		return false
	}

	if h.UsesCustomBuckets() {
		if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) {
			return false
		}
	}

	if h.ZeroThreshold != h2.ZeroThreshold || h.ZeroCount != h2.ZeroCount {
		return false
	}

	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
		return false
	}
	if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) {
		return false
	}

	if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
		return false
	}
	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
		return false
	}

	if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) {
		return false
	}
	if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) {
		return false
	}

	return true
}
@@ -321,17 +370,36 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram {
	}
	fh.CounterResetHint = h.CounterResetHint
	fh.Schema = h.Schema
	fh.ZeroThreshold = h.ZeroThreshold
	fh.ZeroCount = float64(h.ZeroCount)
	fh.Count = float64(h.Count)
	fh.Sum = h.Sum

	if h.UsesCustomBuckets() {
		fh.ZeroThreshold = 0
		fh.ZeroCount = 0
		fh.NegativeSpans = clearIfNotNil(fh.NegativeSpans)
		fh.NegativeBuckets = clearIfNotNil(fh.NegativeBuckets)

		fh.CustomValues = resize(fh.CustomValues, len(h.CustomValues))
		copy(fh.CustomValues, h.CustomValues)
	} else {
		fh.ZeroThreshold = h.ZeroThreshold
		fh.ZeroCount = float64(h.ZeroCount)

		fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans))
		copy(fh.NegativeSpans, h.NegativeSpans)

		fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets))
		var currentNegative float64
		for i, b := range h.NegativeBuckets {
			currentNegative += float64(b)
			fh.NegativeBuckets[i] = currentNegative
		}
		fh.CustomValues = clearIfNotNil(fh.CustomValues)
	}

	fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans))
	copy(fh.PositiveSpans, h.PositiveSpans)

	fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans))
	copy(fh.NegativeSpans, h.NegativeSpans)

	fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets))
	var currentPositive float64
	for i, b := range h.PositiveBuckets {

@@ -339,13 +407,6 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram {
		fh.PositiveBuckets[i] = currentPositive
	}

	fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets))
	var currentNegative float64
	for i, b := range h.NegativeBuckets {
		currentNegative += float64(b)
		fh.NegativeBuckets[i] = currentNegative
	}

	return fh
}
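As the running currentPositive/currentNegative sums above indicate, the integer histogram's bucket slices are delta-encoded (each int64 is the difference from the previous bucket's count), while the float histogram stores absolute counts. A standalone sketch of that decoding, using bucket values that also appear in the tests below:

package main

import "fmt"

func main() {
	deltas := []int64{1, 2, -2, 1, -1, 0, 0} // delta-encoded PositiveBuckets
	abs := make([]float64, len(deltas))
	var cur float64
	for i, d := range deltas {
		cur += float64(d) // running sum turns deltas into absolute counts
		abs[i] = cur
	}
	fmt.Println(abs) // [1 3 1 2 1 1 1], summing to 10 observations in total
}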
@@ -357,25 +418,47 @@ func resize[T any](items []T, n int) []T {
}

// Validate validates consistency between span and bucket slices. Also, buckets are checked
// against negative values.
// against negative values. We check to make sure there are no unexpected fields or field values
// based on the exponential / custom buckets schema.
// For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a
// strict h.Count = nCount + pCount + h.ZeroCount check is performed.
// Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount),
// because NaN observations do not increment the values of buckets (but they do increment
// the total h.Count).
func (h *Histogram) Validate() error {
	if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
		return fmt.Errorf("negative side: %w", err)
	}
	if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
		return fmt.Errorf("positive side: %w", err)
	}
	var nCount, pCount uint64
	err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true)
	if err != nil {
		return fmt.Errorf("negative side: %w", err)
	if h.UsesCustomBuckets() {
		if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
			return fmt.Errorf("custom buckets: %w", err)
		}
		if h.ZeroCount != 0 {
			return fmt.Errorf("custom buckets: must have zero count of 0")
		}
		if h.ZeroThreshold != 0 {
			return fmt.Errorf("custom buckets: must have zero threshold of 0")
		}
		if len(h.NegativeSpans) > 0 {
			return fmt.Errorf("custom buckets: must not have negative spans")
		}
		if len(h.NegativeBuckets) > 0 {
			return fmt.Errorf("custom buckets: must not have negative buckets")
		}
	} else {
		if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
			return fmt.Errorf("positive side: %w", err)
		}
		if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
			return fmt.Errorf("negative side: %w", err)
		}
		err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true)
		if err != nil {
			return fmt.Errorf("negative side: %w", err)
		}
		if h.CustomValues != nil {
			return fmt.Errorf("histogram with exponential schema must not have custom bounds")
		}
	}
	err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
	err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
	if err != nil {
		return fmt.Errorf("positive side: %w", err)
	}
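A sketch of how the custom-buckets rules above surface to a caller; the field values are illustrative and the error text is the one from the branch shown above:

h := Histogram{
	Schema:          CustomBucketsSchema,
	Count:           2,
	PositiveSpans:   []Span{{Offset: 0, Length: 2}},
	PositiveBuckets: []int64{1, 0},
	CustomValues:    []float64{1, 2},
	ZeroCount:       1, // forbidden for custom buckets, per the check above
}
// Expected to fail with "custom buckets: must have zero count of 0".
fmt.Println(h.Validate())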
@@ -398,12 +481,13 @@ type regularBucketIterator struct {
	baseBucketIterator[uint64, int64]
}

func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) regularBucketIterator {
func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool, customValues []float64) regularBucketIterator {
	i := baseBucketIterator[uint64, int64]{
		schema:   schema,
		spans:    spans,
		buckets:  buckets,
		positive: positive,
		schema:       schema,
		spans:        spans,
		buckets:      buckets,
		positive:     positive,
		customValues: customValues,
	}
	return regularBucketIterator{i}
}
@@ -477,7 +561,7 @@ func (c *cumulativeBucketIterator) Next() bool {

	if c.emptyBucketCount > 0 {
		// We are traversing through empty buckets at the moment.
		c.currUpper = getBound(c.currIdx, c.h.Schema)
		c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)
		c.currIdx++
		c.emptyBucketCount--
		return true

@@ -494,7 +578,7 @@ func (c *cumulativeBucketIterator) Next() bool {

	c.currCount += c.h.PositiveBuckets[c.posBucketsIdx]
	c.currCumulativeCount += uint64(c.currCount)
	c.currUpper = getBound(c.currIdx, c.h.Schema)
	c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)

	c.posBucketsIdx++
	c.idxInSpan++
@@ -524,7 +608,15 @@ func (c *cumulativeBucketIterator) At() Bucket[uint64] {

// ReduceResolution reduces the histogram's spans, buckets into target schema.
// The target schema must be smaller than the current histogram's schema.
// This will panic if the histogram has custom buckets or if the target schema is
// a custom buckets schema.
func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram {
	if h.UsesCustomBuckets() {
		panic("cannot reduce resolution when there are custom buckets")
	}
	if IsCustomBucketsSchema(targetSchema) {
		panic("cannot reduce resolution to custom buckets schema")
	}
	if targetSchema >= h.Schema {
		panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
	}
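For intuition on why the panics above exist: in the exponential schemas, lowering the schema by one doubles every bucket's width, so counts merge pairwise (the real code also remaps span offsets, omitted here), whereas custom bucket layouts have no such relationship between resolutions. A standalone sketch of the merge arithmetic:

package main

import "fmt"

func main() {
	// Absolute counts of four adjacent buckets at some schema n.
	fine := []uint64{3, 5, 2, 7}
	// At schema n-1 each bucket covers what two schema-n buckets covered.
	coarse := make([]uint64, 0, len(fine)/2)
	for i := 0; i+1 < len(fine); i += 2 {
		coarse = append(coarse, fine[i]+fine[i+1])
	}
	fmt.Println(coarse) // [8 9]
}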
@@ -69,6 +69,21 @@ func TestHistogramString(t *testing.T) {
			},
			expectedString: "{count:19, sum:2.7, [-64,-32):1, [-16,-8):1, [-8,-4):2, [-4,-2):1, [-2,-1):3, [-1,-0.5):1, (0.5,1]:1, (1,2]:3, (2,4]:1, (4,8]:2, (8,16]:1, (16,32]:1, (32,64]:1}",
		},
		{
			histogram: Histogram{
				Schema: CustomBucketsSchema,
				Count:  19,
				Sum:    2.7,
				PositiveSpans: []Span{
					{Offset: 0, Length: 4},
					{Offset: 0, Length: 0},
					{Offset: 0, Length: 3},
				},
				PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
				CustomValues:    []float64{1, 2, 5, 10, 15, 20, 25, 50},
			},
			expectedString: "{count:19, sum:2.7, [-Inf,1]:1, (1,2]:3, (2,5]:1, (5,10]:2, (10,15]:1, (15,20]:1, (20,25]:1}",
		},
	}

	for i, c := range cases {
@@ -208,6 +223,26 @@ func TestCumulativeBucketIterator(t *testing.T) {
				{Lower: math.Inf(-1), Upper: 16, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 2},
			},
		},
		{
			histogram: Histogram{
				Schema: CustomBucketsSchema,
				PositiveSpans: []Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 1, -1, 0},
				CustomValues:    []float64{5, 10, 20, 50},
			},
			expectedBuckets: []Bucket[uint64]{
				{Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
				{Lower: math.Inf(-1), Upper: 10, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 1},
				{Lower: math.Inf(-1), Upper: 20, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 2},
				{Lower: math.Inf(-1), Upper: 50, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: 3},
				{Lower: math.Inf(-1), Upper: math.Inf(1), Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 4},
			},
		},
	}

	for i, c := range cases {
@@ -368,6 +403,62 @@ func TestRegularBucketIterator(t *testing.T) {
			},
			expectedNegativeBuckets: []Bucket[uint64]{},
		},
		{
			histogram: Histogram{
				Schema: CustomBucketsSchema,
				PositiveSpans: []Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 1, -1, 0},
				CustomValues:    []float64{5, 10, 20, 50},
			},
			expectedPositiveBuckets: []Bucket[uint64]{
				{Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
				{Lower: 5, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
				{Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
				{Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
			},
			expectedNegativeBuckets: []Bucket[uint64]{},
		},
		{
			histogram: Histogram{
				Schema: CustomBucketsSchema,
				PositiveSpans: []Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 1, -1, 0},
				CustomValues:    []float64{0, 10, 20, 50},
			},
			expectedPositiveBuckets: []Bucket[uint64]{
				{Lower: math.Inf(-1), Upper: 0, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
				{Lower: 0, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
				{Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
				{Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
			},
			expectedNegativeBuckets: []Bucket[uint64]{},
		},
		{
			histogram: Histogram{
				Schema: CustomBucketsSchema,
				PositiveSpans: []Span{
					{Offset: 0, Length: 5},
				},
				PositiveBuckets: []int64{1, 1, 0, -1, 0},
				CustomValues:    []float64{-5, 0, 20, 50},
			},
			expectedPositiveBuckets: []Bucket[uint64]{
				{Lower: math.Inf(-1), Upper: -5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
				{Lower: -5, Upper: 0, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
				{Lower: 0, Upper: 20, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 2},
				{Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
				{Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
			},
			expectedNegativeBuckets: []Bucket[uint64]{},
		},
	}

	for i, c := range cases {
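The Lower/Upper values asserted above follow a simple rule for custom buckets: bucket 0 is [-Inf, CustomValues[0]], bucket i is (CustomValues[i-1], CustomValues[i]], and the bucket one past the last bound is the (CustomValues[last], +Inf] overflow bucket. A standalone sketch of the upper-bound lookup (the helper is illustrative, mirroring the boundaries in the tests rather than the package's getBound):

package main

import (
	"fmt"
	"math"
)

// upperFor returns the upper boundary of custom bucket idx for the given bounds.
func upperFor(idx int, bounds []float64) float64 {
	if idx >= len(bounds) {
		return math.Inf(1) // overflow bucket
	}
	return bounds[idx]
}

func main() {
	bounds := []float64{5, 10, 20, 50}
	for idx := 0; idx <= len(bounds); idx++ {
		fmt.Println(idx, upperFor(idx, bounds)) // 0 5, 1 10, 2 20, 3 50, 4 +Inf
	}
}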
@@ -461,11 +552,81 @@ func TestHistogramToFloat(t *testing.T) {
	}
}

func TestCustomBucketsHistogramToFloat(t *testing.T) {
	h := Histogram{
		Schema: CustomBucketsSchema,
		Count:  10,
		Sum:    2.7,
		PositiveSpans: []Span{
			{Offset: 0, Length: 4},
			{Offset: 0, Length: 0},
			{Offset: 0, Length: 3},
		},
		PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
		CustomValues:    []float64{5, 10, 20, 50, 100, 500},
	}
	cases := []struct {
		name string
		fh   *FloatHistogram
	}{
		{name: "without prior float histogram"},
		{name: "prior float histogram with more buckets", fh: &FloatHistogram{
			Schema:        2,
			Count:         3,
			Sum:           5,
			ZeroThreshold: 4,
			ZeroCount:     1,
			PositiveSpans: []Span{
				{Offset: 1, Length: 2},
				{Offset: 1, Length: 2},
				{Offset: 1, Length: 2},
			},
			PositiveBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9},
			NegativeSpans: []Span{
				{Offset: 20, Length: 6},
				{Offset: 12, Length: 7},
				{Offset: 33, Length: 10},
			},
			NegativeBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9},
		}},
		{name: "prior float histogram with fewer buckets", fh: &FloatHistogram{
			Schema:        2,
			Count:         3,
			Sum:           5,
			ZeroThreshold: 4,
			ZeroCount:     1,
			PositiveSpans: []Span{
				{Offset: 1, Length: 2},
				{Offset: 1, Length: 2},
				{Offset: 1, Length: 2},
			},
			PositiveBuckets: []float64{1, 2},
			NegativeSpans: []Span{
				{Offset: 20, Length: 6},
				{Offset: 12, Length: 7},
				{Offset: 33, Length: 10},
			},
			NegativeBuckets: []float64{1, 2},
		}},
	}

	require.NoError(t, h.Validate())
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			hStr := h.String()
			fh := h.ToFloat(c.fh)
			require.NoError(t, fh.Validate())
			require.Equal(t, hStr, h.String())
			require.Equal(t, hStr, fh.String())
		})
	}
}
// TestHistogramEquals tests both Histogram and FloatHistogram.
func TestHistogramEquals(t *testing.T) {
	h1 := Histogram{
		Schema:        3,
		Count:         61,
		Count:         62,
		Sum:           2.7,
		ZeroThreshold: 0.1,
		ZeroCount:     42,

@@ -495,6 +656,15 @@ func TestHistogramEquals(t *testing.T) {
		require.False(t, h1f.Equals(h2f))
		require.False(t, h2f.Equals(h1f))
	}
	notEqualsUntilFloatConv := func(h1, h2 Histogram) {
		require.False(t, h1.Equals(&h2))
		require.False(t, h2.Equals(&h1))
		h1f, h2f := h1.ToFloat(nil), h2.ToFloat(nil)
		require.True(t, h1f.Equals(h2f))
		require.True(t, h2f.Equals(h1f))
	}

	require.NoError(t, h1.Validate())

	h2 := h1.Copy()
	equals(h1, *h2)
@@ -602,6 +772,45 @@ func TestHistogramEquals(t *testing.T) {

	// Sum StaleNaN vs regular NaN.
	notEquals(*hStale, *hNaN)

	// Has non-empty custom bounds for exponential schema.
	hCustom := h1.Copy()
	hCustom.CustomValues = []float64{1, 2, 3}
	equals(h1, *hCustom)

	cbh1 := Histogram{
		Schema: CustomBucketsSchema,
		Count:  10,
		Sum:    2.7,
		PositiveSpans: []Span{
			{Offset: 0, Length: 4},
			{Offset: 10, Length: 3},
		},
		PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
		CustomValues:    []float64{0.1, 0.2, 0.5, 1, 2, 5, 10, 15, 20, 25, 50, 75, 100, 200, 250, 500, 1000},
	}

	require.NoError(t, cbh1.Validate())

	cbh2 := cbh1.Copy()
	equals(cbh1, *cbh2)

	// Has different custom bounds for custom buckets schema.
	cbh2 = cbh1.Copy()
	cbh2.CustomValues = []float64{0.1, 0.2, 0.5}
	notEquals(cbh1, *cbh2)

	// Has non-empty negative spans and buckets for custom buckets schema.
	cbh2 = cbh1.Copy()
	cbh2.NegativeSpans = []Span{{Offset: 0, Length: 1}}
	cbh2.NegativeBuckets = []int64{1}
	notEqualsUntilFloatConv(cbh1, *cbh2)

	// Has non-zero zero count and threshold for custom buckets schema.
	cbh2 = cbh1.Copy()
	cbh2.ZeroThreshold = 0.1
	cbh2.ZeroCount = 10
	notEqualsUntilFloatConv(cbh1, *cbh2)
}

func TestHistogramCopy(t *testing.T) {
@@ -640,6 +849,21 @@ func TestHistogramCopy(t *testing.T) {
			},
			expected: &Histogram{},
		},
		{
			name: "with custom buckets",
			orig: &Histogram{
				Schema:          CustomBucketsSchema,
				PositiveSpans:   []Span{{-2, 1}},
				PositiveBuckets: []int64{1, 3, -3, 42},
				CustomValues:    []float64{5, 10, 15},
			},
			expected: &Histogram{
				Schema:          CustomBucketsSchema,
				PositiveSpans:   []Span{{-2, 1}},
				PositiveBuckets: []int64{1, 3, -3, 42},
				CustomValues:    []float64{5, 10, 15},
			},
		},
	}

	for _, tcase := range cases {
@@ -690,6 +914,21 @@ func TestHistogramCopyTo(t *testing.T) {
			},
			expected: &Histogram{},
		},
		{
			name: "with custom buckets",
			orig: &Histogram{
				Schema:          CustomBucketsSchema,
				PositiveSpans:   []Span{{-2, 1}},
				PositiveBuckets: []int64{1, 3, -3, 42},
				CustomValues:    []float64{5, 10, 15},
			},
			expected: &Histogram{
				Schema:          CustomBucketsSchema,
				PositiveSpans:   []Span{{-2, 1}},
				PositiveBuckets: []int64{1, 3, -3, 42},
				CustomValues:    []float64{5, 10, 15},
			},
		},
	}

	for _, tcase := range cases {
@@ -971,6 +1210,86 @@ func TestHistogramCompact(t *testing.T) {
				NegativeBuckets: []int64{2, 3},
			},
		},
		{
			"nothing should happen with custom buckets",
			&Histogram{
				Schema:          CustomBucketsSchema,
				PositiveSpans:   []Span{{-2, 1}, {2, 3}},
				PositiveBuckets: []int64{1, 3, -3, 42},
				CustomValues:    []float64{5, 10, 15},
			},
			0,
			&Histogram{
				Schema:          CustomBucketsSchema,
				PositiveSpans:   []Span{{-2, 1}, {2, 3}},
				PositiveBuckets: []int64{1, 3, -3, 42},
				CustomValues:    []float64{5, 10, 15},
			},
		},
		{
			"eliminate zero offsets with custom buckets",
			&Histogram{
				Schema:          CustomBucketsSchema,
				PositiveSpans:   []Span{{-2, 1}, {0, 3}, {0, 1}},
				PositiveBuckets: []int64{1, 3, -3, 42, 3},
				CustomValues:    []float64{5, 10, 15, 20},
			},
			0,
			&Histogram{
				Schema:          CustomBucketsSchema,
				PositiveSpans:   []Span{{-2, 5}},
				PositiveBuckets: []int64{1, 3, -3, 42, 3},
				CustomValues:    []float64{5, 10, 15, 20},
			},
		},
		{
			"eliminate zero length with custom buckets",
			&Histogram{
				Schema:          CustomBucketsSchema,
				PositiveSpans:   []Span{{-2, 2}, {2, 0}, {3, 3}},
				PositiveBuckets: []int64{1, 3, -3, 42, 3},
				CustomValues:    []float64{5, 10, 15, 20},
			},
			0,
			&Histogram{
				Schema:          CustomBucketsSchema,
				PositiveSpans:   []Span{{-2, 2}, {5, 3}},
				PositiveBuckets: []int64{1, 3, -3, 42, 3},
				CustomValues:    []float64{5, 10, 15, 20},
			},
		},
		{
			"eliminate multiple zero length spans with custom buckets",
			&Histogram{
				Schema:          CustomBucketsSchema,
				PositiveSpans:   []Span{{-2, 2}, {2, 0}, {2, 0}, {2, 0}, {3, 3}},
				PositiveBuckets: []int64{1, 3, -3, 42, 3},
				CustomValues:    []float64{5, 10, 15, 20},
			},
			0,
			&Histogram{
				Schema:          CustomBucketsSchema,
				PositiveSpans:   []Span{{-2, 2}, {9, 3}},
				PositiveBuckets: []int64{1, 3, -3, 42, 3},
				CustomValues:    []float64{5, 10, 15, 20},
			},
		},
		{
			"cut empty buckets at start or end of spans, even in the middle, with custom buckets",
			&Histogram{
				Schema:          CustomBucketsSchema,
				PositiveSpans:   []Span{{-4, 6}, {3, 6}},
				PositiveBuckets: []int64{0, 0, 1, 3, -4, 0, 1, 42, 3, -46, 0, 0},
				CustomValues:    []float64{5, 10, 15, 20},
			},
			0,
			&Histogram{
				Schema:          CustomBucketsSchema,
				PositiveSpans:   []Span{{-2, 2}, {5, 3}},
				PositiveBuckets: []int64{1, 3, -3, 42, 3},
				CustomValues:    []float64{5, 10, 15, 20},
			},
		},
	}

	for _, c := range cases {
@@ -1107,6 +1426,145 @@ func TestHistogramValidation(t *testing.T) {
			errMsg:    `3 observations found in buckets, but the Count field is 2: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`,
			skipFloat: true,
		},
		"rejects an exponential histogram with custom buckets schema": {
			h: &Histogram{
				Count:         12,
				ZeroCount:     2,
				ZeroThreshold: 0.001,
				Sum:           19.4,
				Schema:        CustomBucketsSchema,
				PositiveSpans: []Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 1, -1, 0},
				NegativeSpans: []Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				NegativeBuckets: []int64{1, 1, -1, 0},
			},
			errMsg: `custom buckets: only 0 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`,
		},
		"rejects a custom buckets histogram with exponential schema": {
			h: &Histogram{
				Count:  5,
				Sum:    19.4,
				Schema: 0,
				PositiveSpans: []Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 1, -1, 0},
				CustomValues:    []float64{1, 2, 3, 4},
			},
			errMsg:    `histogram with exponential schema must not have custom bounds`,
			skipFloat: true, // Converting to float will remove the wrong fields so only the float version will pass validation
		},
		"rejects a custom buckets histogram with zero/negative buckets": {
			h: &Histogram{
				Count:         12,
				ZeroCount:     2,
				ZeroThreshold: 0.001,
				Sum:           19.4,
				Schema:        CustomBucketsSchema,
				PositiveSpans: []Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 1, -1, 0},
				NegativeSpans: []Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				NegativeBuckets: []int64{1, 1, -1, 0},
				CustomValues:    []float64{1, 2, 3, 4},
			},
			errMsg:    `custom buckets: must have zero count of 0`,
			skipFloat: true, // Converting to float will remove the wrong fields so only the float version will pass validation
		},
		"rejects a custom buckets histogram with negative offset in first span": {
			h: &Histogram{
				Count:  5,
				Sum:    19.4,
				Schema: CustomBucketsSchema,
				PositiveSpans: []Span{
					{Offset: -1, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 1, -1, 0},
				CustomValues:    []float64{1, 2, 3, 4},
			},
			errMsg: `custom buckets: span number 1 with offset -1: histogram has a span whose offset is negative`,
		},
		"rejects a custom buckets histogram with negative offset in subsequent spans": {
			h: &Histogram{
				Count:  5,
				Sum:    19.4,
				Schema: CustomBucketsSchema,
				PositiveSpans: []Span{
					{Offset: 0, Length: 2},
					{Offset: -1, Length: 2},
				},
				PositiveBuckets: []int64{1, 1, -1, 0},
				CustomValues:    []float64{1, 2, 3, 4},
			},
			errMsg: `custom buckets: span number 2 with offset -1: histogram has a span whose offset is negative`,
		},
		"rejects a custom buckets histogram with non-matching bucket counts": {
			h: &Histogram{
				Count:  5,
				Sum:    19.4,
				Schema: CustomBucketsSchema,
				PositiveSpans: []Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 1, -1},
				CustomValues:    []float64{1, 2, 3, 4},
			},
			errMsg: `custom buckets: spans need 4 buckets, have 3 buckets: histogram spans specify different number of buckets than provided`,
		},
		"rejects a custom buckets histogram with too few bounds": {
			h: &Histogram{
				Count:  5,
				Sum:    19.4,
				Schema: CustomBucketsSchema,
				PositiveSpans: []Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 1, -1, 0},
				CustomValues:    []float64{1, 2, 3},
			},
			errMsg: `custom buckets: only 3 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`,
		},
		"valid custom buckets histogram": {
			h: &Histogram{
				Count:  5,
				Sum:    19.4,
				Schema: CustomBucketsSchema,
				PositiveSpans: []Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 1, -1, 0},
				CustomValues:    []float64{1, 2, 3, 4},
			},
		},
		"valid custom buckets histogram with extra bounds": {
			h: &Histogram{
				Count:  5,
				Sum:    19.4,
				Schema: CustomBucketsSchema,
				PositiveSpans: []Span{
					{Offset: 0, Length: 2},
					{Offset: 1, Length: 2},
				},
				PositiveBuckets: []int64{1, 1, -1, 0},
				CustomValues:    []float64{1, 2, 3, 4, 5, 6, 7, 8},
			},
		},
	}

	for testName, tc := range tests {
@@ -18,6 +18,7 @@ import (
	"encoding/json"
	"slices"
	"strconv"
	"unsafe"

	"github.com/prometheus/common/model"
)

@@ -215,3 +216,7 @@ func contains(s []Label, n string) bool {
	}
	return false
}

func yoloString(b []byte) string {
	return *((*string)(unsafe.Pointer(&b)))
}
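yoloString reinterprets a []byte header as a string header, avoiding an allocation and a copy; it is only safe while the bytes are never mutated afterwards, since Go assumes strings are immutable. A standalone sketch of the hazard, using the same unsafe trick (illustrative, not part of the change):

package main

import (
	"fmt"
	"unsafe"
)

func yolo(b []byte) string {
	return *((*string)(unsafe.Pointer(&b))) // zero-copy: the string shares b's memory
}

func main() {
	b := []byte("abc")
	s := yolo(b)
	b[0] = 'x'     // mutating b silently rewrites the "immutable" string
	fmt.Println(s) // xbc
}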
@@ -20,7 +20,6 @@ import (
	"slices"
	"strings"
	"sync"
	"unsafe"

	"github.com/cespare/xxhash/v2"
)

@@ -105,30 +104,39 @@ func (t *nameTable) ToName(num int) string {
	return t.byNum[num]
}

// "Varint" in this file is non-standard: we encode small numbers (up to 32767) in 2 bytes,
// because we expect most Prometheus instances to have more than 127 unique strings.
// And we don't encode numbers larger than 4 bytes because we don't expect more than 536,870,912 unique strings.
func decodeVarint(data string, index int) (int, int) {
	// Fast-path for common case of a single byte, value 0..127.
	b := data[index]
	b := int(data[index]) + int(data[index+1])<<8
	index += 2
	if b < 0x8000 {
		return b, index
	}
	return decodeVarintRest(b, data, index)
}

func decodeVarintRest(b int, data string, index int) (int, int) {
	value := int(b & 0x7FFF)
	b = int(data[index])
	index++
	if b < 0x80 {
		return int(b), index
		return value | (b << 15), index
	}
	value := int(b & 0x7F)
	for shift := uint(7); ; shift += 7 {
		// Just panic if we go off the end of data, since all Labels strings are constructed internally and
		// malformed data indicates a bug, or memory corruption.
		b := data[index]
		index++
		value |= int(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return value, index

	value |= (b & 0x7f) << 15
	b = int(data[index])
	index++
	return value | (b << 22), index
}

func decodeString(t *nameTable, data string, index int) (string, int) {
	var num int
	num, index = decodeVarint(data, index)
	// Copy decodeVarint here, because the Go compiler says it's too big to inline.
	num := int(data[index]) + int(data[index+1])<<8
	index += 2
	if num >= 0x8000 {
		num, index = decodeVarintRest(num, data, index)
	}
	return t.ToName(num), index
}
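To make the two-byte scheme above concrete: values below 0x8000 are stored little-endian in exactly two bytes, and for larger values the second byte carries the continuation bit (0x80) while the remaining bits follow as 7-bit groups. A worked sketch; encode2 is illustrative and covers only the two-byte case, and the byte values agree with the TestVarint cases further below:

package main

import "fmt"

// encode2 mirrors the fast path above for values that fit in two bytes.
func encode2(v int) []byte {
	return []byte{byte(v), byte(v >> 8)} // little-endian, continuation bit clear
}

func main() {
	fmt.Printf("% x\n", encode2(300))    // 2c 01
	fmt.Printf("% x\n", encode2(0x7FFF)) // ff 7f, the largest two-byte value
	// 0x8000 no longer fits: it is encoded as 00 80 01, i.e. the low bits
	// little-endian with the continuation bit set, then the rest in 7-bit groups.
}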
@@ -322,7 +330,12 @@ func (ls Labels) Get(name string) string {
		} else if lName[0] > name[0] { // Stop looking if we've gone past.
			break
		}
		_, i = decodeVarint(ls.data, i)
		// Copy decodeVarint here, because the Go compiler says it's too big to inline.
		num := int(ls.data[i]) + int(ls.data[i+1])<<8
		i += 2
		if num >= 0x8000 {
			_, i = decodeVarintRest(num, ls.data, i)
		}
	}
	return ""
}

@@ -340,7 +353,12 @@ func (ls Labels) Has(name string) bool {
		} else if lName[0] > name[0] { // Stop looking if we've gone past.
			break
		}
		_, i = decodeVarint(ls.data, i)
		// Copy decodeVarint here, because the Go compiler says it's too big to inline.
		num := int(ls.data[i]) + int(ls.data[i+1])<<8
		i += 2
		if num >= 0x8000 {
			_, i = decodeVarintRest(num, ls.data, i)
		}
	}
	return false
}
@@ -426,10 +444,6 @@ func EmptyLabels() Labels {
	return Labels{}
}

func yoloString(b []byte) string {
	return *((*string)(unsafe.Pointer(&b)))
}

// New returns a sorted Labels from the given labels.
// The caller has to guarantee that all label names are unique.
// Note this function is not efficient; should not be used in performance-critical places.
@@ -646,29 +660,24 @@ func marshalNumbersToSizedBuffer(nums []int, data []byte) int {

func sizeVarint(x uint64) (n int) {
	// Most common case first
	if x < 1<<7 {
		return 1
	if x < 1<<15 {
		return 2
	}
	if x >= 1<<56 {
		return 9
	if x < 1<<22 {
		return 3
	}
	if x >= 1<<28 {
		x >>= 28
		n = 4
	if x >= 1<<29 {
		panic("Number too large to represent")
	}
	if x >= 1<<14 {
		x >>= 14
		n += 2
	}
	if x >= 1<<7 {
		n++
	}
	return n + 1
	return 4
}

func encodeVarintSlow(data []byte, offset int, v uint64) int {
	offset -= sizeVarint(v)
	base := offset
	data[offset] = uint8(v)
	v >>= 8
	offset++
	for v >= 1<<7 {
		data[offset] = uint8(v&0x7f | 0x80)
		v >>= 7

@@ -678,11 +687,12 @@ func encodeVarintSlow(data []byte, offset int, v uint64) int {
	return base
}

// Special code for the common case that a value is less than 128
// Special code for the common case that a value is less than 32768
func encodeVarint(data []byte, offset, v int) int {
	if v < 1<<7 {
		offset--
	if v < 1<<15 {
		offset -= 2
		data[offset] = uint8(v)
		data[offset+1] = uint8(v >> 8)
		return offset
	}
	return encodeVarintSlow(data, offset, uint64(v))
50 model/labels/labels_dedupelabels_test.go Normal file
@@ -0,0 +1,50 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build dedupelabels

package labels

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestVarint(t *testing.T) {
	cases := []struct {
		v        int
		expected []byte
	}{
		{0, []byte{0, 0}},
		{1, []byte{1, 0}},
		{2, []byte{2, 0}},
		{0x7FFF, []byte{0xFF, 0x7F}},
		{0x8000, []byte{0x00, 0x80, 0x01}},
		{0x8001, []byte{0x01, 0x80, 0x01}},
		{0x3FFFFF, []byte{0xFF, 0xFF, 0x7F}},
		{0x400000, []byte{0x00, 0x80, 0x80, 0x01}},
		{0x400001, []byte{0x01, 0x80, 0x80, 0x01}},
		{0x1FFFFFFF, []byte{0xFF, 0xFF, 0xFF, 0x7F}},
	}
	var buf [16]byte
	for _, c := range cases {
		n := encodeVarint(buf[:], len(buf), c.v)
		require.Equal(t, len(c.expected), len(buf)-n)
		require.Equal(t, c.expected, buf[n:])
		got, m := decodeVarint(string(buf[:]), n)
		require.Equal(t, c.v, got)
		require.Equal(t, len(buf), m)
	}
	require.Panics(t, func() { encodeVarint(buf[:], len(buf), 1<<29) })
}
@@ -299,11 +299,6 @@ func Equal(ls, o Labels) bool {
func EmptyLabels() Labels {
	return Labels{}
}

func yoloString(b []byte) string {
	return *((*string)(unsafe.Pointer(&b)))
}

func yoloBytes(s string) (b []byte) {
	*(*string)(unsafe.Pointer(&b)) = s
	(*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s)
@@ -466,6 +466,38 @@ func TestLabels_DropMetricName(t *testing.T) {
	require.True(t, Equal(original, check))
}

func ScratchBuilderForBenchmark() ScratchBuilder {
	// (Only relevant to -tags dedupelabels: stuff the symbol table before adding the real labels, to avoid having everything fitting into 1 byte.)
	b := NewScratchBuilder(256)
	for i := 0; i < 256; i++ {
		b.Add(fmt.Sprintf("name%d", i), fmt.Sprintf("value%d", i))
	}
	b.Labels()
	b.Reset()
	return b
}

func NewForBenchmark(ls ...Label) Labels {
	b := ScratchBuilderForBenchmark()
	for _, l := range ls {
		b.Add(l.Name, l.Value)
	}
	b.Sort()
	return b.Labels()
}

func FromStringsForBenchmark(ss ...string) Labels {
	if len(ss)%2 != 0 {
		panic("invalid number of strings")
	}
	b := ScratchBuilderForBenchmark()
	for i := 0; i < len(ss); i += 2 {
		b.Add(ss[i], ss[i+1])
	}
	b.Sort()
	return b.Labels()
}

// BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation
// The results have shown that binary search would only be better when searching last labels in scenarios with more than 10 labels.
// In the following list, `old` is the linear search while `new` is the binary search implementation (without calling sort.Search, which performs even worse here)
@@ -488,7 +520,7 @@ func BenchmarkLabels_Get(b *testing.B) {
	}
	for _, size := range []int{5, 10, maxLabels} {
		b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) {
			labels := New(allLabels[:size]...)
			labels := NewForBenchmark(allLabels[:size]...)
			for _, scenario := range []struct {
				desc, label string
			}{

@@ -520,33 +552,33 @@ var comparisonBenchmarkScenarios = []struct {
}{
	{
		"equal",
		FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
		FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
	},
	{
		"not equal",
		FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
		FromStrings("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"),
		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"),
	},
	{
		"different sizes",
		FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
		FromStrings("a_label_name", "a_label_value"),
		FromStringsForBenchmark("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
		FromStringsForBenchmark("a_label_name", "a_label_value"),
	},
	{
		"lots",
		FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"),
		FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"),
		FromStringsForBenchmark("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"),
		FromStringsForBenchmark("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"),
	},
	{
		"real long equal",
		FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
		FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
	},
	{
		"real long different end",
		FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
		FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "deadbeef-0000-1111-2222-b9ad64bb417e"),
		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
		FromStringsForBenchmark("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "deadbeef-0000-1111-2222-b9ad64bb417e"),
	},
}

@@ -834,7 +866,7 @@ func BenchmarkBuilder(b *testing.B) {
}

func BenchmarkLabels_Copy(b *testing.B) {
	l := New(benchmarkLabels...)
	l := NewForBenchmark(benchmarkLabels...)

	for i := 0; i < b.N; i++ {
		l = l.Copy()
@@ -101,7 +101,7 @@ func (m *Matcher) shouldQuoteName() bool {
		}
		return true
	}
	return false
	return len(m.Name) == 0
}

// Matches returns whether the matcher matches the given string value.
@@ -16,10 +16,12 @@ package labels

import (
	"slices"
	"strings"
	"unicode"
	"unicode/utf8"

	"github.com/grafana/regexp"
	"github.com/grafana/regexp/syntax"
	"golang.org/x/text/unicode/norm"
)

const (

@@ -42,7 +44,7 @@ type FastRegexMatcher struct {
	stringMatcher StringMatcher
	prefix        string
	suffix        string
	contains      string
	contains      []string

	// matchString is the "compiled" function to run by MatchString().
	matchString func(string) bool

@@ -87,7 +89,7 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
// compileMatchStringFunction returns the function to run by MatchString().
func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool {
	// If the only optimization available is the string matcher, then we can just run it.
	if len(m.setMatches) == 0 && m.prefix == "" && m.suffix == "" && m.contains == "" && m.stringMatcher != nil {
	if len(m.setMatches) == 0 && m.prefix == "" && m.suffix == "" && len(m.contains) == 0 && m.stringMatcher != nil {
		return m.stringMatcher.Matches
	}

@@ -106,7 +108,7 @@ func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool {
		if m.suffix != "" && !strings.HasSuffix(s, m.suffix) {
			return false
		}
		if m.contains != "" && !strings.Contains(s, m.contains) {
		if len(m.contains) > 0 && !containsInOrder(s, m.contains) {
			return false
		}
		if m.stringMatcher != nil {

@@ -119,7 +121,7 @@ func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool {
// IsOptimized returns true if any fast-path optimization is applied to the
// regex matcher.
func (m *FastRegexMatcher) IsOptimized() bool {
	return len(m.setMatches) > 0 || m.stringMatcher != nil || m.prefix != "" || m.suffix != "" || m.contains != ""
	return len(m.setMatches) > 0 || m.stringMatcher != nil || m.prefix != "" || m.suffix != "" || len(m.contains) > 0
}

// findSetMatches extract equality matches from a regexp.
@@ -361,8 +363,9 @@ func optimizeAlternatingLiterals(s string) (StringMatcher, []string) {

// optimizeConcatRegex returns literal prefix/suffix text that can be safely
// checked against the label value before running the regexp matcher.
func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) {
func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix string, contains []string) {
	sub := r.Sub
	clearCapture(sub...)

	// We can safely remove begin and end text matchers respectively
	// at the beginning and end of the regexp.

@@ -387,13 +390,11 @@ func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) {
		suffix = string(sub[last].Rune)
	}

	// If contains any literal which is not a prefix/suffix, we keep the
	// 1st one. We do not keep the whole list of literals to simplify the
	// fast path.
	// If contains any literal which is not a prefix/suffix, we keep track of
	// all the ones which are case-sensitive.
	for i := 1; i < len(sub)-1; i++ {
		if sub[i].Op == syntax.OpLiteral && (sub[i].Flags&syntax.FoldCase) == 0 {
			contains = string(sub[i].Rune)
			break
			contains = append(contains, string(sub[i].Rune))
		}
	}
@@ -767,7 +768,7 @@ type equalMultiStringMapMatcher struct {

func (m *equalMultiStringMapMatcher) add(s string) {
	if !m.caseSensitive {
		s = strings.ToLower(s)
		s = toNormalisedLower(s)
	}

	m.values[s] = struct{}{}

@@ -787,13 +788,35 @@ func (m *equalMultiStringMapMatcher) setMatches() []string {

func (m *equalMultiStringMapMatcher) Matches(s string) bool {
	if !m.caseSensitive {
		s = strings.ToLower(s)
		s = toNormalisedLower(s)
	}

	_, ok := m.values[s]
	return ok
}

// toNormalisedLower normalises the input string using "Unicode Normalization Form D" and then converts
// it to lower case.
func toNormalisedLower(s string) string {
	var buf []byte
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c >= utf8.RuneSelf {
			return strings.Map(unicode.ToLower, norm.NFKD.String(s))
		}
		if 'A' <= c && c <= 'Z' {
			if buf == nil {
				buf = []byte(s)
			}
			buf[i] = c + 'a' - 'A'
		}
	}
	if buf == nil {
		return s
	}
	return yoloString(buf)
}

// anyStringWithoutNewlineMatcher is a stringMatcher which matches any string
// (including an empty one) as far as it doesn't contain any newline character.
type anyStringWithoutNewlineMatcher struct{}
@@ -940,3 +963,27 @@ func hasPrefixCaseInsensitive(s, prefix string) bool {
func hasSuffixCaseInsensitive(s, suffix string) bool {
	return len(s) >= len(suffix) && strings.EqualFold(s[len(s)-len(suffix):], suffix)
}

func containsInOrder(s string, contains []string) bool {
	// Optimization for the case we only have to look for 1 substring.
	if len(contains) == 1 {
		return strings.Contains(s, contains[0])
	}

	return containsInOrderMulti(s, contains)
}

func containsInOrderMulti(s string, contains []string) bool {
	offset := 0

	for _, substr := range contains {
		at := strings.Index(s[offset:], substr)
		if at == -1 {
			return false
		}

		offset += at + len(substr)
	}

	return true
}
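Where the old fast path kept only a single middle literal, the new one checks every case-sensitive middle literal in left-to-right order. For instance, a pattern like a.*b.*c.*d would give the extraction above prefix "a", suffix "d" and contains ["b", "c"], so "b" must appear before "c". A standalone sketch, with containsInOrder reproduced from above so the example runs on its own:

package main

import (
	"fmt"
	"strings"
)

// containsInOrder requires every substring to occur, in the given order.
func containsInOrder(s string, contains []string) bool {
	offset := 0
	for _, substr := range contains {
		at := strings.Index(s[offset:], substr)
		if at == -1 {
			return false
		}
		offset += at + len(substr) // later substrings must start after this one
	}
	return true
}

func main() {
	fmt.Println(containsInOrder("aXbYcZd", []string{"b", "c"})) // true
	fmt.Println(containsInOrder("aXcYbZd", []string{"b", "c"})) // false: "c" must follow "b"
}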
File diff suppressed because one or more lines are too long
@@ -206,6 +206,11 @@ func (re Regexp) MarshalYAML() (interface{}, error) {
	return nil, nil
}

// IsZero implements the yaml.IsZeroer interface.
func (re Regexp) IsZero() bool {
	return re.Regexp == DefaultRelabelConfig.Regex.Regexp
}

// String returns the original string used to compile the regular expression.
func (re Regexp) String() string {
	str := re.Regexp.String()
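Note that the comparison in IsZero above is pointer identity with the package default, not pattern equality: a Config that never set a regex still holds the default's embedded *regexp.Regexp and is treated as zero (omitted when marshalling), while an explicitly written (.*) parses to a distinct pointer and therefore round-trips, which the test below relies on. A minimal sketch of that distinction with illustrative types:

package main

import "fmt"

type re struct{ p *int }

var def = re{p: new(int)}

// isZero mirrors the IsZero idea: identity with the default, not value equality.
func (r re) isZero() bool { return r.p == def.p }

func main() {
	unset := def              // still holds the default pointer: treated as zero
	parsed := re{p: new(int)} // freshly parsed value, even if equivalent
	fmt.Println(unset.isZero(), parsed.isZero()) // true false
}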
@@ -851,3 +851,52 @@ func BenchmarkRelabel(b *testing.B) {
		})
	}
}

func TestConfig_UnmarshalThenMarshal(t *testing.T) {
	tests := []struct {
		name      string
		inputYaml string
	}{
		{
			name: "Values provided",
			inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port]
separator: ;
regex: \\d+
target_label: __meta_kubernetes_pod_container_port_number
replacement: $1
action: replace
`,
		},
		{
			name: "No regex provided",
			inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port]
separator: ;
target_label: __meta_kubernetes_pod_container_port_number
replacement: $1
action: keepequal
`,
		},
		{
			name: "Default regex provided",
			inputYaml: `source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port]
separator: ;
regex: (.*)
target_label: __meta_kubernetes_pod_container_port_number
replacement: $1
action: replace
`,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			unmarshalled := Config{}
			err := yaml.Unmarshal([]byte(test.inputYaml), &unmarshalled)
			require.NoError(t, err)

			marshalled, err := yaml.Marshal(&unmarshalled)
			require.NoError(t, err)

			require.Equal(t, test.inputYaml, string(marshalled))
		})
	}
}
@@ -136,10 +136,11 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {

// RuleGroup is a list of sequentially evaluated recording and alerting rules.
type RuleGroup struct {
	Name     string         `yaml:"name"`
	Interval model.Duration `yaml:"interval,omitempty"`
	Limit    int            `yaml:"limit,omitempty"`
	Rules    []RuleNode     `yaml:"rules"`
	Name        string          `yaml:"name"`
	Interval    model.Duration  `yaml:"interval,omitempty"`
	QueryOffset *model.Duration `yaml:"query_offset,omitempty"`
	Limit       int             `yaml:"limit,omitempty"`
	Rules       []RuleNode      `yaml:"rules"`
}

// Rule describes an alerting or recording rule.
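A sketch of what the new field accepts; it mirrors only the relevant tags, uses a plain *string in place of *model.Duration to stay self-contained, and assumes gopkg.in/yaml.v2:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// ruleGroup mirrors the two RuleGroup fields relevant here (illustrative).
type ruleGroup struct {
	Name        string  `yaml:"name"`
	QueryOffset *string `yaml:"query_offset,omitempty"`
}

func main() {
	var g ruleGroup
	_ = yaml.Unmarshal([]byte("name: example\nquery_offset: 5m\n"), &g)
	fmt.Println(g.Name, *g.QueryOffset) // example 5m
	// The pointer distinguishes "unset" (nil, dropped by omitempty on marshal)
	// from an explicitly configured offset.
}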
@@ -110,10 +110,11 @@ type Manager struct {

	metrics *alertMetrics

	more   chan struct{}
	mtx    sync.RWMutex
	ctx    context.Context
	cancel func()
	more chan struct{}
	mtx  sync.RWMutex

	stopOnce      *sync.Once
	stopRequested chan struct{}

	alertmanagers map[string]*alertmanagerSet
	logger        log.Logger

@@ -121,9 +122,10 @@ type Manager struct {

// Options are the configurable parameters of a Handler.
type Options struct {
	QueueCapacity  int
	ExternalLabels labels.Labels
	RelabelConfigs []*relabel.Config
	QueueCapacity   int
	DrainOnShutdown bool
	ExternalLabels  labels.Labels
	RelabelConfigs  []*relabel.Config
	// Used for sending HTTP requests to the Alertmanager.
	Do func(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error)

@@ -217,8 +219,6 @@ func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {

// NewManager is the manager constructor.
func NewManager(o *Options, logger log.Logger) *Manager {
	ctx, cancel := context.WithCancel(context.Background())

	if o.Do == nil {
		o.Do = do
	}

@@ -227,12 +227,12 @@ func NewManager(o *Options, logger log.Logger) *Manager {
	}

	n := &Manager{
		queue:  make([]*Alert, 0, o.QueueCapacity),
		ctx:    ctx,
		cancel: cancel,
		more:   make(chan struct{}, 1),
		opts:   o,
		logger: logger,
		queue:         make([]*Alert, 0, o.QueueCapacity),
		more:          make(chan struct{}, 1),
		stopRequested: make(chan struct{}),
		stopOnce:      &sync.Once{},
		opts:          o,
		logger:        logger,
	}

	queueLenFunc := func() float64 { return float64(n.queueLen()) }
@ -298,38 +298,100 @@ func (n *Manager) nextBatch() []*Alert {
|
|||
return alerts
|
||||
}
|
||||
|
||||
// Run dispatches notifications continuously.
|
||||
// Run dispatches notifications continuously, returning once Stop has been called and all
|
||||
// pending notifications have been drained from the queue (if draining is enabled).
|
||||
//
|
||||
// Dispatching of notifications occurs in parallel to processing target updates to avoid one starving the other.
|
||||
// Refer to https://github.com/prometheus/prometheus/issues/13676 for more details.
|
||||
func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) {
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(2)
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
n.targetUpdateLoop(tsets)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
n.sendLoop()
|
||||
n.drainQueue()
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
level.Info(n.logger).Log("msg", "Notification manager stopped")
|
||||
}
|
||||
|
||||
// sendLoop continuously consumes the notifications queue and sends alerts to
|
||||
// the configured Alertmanagers.
|
||||
func (n *Manager) sendLoop() {
|
||||
for {
|
||||
// The select is split in two parts, such as we will first try to read
|
||||
// new alertmanager targets if they are available, before sending new
|
||||
// alerts.
|
||||
// If we've been asked to stop, that takes priority over sending any further notifications.
|
||||
select {
|
||||
case <-n.ctx.Done():
|
||||
case <-n.stopRequested:
|
||||
return
|
||||
case ts := <-tsets:
|
||||
n.reload(ts)
|
||||
default:
|
||||
select {
|
||||
case <-n.ctx.Done():
|
||||
case <-n.stopRequested:
|
||||
return
|
||||
|
||||
case <-n.more:
|
||||
n.sendOneBatch()
|
||||
|
||||
// If the queue still has items left, kick off the next iteration.
|
||||
if n.queueLen() > 0 {
|
||||
n.setMore()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
+// targetUpdateLoop receives updates of target groups and triggers a reload.
+func (n *Manager) targetUpdateLoop(tsets <-chan map[string][]*targetgroup.Group) {
+	for {
+		// If we've been asked to stop, that takes priority over processing any further target group updates.
+		select {
+		case <-n.stopRequested:
+			return
+		default:
+			select {
+			case <-n.stopRequested:
+				return
+			case ts := <-tsets:
+				n.reload(ts)
+			}
+		}
-		alerts := n.nextBatch()
-
-		if !n.sendAll(alerts...) {
-			n.metrics.dropped.Add(float64(len(alerts)))
-		}
-		// If the queue still has items left, kick off the next iteration.
-		if n.queueLen() > 0 {
-			n.setMore()
-		}
 	}
 }
+
+func (n *Manager) sendOneBatch() {
+	alerts := n.nextBatch()
+
+	if !n.sendAll(alerts...) {
+		n.metrics.dropped.Add(float64(len(alerts)))
+	}
+}
+
+func (n *Manager) drainQueue() {
+	if !n.opts.DrainOnShutdown {
+		if n.queueLen() > 0 {
+			level.Warn(n.logger).Log("msg", "Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen())
+			n.metrics.dropped.Add(float64(n.queueLen()))
+		}
+
+		return
+	}
+
+	level.Info(n.logger).Log("msg", "Draining any remaining notifications...")
+
+	for n.queueLen() > 0 {
+		n.sendOneBatch()
+	}
+
+	level.Info(n.logger).Log("msg", "Remaining notifications drained")
+}
 
 func (n *Manager) reload(tgs map[string][]*targetgroup.Group) {
 	n.mtx.Lock()
 	defer n.mtx.Unlock()
@@ -471,10 +533,6 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
 		numSuccess atomic.Uint64
 	)
 	for _, ams := range amSets {
-		if len(ams.ams) == 0 {
-			continue
-		}
-
 		var (
 			payload []byte
 			err     error
@@ -483,6 +541,11 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
 
 		ams.mtx.RLock()
 
+		if len(ams.ams) == 0 {
+			ams.mtx.RUnlock()
+			continue
+		}
+
 		if len(ams.cfg.AlertRelabelConfigs) > 0 {
 			amAlerts = relabelAlerts(ams.cfg.AlertRelabelConfigs, labels.Labels{}, alerts)
 			if len(amAlerts) == 0 {
@@ -541,7 +604,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
 		for _, am := range ams.ams {
 			wg.Add(1)
 
-			ctx, cancel := context.WithTimeout(n.ctx, time.Duration(ams.cfg.Timeout))
+			ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ams.cfg.Timeout))
 			defer cancel()
 
 			go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) {
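The switch from n.ctx to context.Background() means each send now gets its own timeout-bounded context rather than one that is cancelled the moment the manager shuts down, which is what allows queued notifications to still be delivered during a drain. A hedged sketch of the pattern (a hypothetical helper, not the actual sendAll internals):

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// postWithTimeout gives every request its own deadline, derived from
// context.Background() so that an application-wide cancellation does not
// abort in-flight deliveries during a graceful drain.
func postWithTimeout(client *http.Client, url string, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
	if err != nil {
		return err
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("bad response status %s", resp.Status)
	}
	return nil
}

func main() {
	_ = postWithTimeout(http.DefaultClient, "http://localhost:9093/api/v2/alerts", 10*time.Second)
}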
@@ -611,6 +674,7 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []byte) error {
 	}()
 
 	// Any HTTP status 2xx is OK.
+	//nolint:usestdlibvars
 	if resp.StatusCode/100 != 2 {
 		return fmt.Errorf("bad response status %s", resp.Status)
 	}
@@ -618,10 +682,19 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []byte) error {
 	return nil
 }
 
-// Stop shuts down the notification handler.
+// Stop signals the notification manager to shut down and immediately returns.
+//
+// Run will return once the notification manager has successfully shut down.
+//
+// The manager will optionally drain any queued notifications before shutting down.
+//
+// Stop is safe to call multiple times.
 func (n *Manager) Stop() {
 	level.Info(n.logger).Log("msg", "Stopping notification manager...")
-	n.cancel()
+
+	n.stopOnce.Do(func() {
+		close(n.stopRequested)
+	})
 }
 
 // Alertmanager holds Alertmanager endpoint information.
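Taken together, the new lifecycle is: start Run in a goroutine, call Stop from anywhere (any number of times), then wait for Run to return before exiting. A hedged usage sketch against this API (the channel wiring is illustrative):

package main

import (
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/notifier"
)

func main() {
	n := notifier.NewManager(&notifier.Options{QueueCapacity: 100, DrainOnShutdown: true}, nil)

	tsets := make(chan map[string][]*targetgroup.Group)
	done := make(chan struct{})
	go func() {
		defer close(done)
		n.Run(tsets) // Returns only after Stop() and, if enabled, a queue drain.
	}()

	// ... deliver alerts with n.Send(...) ...

	n.Stop() // Signals shutdown and returns immediately; safe to call repeatedly.
	<-done   // Wait until the drain has completed.
}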
@@ -26,13 +26,17 @@ import (
 	"testing"
 	"time"
 
+	"github.com/go-kit/log"
 	"github.com/prometheus/alertmanager/api/v2/models"
+	"github.com/prometheus/client_golang/prometheus"
 	config_util "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/atomic"
 	"gopkg.in/yaml.v2"
 
+	"github.com/prometheus/prometheus/discovery"
+
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/model/labels"
@@ -697,117 +701,319 @@ func TestLabelsToOpenAPILabelSet(t *testing.T) {
 	require.Equal(t, models.LabelSet{"aaa": "111", "bbb": "222"}, labelsToOpenAPILabelSet(labels.FromStrings("aaa", "111", "bbb", "222")))
 }
 
-// TestHangingNotifier validates that targets updates happen even when there are
-// queued alerts.
+// TestHangingNotifier ensures that the notifier takes into account SD changes even when there are
+// queued alerts. This test reproduces the issue described in https://github.com/prometheus/prometheus/issues/13676
+// and https://github.com/prometheus/prometheus/issues/8768.
 func TestHangingNotifier(t *testing.T) {
-	// Note: When targets are not updated in time, this test is flaky because go
-	// selects are not deterministic. Therefore we run 10 subtests to run into the issue.
-	for i := 0; i < 10; i++ {
-		t.Run(strconv.Itoa(i), func(t *testing.T) {
-			var (
-				done    = make(chan struct{})
-				changed = make(chan struct{})
-				syncCh  = make(chan map[string][]*targetgroup.Group)
-			)
+	const (
+		batches     = 100
+		alertsCount = maxBatchSize * batches
+	)
 
-			defer func() {
-				close(done)
-			}()
+	var (
+		sendTimeout = 10 * time.Millisecond
+		sdUpdatert  = sendTimeout / 2
 
-			var calledOnce bool
-			// Setting up a bad server. This server hangs for 2 seconds.
-			badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				if calledOnce {
-					t.Fatal("hanging server called multiple times")
-				}
-				calledOnce = true
-				select {
-				case <-done:
-				case <-time.After(2 * time.Second):
-				}
-			}))
-			badURL, err := url.Parse(badServer.URL)
-			require.NoError(t, err)
-			badAddress := badURL.Host // Used for __name__ label in targets.
+		done = make(chan struct{})
+	)
 
-			// Setting up a good server. This server returns fast, signaling requests on
-			// by closing the changed channel.
-			goodServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				close(changed)
-			}))
-			goodURL, err := url.Parse(goodServer.URL)
-			require.NoError(t, err)
-			goodAddress := goodURL.Host // Used for __name__ label in targets.
+	defer func() {
+		close(done)
+	}()
 
-			h := NewManager(
-				&Options{
-					QueueCapacity: 20 * maxBatchSize,
-				},
-				nil,
-			)
+	// Set up a faulty Alertmanager.
+	var faultyCalled atomic.Bool
+	faultyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		faultyCalled.Store(true)
+		select {
+		case <-done:
+		case <-time.After(time.Hour):
+		}
+	}))
+	faultyURL, err := url.Parse(faultyServer.URL)
+	require.NoError(t, err)
 
-			h.alertmanagers = make(map[string]*alertmanagerSet)
+	// Set up a functional Alertmanager.
+	var functionalCalled atomic.Bool
+	functionalServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		functionalCalled.Store(true)
+	}))
+	functionalURL, err := url.Parse(functionalServer.URL)
+	require.NoError(t, err)
 
-			am1Cfg := config.DefaultAlertmanagerConfig
-			am1Cfg.Timeout = model.Duration(200 * time.Millisecond)
+	// Initialize the discovery manager
+	// This is relevant as the updates aren't sent continually in real life, but only each updatert.
+	// The old implementation of TestHangingNotifier didn't take that into account.
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	reg := prometheus.NewRegistry()
+	sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg))
+	require.NoError(t, err)
+	sdManager := discovery.NewManager(
+		ctx,
+		log.NewNopLogger(),
+		reg,
+		sdMetrics,
+		discovery.Name("sd-manager"),
+		discovery.Updatert(sdUpdatert),
+	)
+	go sdManager.Run()
 
-			h.alertmanagers["config-0"] = &alertmanagerSet{
-				ams:     []alertmanager{},
-				cfg:     &am1Cfg,
-				metrics: h.metrics,
-			}
-			go h.Run(syncCh)
-			defer h.Stop()
+	// Set up the notifier with both faulty and functional Alertmanagers.
+	notifier := NewManager(
+		&Options{
+			QueueCapacity: alertsCount,
+		},
+		nil,
+	)
+	notifier.alertmanagers = make(map[string]*alertmanagerSet)
+	amCfg := config.DefaultAlertmanagerConfig
+	amCfg.Timeout = model.Duration(sendTimeout)
+	notifier.alertmanagers["config-0"] = &alertmanagerSet{
+		ams: []alertmanager{
+			alertmanagerMock{
+				urlf: func() string { return faultyURL.String() },
+			},
+			alertmanagerMock{
+				urlf: func() string { return functionalURL.String() },
+			},
+		},
+		cfg:     &amCfg,
+		metrics: notifier.metrics,
+	}
+	go notifier.Run(sdManager.SyncCh())
+	defer notifier.Stop()
 
-			var alerts []*Alert
-			for i := range make([]struct{}, 20*maxBatchSize) {
-				alerts = append(alerts, &Alert{
-					Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
-				})
-			}
+	require.Len(t, notifier.Alertmanagers(), 2)
 
-			// Injecting the hanging server URL.
-			syncCh <- map[string][]*targetgroup.Group{
-				"config-0": {
-					{
-						Targets: []model.LabelSet{
-							{
-								model.AddressLabel: model.LabelValue(badAddress),
-							},
-						},
-					},
-				},
-			}
-
-			// Queing alerts.
-			h.Send(alerts...)
-
-			// Updating with a working alertmanager target.
-			go func() {
-				select {
-				case syncCh <- map[string][]*targetgroup.Group{
-					"config-0": {
-						{
-							Targets: []model.LabelSet{
-								{
-									model.AddressLabel: model.LabelValue(goodAddress),
-								},
-							},
-						},
-					},
-				}:
-				case <-done:
-				}
-			}()
-
-			select {
-			case <-time.After(1 * time.Second):
-				t.Fatalf("Timeout after 1 second, targets not synced in time.")
-			case <-changed:
-				// The good server has been hit in less than 3 seconds, therefore
-				// targets have been updated before a second call could be made to the
-				// bad server.
-			}
-		})
-	}
+	// Enqueue the alerts.
+	var alerts []*Alert
+	for i := range make([]struct{}, alertsCount) {
+		alerts = append(alerts, &Alert{
+			Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
+		})
+	}
+	notifier.Send(alerts...)
+
+	// Wait for the Alertmanagers to start receiving alerts.
+	// 10*sdUpdatert is used as an arbitrary timeout here.
+	timeout := time.After(10 * sdUpdatert)
+loop1:
+	for {
+		select {
+		case <-timeout:
+			t.Fatalf("Timeout waiting for the alertmanagers to be reached for the first time.")
+		default:
+			if faultyCalled.Load() && functionalCalled.Load() {
+				break loop1
+			}
+		}
+	}
+
+	// Request to remove the faulty Alertmanager.
+	c := map[string]discovery.Configs{
+		"config-0": {
+			discovery.StaticConfig{
+				&targetgroup.Group{
+					Targets: []model.LabelSet{
+						{
+							model.AddressLabel: model.LabelValue(functionalURL.Host),
+						},
+					},
+				},
+			},
+		},
+	}
+	require.NoError(t, sdManager.ApplyConfig(c))
+
+	// The notifier should not wait until the alerts queue is empty to apply the discovery changes
+	// A faulty Alertmanager could cause each alert sending cycle to take up to AlertmanagerConfig.Timeout
+	// The queue may never be emptied, as the arrival rate could be larger than the departure rate
+	// It could even overflow and alerts could be dropped.
+	timeout = time.After(batches * sendTimeout)
+loop2:
+	for {
+		select {
+		case <-timeout:
+			t.Fatalf("Timeout, the faulty alertmanager not removed on time.")
+		default:
+			// The faulty alertmanager was dropped.
+			if len(notifier.Alertmanagers()) == 1 {
+				// Prevent from TOCTOU.
+				require.Positive(t, notifier.queueLen())
+				break loop2
+			}
+			require.Positive(t, notifier.queueLen(), "The faulty alertmanager wasn't dropped before the alerts queue was emptied.")
+		}
+	}
 }
 
+func TestStop_DrainingDisabled(t *testing.T) {
+	releaseReceiver := make(chan struct{})
+	receiverReceivedRequest := make(chan struct{}, 2)
+	alertsReceived := atomic.NewInt64(0)
+
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Let the test know we've received a request.
+		receiverReceivedRequest <- struct{}{}
+
+		var alerts []*Alert
+
+		b, err := io.ReadAll(r.Body)
+		require.NoError(t, err)
+
+		err = json.Unmarshal(b, &alerts)
+		require.NoError(t, err)
+
+		alertsReceived.Add(int64(len(alerts)))
+
+		// Wait for the test to release us.
+		<-releaseReceiver
+
+		w.WriteHeader(http.StatusOK)
+	}))
+	defer func() {
+		server.Close()
+	}()
+
+	m := NewManager(
+		&Options{
+			QueueCapacity:   10,
+			DrainOnShutdown: false,
+		},
+		nil,
+	)
+
+	m.alertmanagers = make(map[string]*alertmanagerSet)
+
+	am1Cfg := config.DefaultAlertmanagerConfig
+	am1Cfg.Timeout = model.Duration(time.Second)
+
+	m.alertmanagers["1"] = &alertmanagerSet{
+		ams: []alertmanager{
+			alertmanagerMock{
+				urlf: func() string { return server.URL },
+			},
+		},
+		cfg: &am1Cfg,
+	}
+
+	notificationManagerStopped := make(chan struct{})
+
+	go func() {
+		defer close(notificationManagerStopped)
+		m.Run(nil)
+	}()
+
+	// Queue two alerts. The first should be immediately sent to the receiver, which should block until we release it later.
+	m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")})
+
+	select {
+	case <-receiverReceivedRequest:
+		// Nothing more to do.
+	case <-time.After(time.Second):
+		require.FailNow(t, "gave up waiting for receiver to receive notification of first alert")
+	}
+
+	m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")})
+
+	// Stop the notification manager, pause to allow the shutdown to be observed, and then allow the receiver to proceed.
+	m.Stop()
+	time.Sleep(time.Second)
+	close(releaseReceiver)
+
+	// Wait for the notification manager to stop and confirm only the first notification was sent.
+	// The second notification should be dropped.
+	select {
+	case <-notificationManagerStopped:
+		// Nothing more to do.
+	case <-time.After(time.Second):
+		require.FailNow(t, "gave up waiting for notification manager to stop")
+	}
+
+	require.Equal(t, int64(1), alertsReceived.Load())
+}
+
+func TestStop_DrainingEnabled(t *testing.T) {
+	releaseReceiver := make(chan struct{})
+	receiverReceivedRequest := make(chan struct{}, 2)
+	alertsReceived := atomic.NewInt64(0)
+
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Let the test know we've received a request.
+		receiverReceivedRequest <- struct{}{}
+
+		var alerts []*Alert
+
+		b, err := io.ReadAll(r.Body)
+		require.NoError(t, err)
+
+		err = json.Unmarshal(b, &alerts)
+		require.NoError(t, err)
+
+		alertsReceived.Add(int64(len(alerts)))
+
+		// Wait for the test to release us.
+		<-releaseReceiver
+
+		w.WriteHeader(http.StatusOK)
+	}))
+	defer func() {
+		server.Close()
+	}()
+
+	m := NewManager(
+		&Options{
+			QueueCapacity:   10,
+			DrainOnShutdown: true,
+		},
+		nil,
+	)
+
+	m.alertmanagers = make(map[string]*alertmanagerSet)
+
+	am1Cfg := config.DefaultAlertmanagerConfig
+	am1Cfg.Timeout = model.Duration(time.Second)
+
+	m.alertmanagers["1"] = &alertmanagerSet{
+		ams: []alertmanager{
+			alertmanagerMock{
+				urlf: func() string { return server.URL },
+			},
+		},
+		cfg: &am1Cfg,
+	}
+
+	notificationManagerStopped := make(chan struct{})
+
+	go func() {
+		defer close(notificationManagerStopped)
+		m.Run(nil)
+	}()
+
+	// Queue two alerts. The first should be immediately sent to the receiver, which should block until we release it later.
+	m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-1")})
+
+	select {
+	case <-receiverReceivedRequest:
+		// Nothing more to do.
+	case <-time.After(time.Second):
+		require.FailNow(t, "gave up waiting for receiver to receive notification of first alert")
+	}
+
+	m.Send(&Alert{Labels: labels.FromStrings(labels.AlertName, "alert-2")})
+
+	// Stop the notification manager and allow the receiver to proceed.
+	m.Stop()
+	close(releaseReceiver)
+
+	// Wait for the notification manager to stop and confirm both notifications were sent.
+	select {
+	case <-notificationManagerStopped:
+		// Nothing more to do.
+	case <-time.After(200 * time.Millisecond):
+		require.FailNow(t, "gave up waiting for notification manager to stop")
+	}
+
+	require.Equal(t, int64(2), alertsReceived.Load())
+}

@@ -323,6 +323,14 @@ func BenchmarkNativeHistograms(b *testing.B) {
 			name:  "sum rate with long rate interval",
 			query: "sum(rate(native_histogram_series[20m]))",
 		},
+		{
+			name:  "histogram_count with short rate interval",
+			query: "histogram_count(sum(rate(native_histogram_series[2m])))",
+		},
+		{
+			name:  "histogram_count with long rate interval",
+			query: "histogram_count(sum(rate(native_histogram_series[20m])))",
+		},
 	}
 
 	opts := promql.EngineOpts{
188  promql/engine.go

@@ -752,6 +752,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, annotations.Annotations, error) {
 	case parser.ValueTypeScalar:
 		return Scalar{V: mat[0].Floats[0].F, T: start}, warnings, nil
 	case parser.ValueTypeMatrix:
+		ng.sortMatrixResult(ctx, query, mat)
 		return mat, warnings, nil
 	default:
 		panic(fmt.Errorf("promql.Engine.exec: unexpected expression type %q", s.Expr.Type()))
@@ -790,11 +791,15 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, annotations.Annotations, error) {
 	}
 
 	// TODO(fabxc): where to ensure metric labels are a copy from the storage internals.
+	ng.sortMatrixResult(ctx, query, mat)
+
+	return mat, warnings, nil
+}
+
+func (ng *Engine) sortMatrixResult(ctx context.Context, query *query, mat Matrix) {
 	sortSpanTimer, _ := query.stats.GetSpanTimer(ctx, stats.ResultSortTime, ng.metrics.queryResultSort)
 	sort.Sort(mat)
 	sortSpanTimer.Finish()
-
-	return mat, warnings, nil
 }
 
 // subqueryTimes returns the sum of offsets and ranges of all subqueries in the path.
@@ -980,6 +985,11 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations.Annotations, error) {
 		return nil, nil
 	}
 	series, ws, err := expandSeriesSet(ctx, e.UnexpandedSeriesSet)
+	if e.SkipHistogramBuckets {
+		for i := range series {
+			series[i] = newHistogramStatsSeries(series[i])
+		}
+	}
 	e.Series = series
 	return ws, err
 }
@@ -1783,18 +1793,21 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotations) {
 			}, e.LHS, e.RHS)
 		default:
 			return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-				return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh), nil
+				vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh)
+				return vec, handleVectorBinopError(err, e)
 			}, e.LHS, e.RHS)
 		}
 
 	case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar:
 		return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-			return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh), nil
+			vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh)
+			return vec, handleVectorBinopError(err, e)
 		}, e.LHS, e.RHS)
 
 	case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector:
 		return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
-			return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh), nil
+			vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh)
+			return vec, handleVectorBinopError(err, e)
 		}, e.LHS, e.RHS)
 	}
@@ -2427,12 +2440,12 @@ func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
 }
 
 // VectorBinop evaluates a binary operation between two Vectors, excluding set operators.
-func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
+func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, error) {
 	if matching.Card == parser.CardManyToMany {
 		panic("many-to-many only allowed for set operators")
 	}
 	if len(lhs) == 0 || len(rhs) == 0 {
-		return nil // Short-circuit: nothing is going to match.
+		return nil, nil // Short-circuit: nothing is going to match.
 	}
 
 	// The control flow below handles one-to-one or many-to-one matching.
@@ -2485,6 +2498,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, error) {
 
 	// For all lhs samples find a respective rhs sample and perform
 	// the binary operation.
+	var lastErr error
 	for i, ls := range lhs {
 		sig := lhsh[i].signature
 
@@ -2500,7 +2514,10 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, error) {
 			fl, fr = fr, fl
 			hl, hr = hr, hl
 		}
-		floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr)
+		floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr)
+		if err != nil {
+			lastErr = err
+		}
 		switch {
 		case returnBool:
 			if keep {
@@ -2542,7 +2559,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, error) {
 			H: histogramValue,
 		})
 	}
-	return enh.Out
+	return enh.Out, lastErr
 }
 
 func signatureFunc(on bool, b []byte, names ...string) func(labels.Labels) string {
@@ -2605,7 +2622,8 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.VectorMatching, enh *EvalNodeHelper) labels.Labels {
 }
 
 // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar.
-func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector {
+func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) (Vector, error) {
+	var lastErr error
 	for _, lhsSample := range lhs {
 		lf, rf := lhsSample.F, rhs.V
 		var rh *histogram.FloatHistogram
@@ -2616,7 +2634,10 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) (Vector, error) {
 			lf, rf = rf, lf
 			lh, rh = rh, lh
 		}
-		float, histogram, keep := vectorElemBinop(op, lf, rf, lh, rh)
+		float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh)
+		if err != nil {
+			lastErr = err
+		}
 		// Catch cases where the scalar is the LHS in a scalar-vector comparison operation.
 		// We want to always keep the vector element value as the output value, even if it's on the RHS.
 		if op.IsComparisonOperator() && swap {
@@ -2640,7 +2661,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) (Vector, error) {
 			enh.Out = append(enh.Out, lhsSample)
 		}
 	}
-	return enh.Out
+	return enh.Out, lastErr
 }
 
 // scalarBinop evaluates a binary operation between two Scalars.
@@ -2677,49 +2698,57 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 {
 }
 
 // vectorElemBinop evaluates a binary operation between two Vector elements.
-func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool) {
+func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) {
 	switch op {
 	case parser.ADD:
 		if hlhs != nil && hrhs != nil {
-			return 0, hlhs.Copy().Add(hrhs).Compact(0), true
+			res, err := hlhs.Copy().Add(hrhs)
+			if err != nil {
+				return 0, nil, false, err
+			}
+			return 0, res.Compact(0), true, nil
 		}
-		return lhs + rhs, nil, true
+		return lhs + rhs, nil, true, nil
 	case parser.SUB:
 		if hlhs != nil && hrhs != nil {
-			return 0, hlhs.Copy().Sub(hrhs).Compact(0), true
+			res, err := hlhs.Copy().Sub(hrhs)
+			if err != nil {
+				return 0, nil, false, err
+			}
+			return 0, res.Compact(0), true, nil
 		}
-		return lhs - rhs, nil, true
+		return lhs - rhs, nil, true, nil
 	case parser.MUL:
 		if hlhs != nil && hrhs == nil {
-			return 0, hlhs.Copy().Mul(rhs), true
+			return 0, hlhs.Copy().Mul(rhs), true, nil
 		}
 		if hlhs == nil && hrhs != nil {
-			return 0, hrhs.Copy().Mul(lhs), true
+			return 0, hrhs.Copy().Mul(lhs), true, nil
 		}
-		return lhs * rhs, nil, true
+		return lhs * rhs, nil, true, nil
 	case parser.DIV:
 		if hlhs != nil && hrhs == nil {
-			return 0, hlhs.Copy().Div(rhs), true
+			return 0, hlhs.Copy().Div(rhs), true, nil
 		}
-		return lhs / rhs, nil, true
+		return lhs / rhs, nil, true, nil
 	case parser.POW:
-		return math.Pow(lhs, rhs), nil, true
+		return math.Pow(lhs, rhs), nil, true, nil
 	case parser.MOD:
-		return math.Mod(lhs, rhs), nil, true
+		return math.Mod(lhs, rhs), nil, true, nil
 	case parser.EQLC:
-		return lhs, nil, lhs == rhs
+		return lhs, nil, lhs == rhs, nil
 	case parser.NEQ:
-		return lhs, nil, lhs != rhs
+		return lhs, nil, lhs != rhs, nil
 	case parser.GTR:
-		return lhs, nil, lhs > rhs
+		return lhs, nil, lhs > rhs, nil
 	case parser.LSS:
-		return lhs, nil, lhs < rhs
+		return lhs, nil, lhs < rhs, nil
 	case parser.GTE:
-		return lhs, nil, lhs >= rhs
+		return lhs, nil, lhs >= rhs, nil
 	case parser.LTE:
-		return lhs, nil, lhs <= rhs
+		return lhs, nil, lhs <= rhs, nil
 	case parser.ATAN2:
-		return math.Atan2(lhs, rhs), nil, true
+		return math.Atan2(lhs, rhs), nil, true, nil
 	}
 	panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op))
 }
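vectorElemBinop now surfaces the error from FloatHistogram.Add and Sub instead of ignoring it; per the annotations used elsewhere in this commit, those calls fail when the two histograms cannot be combined (mixing exponential and custom-bucket schemas, or custom buckets with different bounds). A minimal sketch of the failure mode, with the bucket layouts made up for illustration:

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// A standard exponential-schema histogram...
	exp := &histogram.FloatHistogram{Schema: 0, Count: 1, Sum: 1}
	// ...and a custom-buckets histogram (illustrative layout).
	custom := &histogram.FloatHistogram{
		Schema:       histogram.CustomBucketsSchema,
		CustomValues: []float64{1, 2},
		Count:        1,
		Sum:          1,
	}

	_, err := exp.Copy().Add(custom)
	if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
		fmt.Println("cannot mix exponential and custom-bucket histograms:", err)
	}
}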
@@ -2730,7 +2759,7 @@ type groupedAggregation struct {
 	hasHistogram   bool // Has at least 1 histogram sample aggregated.
 	floatValue     float64
 	histogramValue *histogram.FloatHistogram
-	floatMean      float64
+	floatMean      float64 // Mean, or "compensating value" for Kahan summation.
 	groupCount     int
 	heap           vectorByValueHeap
 }
@@ -2758,11 +2787,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 		*group = groupedAggregation{
 			seen:       true,
 			floatValue: f,
-			floatMean:  f,
 			groupCount: 1,
 		}
 		switch op {
-		case parser.SUM, parser.AVG:
+		case parser.AVG:
+			group.floatMean = f
+			fallthrough
+		case parser.SUM:
 			if h == nil {
 				group.hasFloat = true
 			} else {
@@ -2770,6 +2801,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 				group.hasHistogram = true
 			}
 		case parser.STDVAR, parser.STDDEV:
+			group.floatMean = f
 			group.floatValue = 0
 		case parser.QUANTILE:
 			group.heap = make(vectorByValueHeap, 1)
@@ -2785,14 +2817,17 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 			if h != nil {
 				group.hasHistogram = true
 				if group.histogramValue != nil {
-					group.histogramValue.Add(h)
+					_, err := group.histogramValue.Add(h)
+					if err != nil {
+						handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+					}
 				}
 				// Otherwise the aggregation contained floats
 				// previously and will be invalid anyway. No
 				// point in copying the histogram in that case.
 			} else {
 				group.hasFloat = true
-				group.floatValue += f
+				group.floatValue, group.floatMean = kahanSumInc(f, group.floatValue, group.floatMean)
 			}
 
 		case parser.AVG:
@@ -2802,8 +2837,14 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 			if group.histogramValue != nil {
 				left := h.Copy().Div(float64(group.groupCount))
 				right := group.histogramValue.Copy().Div(float64(group.groupCount))
-				toAdd := left.Sub(right)
-				group.histogramValue.Add(toAdd)
+				toAdd, err := left.Sub(right)
+				if err != nil {
+					handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+				}
+				_, err = group.histogramValue.Add(toAdd)
+				if err != nil {
+					handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos)
+				}
 			}
 			// Otherwise the aggregation contained floats
 			// previously and will be invalid anyway. No
@@ -2903,6 +2944,8 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
 			}
 			if aggr.hasHistogram {
 				aggr.histogramValue.Compact(0)
+			} else {
+				aggr.floatValue += aggr.floatMean // Add Kahan summation compensating term.
 			}
 		default:
 			// For other aggregations, we already have the right value.
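sum now accumulates through kahanSumInc, reusing floatMean as the compensation term so that long sums of floats lose less precision. The helper itself is not shown in this hunk; the following is the textbook Kahan/Neumaier update in the same spirit, not necessarily the exact Prometheus helper:

package main

import (
	"fmt"
	"math"
)

// kahanSumInc adds inc to sum while carrying a compensation term c that
// captures the low-order bits lost to rounding (Neumaier's variant).
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	if math.Abs(sum) >= math.Abs(inc) {
		c += (sum - t) + inc // Low-order digits of inc were lost.
	} else {
		c += (inc - t) + sum // Low-order digits of sum were lost.
	}
	return t, c
}

func main() {
	sum, c := 0.0, 0.0
	for i := 0; i < 10; i++ {
		sum, c = kahanSumInc(0.1, sum, c)
	}
	fmt.Println(sum + c) // Adding the compensating term recovers ~1.0.
}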
@@ -3100,6 +3143,31 @@ func (ev *evaluator) nextValues(ts int64, series *Series) (f float64, h *histogram.FloatHistogram, b bool) {
 	return f, h, true
 }
 
+// handleAggregationError adds the appropriate annotation based on the aggregation error.
+func handleAggregationError(err error, e *parser.AggregateExpr, metricName string, annos *annotations.Annotations) {
+	pos := e.Expr.PositionRange()
+	if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+		annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+	} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+		annos.Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
+	}
+}
+
+// handleVectorBinopError returns the appropriate annotation based on the vector binary operation error.
+func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotations {
+	if err == nil {
+		return nil
+	}
+	metricName := ""
+	pos := e.PositionRange()
+	if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+		return annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+	} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+		return annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
+	}
+	return nil
+}
+
 // groupingKey builds and returns the grouping key for the given metric and
 // grouping labels.
 func generateGroupingKey(metric labels.Labels, grouping []string, without bool, buf []byte) (uint64, []byte) {
@@ -3179,6 +3247,8 @@ func unwrapStepInvariantExpr(e parser.Expr) parser.Expr {
 // PreprocessExpr wraps all possible step invariant parts of the given expression with
 // StepInvariantExpr. It also resolves the preprocessors.
 func PreprocessExpr(expr parser.Expr, start, end time.Time) parser.Expr {
+	detectHistogramStatsDecoding(expr)
+
 	isStepInvariant := preprocessExprHelper(expr, start, end)
 	if isStepInvariant {
 		return newStepInvariantExpr(expr)
@@ -3313,8 +3383,50 @@ func setOffsetForAtModifier(evalTime int64, expr parser.Expr) {
 	})
 }
 
+// detectHistogramStatsDecoding modifies the expression by setting the
+// SkipHistogramBuckets field in those vector selectors for which it is safe to
+// return only histogram statistics (sum and count), excluding histogram spans
+// and buckets. The function can be treated as an optimization and is not
+// required for correctness.
+func detectHistogramStatsDecoding(expr parser.Expr) {
+	parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
+		n, ok := (node).(*parser.VectorSelector)
+		if !ok {
+			return nil
+		}
+
+		for _, p := range path {
+			call, ok := p.(*parser.Call)
+			if !ok {
+				continue
+			}
+			if call.Func.Name == "histogram_count" || call.Func.Name == "histogram_sum" {
+				n.SkipHistogramBuckets = true
+				break
+			}
+			if call.Func.Name == "histogram_quantile" || call.Func.Name == "histogram_fraction" {
+				n.SkipHistogramBuckets = false
+				break
+			}
+		}
+		return fmt.Errorf("stop")
+	})
+}
+
 func makeInt64Pointer(val int64) *int64 {
 	valp := new(int64)
 	*valp = val
 	return valp
 }
+
+type histogramStatsSeries struct {
+	storage.Series
+}
+
+func newHistogramStatsSeries(series storage.Series) *histogramStatsSeries {
+	return &histogramStatsSeries{Series: series}
+}
+
+func (s histogramStatsSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
+	return NewHistogramStatsIterator(s.Series.Iterator(it))
+}
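The net effect of detectHistogramStatsDecoding plus newHistogramStatsSeries is that a query such as histogram_count(rate(m[2m])) decodes only the count and sum of each native histogram sample, skipping bucket data entirely. A hedged sketch of how the flag could be observed on a parsed expression (the inspection code is illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	expr, err := parser.ParseExpr(`histogram_count(sum(rate(native_histogram_series[2m])))`)
	if err != nil {
		panic(err)
	}

	// PreprocessExpr now also runs detectHistogramStatsDecoding.
	expr = promql.PreprocessExpr(expr, time.Now(), time.Now())

	parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
		if vs, ok := node.(*parser.VectorSelector); ok {
			// Expected to print true: only histogram stats are needed.
			fmt.Println(vs.Name, "SkipHistogramBuckets =", vs.SkipHistogramBuckets)
		}
		return nil
	})
}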
@@ -834,10 +834,10 @@ load 10s
 		{
 			Query:        "metricWith1HistogramEvery10Seconds",
 			Start:        time.Unix(21, 0),
-			PeakSamples:  12,
-			TotalSamples: 12, // 1 histogram sample of size 12 / 10 seconds
+			PeakSamples:  13,
+			TotalSamples: 13, // 1 histogram HPoint of size 13 / 10 seconds
 			TotalSamplesPerStep: stats.TotalSamplesPerStep{
-				21000: 12,
+				21000: 13,
 			},
 		},
 		{
@@ -934,10 +934,10 @@ load 10s
 		{
 			Query:        "metricWith1HistogramEvery10Seconds[60s]",
 			Start:        time.Unix(201, 0),
-			PeakSamples:  72,
-			TotalSamples: 72, // 1 histogram (size 12) / 10 seconds * 60 seconds
+			PeakSamples:  78,
+			TotalSamples: 78, // 1 histogram (size 13 HPoint) / 10 seconds * 60 seconds
 			TotalSamplesPerStep: stats.TotalSamplesPerStep{
-				201000: 72,
+				201000: 78,
 			},
 		},
 		{
@@ -964,11 +964,11 @@ load 10s
 		{
 			Query:        "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]",
 			Start:        time.Unix(201, 0),
-			PeakSamples:  72,
-			TotalSamples: 312, // (1 histogram (size 12) / 10 seconds * 60 seconds) * 4 + 2 * 12 as
+			PeakSamples:  78,
+			TotalSamples: 338, // (1 histogram (size 13 HPoint) / 10 seconds * 60 seconds) * 4 + 2 * 13 as
 			// max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples.
 			TotalSamplesPerStep: stats.TotalSamplesPerStep{
-				201000: 312,
+				201000: 338,
 			},
 		},
 		{
@@ -983,10 +983,10 @@ load 10s
 		{
 			Query:        "metricWith1HistogramEvery10Seconds[60s] @ 30",
 			Start:        time.Unix(201, 0),
-			PeakSamples:  48,
-			TotalSamples: 48, // @ modifier force the evaluation to at 30 seconds - So it brings 4 datapoints (0, 10, 20, 30 seconds) * 1 series
+			PeakSamples:  52,
+			TotalSamples: 52, // @ modifier force the evaluation to at 30 seconds - So it brings 4 datapoints (0, 10, 20, 30 seconds) * 1 series
 			TotalSamplesPerStep: stats.TotalSamplesPerStep{
-				201000: 48,
+				201000: 52,
 			},
 		},
 		{
@@ -1121,13 +1121,13 @@ load 10s
 			Start:        time.Unix(204, 0),
 			End:          time.Unix(223, 0),
 			Interval:     5 * time.Second,
-			PeakSamples:  48,
-			TotalSamples: 48, // 1 histogram (size 12) per query * 4 steps
+			PeakSamples:  52,
+			TotalSamples: 52, // 1 histogram (size 13 HPoint) per query * 4 steps
 			TotalSamplesPerStep: stats.TotalSamplesPerStep{
-				204000: 12, // aligned to the step time, not the sample time
-				209000: 12,
-				214000: 12,
-				219000: 12,
+				204000: 13, // aligned to the step time, not the sample time
+				209000: 13,
+				214000: 13,
+				219000: 13,
 			},
 		},
 		{
@@ -2015,47 +2015,6 @@ func TestSubquerySelector(t *testing.T) {
 	}
 }
 
-func TestTimestampFunction_StepsMoreOftenThanSamples(t *testing.T) {
-	engine := newTestEngine()
-	storage := promqltest.LoadedStorage(t, `
-load 1m
-  metric 0+1x1000
-`)
-	t.Cleanup(func() { storage.Close() })
-
-	query := "timestamp(metric)"
-	start := time.Unix(0, 0)
-	end := time.Unix(61, 0)
-	interval := time.Second
-
-	// We expect the value to be 0 for t=0s to t=59s (inclusive), then 60 for t=60s and t=61s.
-	expectedPoints := []promql.FPoint{}
-
-	for t := 0; t <= 59; t++ {
-		expectedPoints = append(expectedPoints, promql.FPoint{F: 0, T: int64(t * 1000)})
-	}
-
-	expectedPoints = append(
-		expectedPoints,
-		promql.FPoint{F: 60, T: 60_000},
-		promql.FPoint{F: 60, T: 61_000},
-	)
-
-	expectedResult := promql.Matrix{
-		promql.Series{
-			Floats: expectedPoints,
-			Metric: labels.EmptyLabels(),
-		},
-	}
-
-	qry, err := engine.NewRangeQuery(context.Background(), storage, nil, query, start, end, interval)
-	require.NoError(t, err)
-
-	res := qry.Exec(context.Background())
-	require.NoError(t, res.Err)
-	testutil.RequireEqual(t, expectedResult, res.Value)
-}
-
 type FakeQueryLogger struct {
 	closed bool
 	logs   []interface{}
@@ -3061,163 +3020,78 @@ func TestEngineOptsValidation(t *testing.T) {
 	}
 }
 
-func TestRangeQuery(t *testing.T) {
-	cases := []struct {
-		Name     string
-		Load     string
-		Query    string
-		Result   parser.Value
-		Start    time.Time
-		End      time.Time
-		Interval time.Duration
-	}{
-		{
-			Name: "sum_over_time with all values",
-			Load: `load 30s
-			bar 0 1 10 100 1000`,
-			Query: "sum_over_time(bar[30s])",
-			Result: promql.Matrix{
-				promql.Series{
-					Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}},
-					Metric: labels.EmptyLabels(),
-				},
-			},
-			Start:    time.Unix(0, 0),
-			End:      time.Unix(120, 0),
-			Interval: 60 * time.Second,
-		},
-		{
-			Name: "sum_over_time with trailing values",
-			Load: `load 30s
-			bar 0 1 10 100 1000 0 0 0 0`,
-			Query: "sum_over_time(bar[30s])",
-			Result: promql.Matrix{
-				promql.Series{
-					Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}},
-					Metric: labels.EmptyLabels(),
-				},
-			},
-			Start:    time.Unix(0, 0),
-			End:      time.Unix(120, 0),
-			Interval: 60 * time.Second,
-		},
-		{
-			Name: "sum_over_time with all values long",
-			Load: `load 30s
-			bar 0 1 10 100 1000 10000 100000 1000000 10000000`,
-			Query: "sum_over_time(bar[30s])",
-			Result: promql.Matrix{
-				promql.Series{
-					Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}, {F: 110000, T: 180000}, {F: 11000000, T: 240000}},
-					Metric: labels.EmptyLabels(),
-				},
-			},
-			Start:    time.Unix(0, 0),
-			End:      time.Unix(240, 0),
-			Interval: 60 * time.Second,
-		},
-		{
-			Name: "sum_over_time with all values random",
-			Load: `load 30s
-			bar 5 17 42 2 7 905 51`,
-			Query: "sum_over_time(bar[30s])",
-			Result: promql.Matrix{
-				promql.Series{
-					Floats: []promql.FPoint{{F: 5, T: 0}, {F: 59, T: 60000}, {F: 9, T: 120000}, {F: 956, T: 180000}},
-					Metric: labels.EmptyLabels(),
-				},
-			},
-			Start:    time.Unix(0, 0),
-			End:      time.Unix(180, 0),
-			Interval: 60 * time.Second,
-		},
-		{
-			Name: "metric query",
-			Load: `load 30s
-			metric 1+1x4`,
-			Query: "metric",
-			Result: promql.Matrix{
-				promql.Series{
-					Floats: []promql.FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}},
-					Metric: labels.FromStrings("__name__", "metric"),
-				},
-			},
-			Start:    time.Unix(0, 0),
-			End:      time.Unix(120, 0),
-			Interval: 1 * time.Minute,
-		},
-		{
-			Name: "metric query with trailing values",
-			Load: `load 30s
-			metric 1+1x8`,
-			Query: "metric",
-			Result: promql.Matrix{
-				promql.Series{
-					Floats: []promql.FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}},
-					Metric: labels.FromStrings("__name__", "metric"),
-				},
-			},
-			Start:    time.Unix(0, 0),
-			End:      time.Unix(120, 0),
-			Interval: 1 * time.Minute,
-		},
-		{
-			Name: "short-circuit",
-			Load: `load 30s
-			foo{job="1"} 1+1x4
-			bar{job="2"} 1+1x4`,
-			Query: `foo > 2 or bar`,
-			Result: promql.Matrix{
-				promql.Series{
-					Floats: []promql.FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}},
-					Metric: labels.FromStrings(
-						"__name__", "bar",
-						"job", "2",
-					),
-				},
-				promql.Series{
-					Floats: []promql.FPoint{{F: 3, T: 60000}, {F: 5, T: 120000}},
-					Metric: labels.FromStrings(
-						"__name__", "foo",
-						"job", "1",
-					),
-				},
-			},
-			Start:    time.Unix(0, 0),
-			End:      time.Unix(120, 0),
-			Interval: 1 * time.Minute,
-		},
-		{
-			Name: "drop-metric-name",
-			Load: `load 30s
-			requests{job="1", __address__="bar"} 100`,
-			Query: `requests * 2`,
-			Result: promql.Matrix{
-				promql.Series{
-					Floats: []promql.FPoint{{F: 200, T: 0}, {F: 200, T: 60000}, {F: 200, T: 120000}},
-					Metric: labels.FromStrings(
-						"__address__", "bar",
-						"job", "1",
-					),
-				},
-			},
-			Start:    time.Unix(0, 0),
-			End:      time.Unix(120, 0),
-			Interval: 1 * time.Minute,
-		},
-	}
-	for _, c := range cases {
-		t.Run(c.Name, func(t *testing.T) {
-			engine := newTestEngine()
-			storage := promqltest.LoadedStorage(t, c.Load)
-			t.Cleanup(func() { storage.Close() })
-
-			qry, err := engine.NewRangeQuery(context.Background(), storage, nil, c.Query, c.Start, c.End, c.Interval)
-			require.NoError(t, err)
-
-			res := qry.Exec(context.Background())
-			require.NoError(t, res.Err)
-			testutil.RequireEqual(t, c.Result, res.Value)
-		})
-	}
-}
+func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
+	engine := newTestEngine()
+
+	baseT := timestamp.Time(0)
+	storage := promqltest.LoadedStorage(t, `
+		load 1m
+			some_metric{env="1"} 0+1x4
+			some_metric{env="2"} 0+2x4
+			some_metric_with_stale_marker 0 1 stale 3
+	`)
+	t.Cleanup(func() { require.NoError(t, storage.Close()) })
+
+	testCases := map[string]struct {
+		expr     string
+		expected promql.Matrix
+		ts       time.Time
+	}{
+		"matches series with points in range": {
+			expr: "some_metric[1m]",
+			ts:   baseT.Add(2 * time.Minute),
+			expected: promql.Matrix{
+				{
+					Metric: labels.FromStrings("__name__", "some_metric", "env", "1"),
+					Floats: []promql.FPoint{
+						{T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1},
+						{T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 2},
+					},
+				},
+				{
+					Metric: labels.FromStrings("__name__", "some_metric", "env", "2"),
+					Floats: []promql.FPoint{
+						{T: timestamp.FromTime(baseT.Add(time.Minute)), F: 2},
+						{T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 4},
+					},
+				},
+			},
+		},
+		"matches no series": {
+			expr:     "some_nonexistent_metric[1m]",
+			ts:       baseT,
+			expected: promql.Matrix{},
+		},
+		"no samples in range": {
+			expr:     "some_metric[1m]",
+			ts:       baseT.Add(20 * time.Minute),
+			expected: promql.Matrix{},
+		},
+		"metric with stale marker": {
+			expr: "some_metric_with_stale_marker[3m]",
+			ts:   baseT.Add(3 * time.Minute),
+			expected: promql.Matrix{
+				{
+					Metric: labels.FromStrings("__name__", "some_metric_with_stale_marker"),
+					Floats: []promql.FPoint{
+						{T: timestamp.FromTime(baseT), F: 0},
+						{T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1},
+						{T: timestamp.FromTime(baseT.Add(3 * time.Minute)), F: 3},
+					},
+				},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			q, err := engine.NewInstantQuery(context.Background(), storage, nil, testCase.expr, testCase.ts)
+			require.NoError(t, err)
+			defer q.Close()
+
+			res := q.Exec(context.Background())
+			require.NoError(t, res.Err)
+			testutil.RequireEqual(t, testCase.expected, res.Value)
+		})
+	}
+}

@@ -14,6 +14,7 @@
 package promql
 
 import (
+	"errors"
 	"fmt"
 	"math"
 	"slices"
@@ -210,14 +211,28 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) {
 	}
 
 	h := last.CopyToSchema(minSchema)
-	h.Sub(prev)
+	_, err := h.Sub(prev)
+	if err != nil {
+		if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+			return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+		} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+			return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
+		}
+	}
 
 	if isCounter {
 		// Second iteration to deal with counter resets.
 		for _, currPoint := range points[1:] {
 			curr := currPoint.H
 			if curr.DetectReset(prev) {
-				h.Add(prev)
+				_, err := h.Add(prev)
+				if err != nil {
+					if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+						return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+					} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+						return nil, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos))
+					}
+				}
 			}
 			prev = curr
 		}
@@ -513,10 +528,11 @@ func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector {
 	return append(enh.Out, Sample{F: aggrFn(el)})
 }
 
-func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) *histogram.FloatHistogram) Vector {
+func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) (*histogram.FloatHistogram, error)) (Vector, error) {
 	el := vals[0].(Matrix)[0]
+	res, err := aggrFn(el)
 
-	return append(enh.Out, Sample{H: aggrFn(el)})
+	return append(enh.Out, Sample{H: res}), err
 }
 
 // === avg_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
@@ -528,18 +544,33 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	}
 	if len(firstSeries.Floats) == 0 {
 		// The passed values only contain histograms.
-		return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {
+		vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) {
 			count := 1
 			mean := s.Histograms[0].H.Copy()
 			for _, h := range s.Histograms[1:] {
 				count++
 				left := h.H.Copy().Div(float64(count))
 				right := mean.Copy().Div(float64(count))
-				toAdd := left.Sub(right)
-				mean.Add(toAdd)
+				toAdd, err := left.Sub(right)
+				if err != nil {
+					return mean, err
+				}
+				_, err = mean.Add(toAdd)
+				if err != nil {
+					return mean, err
+				}
 			}
-			return mean
-		}), nil
+			return mean, nil
+		})
+		if err != nil {
+			metricName := firstSeries.Metric.Get(labels.MetricName)
+			if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+				return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange()))
+			} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+				return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange()))
+			}
+		}
+		return vec, nil
 	}
 	return aggrOverTime(vals, enh, func(s Series) float64 {
 		var mean, count, c float64
@@ -673,13 +704,25 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	}
 	if len(firstSeries.Floats) == 0 {
 		// The passed values only contain histograms.
-		return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram {
+		vec, err := aggrHistOverTime(vals, enh, func(s Series) (*histogram.FloatHistogram, error) {
 			sum := s.Histograms[0].H.Copy()
 			for _, h := range s.Histograms[1:] {
-				sum.Add(h.H)
+				_, err := sum.Add(h.H)
+				if err != nil {
+					return sum, err
+				}
 			}
-			return sum
-		}), nil
+			return sum, nil
+		})
+		if err != nil {
+			metricName := firstSeries.Metric.Get(labels.MetricName)
+			if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
+				return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange()))
+			} else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
+				return enh.Out, annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args[0].PositionRange()))
+			}
+		}
+		return vec, nil
 	}
 	return aggrOverTime(vals, enh, func(s Series) float64 {
 		var sum, c float64
144
promql/histogram_stats_iterator.go
Normal file
144
promql/histogram_stats_iterator.go
Normal file
|
@ -0,0 +1,144 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package promql
|
||||
|
||||
import (
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/value"
|
||||
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||
)
|
||||
|
||||
type histogramStatsIterator struct {
|
||||
chunkenc.Iterator
|
||||
|
||||
currentH *histogram.Histogram
|
||||
lastH *histogram.Histogram
|
||||
|
||||
currentFH *histogram.FloatHistogram
|
||||
lastFH *histogram.FloatHistogram
|
||||
}
|
||||
|
||||
// NewHistogramStatsIterator creates an iterator which returns histogram objects
|
||||
// which have only their sum and count values populated. The iterator handles
|
||||
// counter reset detection internally and sets the counter reset hint accordingly
|
||||
// in each returned histogram objects.
|
||||
func NewHistogramStatsIterator(it chunkenc.Iterator) chunkenc.Iterator {
|
||||
	return &histogramStatsIterator{
		Iterator:  it,
		currentH:  &histogram.Histogram{},
		currentFH: &histogram.FloatHistogram{},
	}
}

// AtHistogram returns the next timestamp/histogram pair. The counter reset
// detection is guaranteed to be correct only when the caller does not switch
// between AtHistogram and AtFloatHistogram calls.
func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) {
	var t int64
	t, f.currentH = f.Iterator.AtHistogram(f.currentH)
	if value.IsStaleNaN(f.currentH.Sum) {
		f.setLastH(f.currentH)
		h = &histogram.Histogram{Sum: f.currentH.Sum}
		return t, h
	}

	if h == nil {
		h = &histogram.Histogram{
			CounterResetHint: f.getResetHint(f.currentH),
			Count:            f.currentH.Count,
			Sum:              f.currentH.Sum,
		}
		f.setLastH(f.currentH)
		return t, h
	}

	h.CounterResetHint = f.getResetHint(f.currentH)
	h.Count = f.currentH.Count
	h.Sum = f.currentH.Sum
	f.setLastH(f.currentH)
	return t, h
}

// AtFloatHistogram returns the next timestamp/float histogram pair. The counter
// reset detection is guaranteed to be correct only when the caller does not
// switch between AtHistogram and AtFloatHistogram calls.
func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
	var t int64
	t, f.currentFH = f.Iterator.AtFloatHistogram(f.currentFH)
	if value.IsStaleNaN(f.currentFH.Sum) {
		f.setLastFH(f.currentFH)
		return t, &histogram.FloatHistogram{Sum: f.currentFH.Sum}
	}

	if fh == nil {
		fh = &histogram.FloatHistogram{
			CounterResetHint: f.getFloatResetHint(f.currentFH.CounterResetHint),
			Count:            f.currentFH.Count,
			Sum:              f.currentFH.Sum,
		}
		f.setLastFH(f.currentFH)
		return t, fh
	}

	fh.CounterResetHint = f.getFloatResetHint(f.currentFH.CounterResetHint)
	fh.Count = f.currentFH.Count
	fh.Sum = f.currentFH.Sum
	f.setLastFH(f.currentFH)
	return t, fh
}

func (f *histogramStatsIterator) setLastH(h *histogram.Histogram) {
	if f.lastH == nil {
		f.lastH = h.Copy()
	} else {
		h.CopyTo(f.lastH)
	}
}

func (f *histogramStatsIterator) setLastFH(fh *histogram.FloatHistogram) {
	if f.lastFH == nil {
		f.lastFH = fh.Copy()
	} else {
		fh.CopyTo(f.lastFH)
	}
}

func (f *histogramStatsIterator) getFloatResetHint(hint histogram.CounterResetHint) histogram.CounterResetHint {
	if hint != histogram.UnknownCounterReset {
		return hint
	}
	if f.lastFH == nil {
		return histogram.NotCounterReset
	}

	if f.currentFH.DetectReset(f.lastFH) {
		return histogram.CounterReset
	}
	return histogram.NotCounterReset
}

func (f *histogramStatsIterator) getResetHint(h *histogram.Histogram) histogram.CounterResetHint {
	if h.CounterResetHint != histogram.UnknownCounterReset {
		return h.CounterResetHint
	}
	if f.lastH == nil {
		return histogram.NotCounterReset
	}

	fh, prevFH := h.ToFloat(nil), f.lastH.ToFloat(nil)
	if fh.DetectReset(prevFH) {
		return histogram.CounterReset
	}
	return histogram.NotCounterReset
}
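
A minimal usage sketch of the iterator above, assuming `it` is an existing chunkenc.Iterator over native histogram samples (for example, obtained from a storage.Series) and `process` is a hypothetical consumer:

	statsIt := NewHistogramStatsIterator(it)
	for statsIt.Next() == chunkenc.ValHistogram {
		// Each returned histogram carries only Count, Sum and the
		// computed CounterResetHint; bucket data is never decoded.
		_, h := statsIt.AtHistogram(nil)
		process(h) // hypothetical consumer of the stats-only histogram
	}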
121
promql/histogram_stats_iterator_test.go
Normal file
@ -0,0 +1,121 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package promql

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
)

func TestHistogramStatsDecoding(t *testing.T) {
	histograms := []*histogram.Histogram{
		tsdbutil.GenerateTestHistogram(0),
		tsdbutil.GenerateTestHistogram(1),
		tsdbutil.GenerateTestHistogram(2),
		tsdbutil.GenerateTestHistogram(2),
	}
	histograms[0].CounterResetHint = histogram.NotCounterReset
	histograms[1].CounterResetHint = histogram.UnknownCounterReset
	histograms[2].CounterResetHint = histogram.CounterReset
	histograms[3].CounterResetHint = histogram.UnknownCounterReset

	expectedHints := []histogram.CounterResetHint{
		histogram.NotCounterReset,
		histogram.NotCounterReset,
		histogram.CounterReset,
		histogram.NotCounterReset,
	}

	t.Run("histogram_stats", func(t *testing.T) {
		decodedStats := make([]*histogram.Histogram, 0)
		statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil))
		for statsIterator.Next() != chunkenc.ValNone {
			_, h := statsIterator.AtHistogram(nil)
			decodedStats = append(decodedStats, h)
		}
		for i := 0; i < len(histograms); i++ {
			require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint)
			require.Equal(t, histograms[i].Count, decodedStats[i].Count)
			require.Equal(t, histograms[i].Sum, decodedStats[i].Sum)
		}
	})
	t.Run("float_histogram_stats", func(t *testing.T) {
		decodedStats := make([]*histogram.FloatHistogram, 0)
		statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil))
		for statsIterator.Next() != chunkenc.ValNone {
			_, h := statsIterator.AtFloatHistogram(nil)
			decodedStats = append(decodedStats, h)
		}
		for i := 0; i < len(histograms); i++ {
			fh := histograms[i].ToFloat(nil)
			require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint)
			require.Equal(t, fh.Count, decodedStats[i].Count)
			require.Equal(t, fh.Sum, decodedStats[i].Sum)
		}
	})
}

type histogramSeries struct {
	histograms []*histogram.Histogram
}

func newHistogramSeries(histograms []*histogram.Histogram) *histogramSeries {
	return &histogramSeries{
		histograms: histograms,
	}
}

func (m histogramSeries) Labels() labels.Labels { return labels.EmptyLabels() }

func (m histogramSeries) Iterator(_ chunkenc.Iterator) chunkenc.Iterator {
	return &histogramIterator{
		i:          -1,
		histograms: m.histograms,
	}
}

type histogramIterator struct {
	i          int
	histograms []*histogram.Histogram
}

func (h *histogramIterator) Next() chunkenc.ValueType {
	h.i++
	if h.i < len(h.histograms) {
		return chunkenc.ValHistogram
	}
	return chunkenc.ValNone
}

func (h *histogramIterator) Seek(t int64) chunkenc.ValueType { panic("not implemented") }

func (h *histogramIterator) At() (int64, float64) { panic("not implemented") }

func (h *histogramIterator) AtHistogram(_ *histogram.Histogram) (int64, *histogram.Histogram) {
	return 0, h.histograms[h.i]
}

func (h *histogramIterator) AtFloatHistogram(_ *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
	return 0, h.histograms[h.i].ToFloat(nil)
}

func (h *histogramIterator) AtT() int64 { return 0 }

func (h *histogramIterator) Err() error { return nil }
@ -198,10 +198,11 @@ type VectorSelector struct {
	// Offset is the offset used during the query execution
	// which is calculated using the original offset, at modifier time,
	// eval time, and subquery offsets in the AST tree.
	Offset        time.Duration
	Timestamp     *int64
	StartOrEnd    ItemType // Set when @ is used with start() or end()
	LabelMatchers []*labels.Matcher
	Offset               time.Duration
	Timestamp            *int64
	SkipHistogramBuckets bool // Set when decoding native histogram buckets is not needed for query evaluation.
	StartOrEnd           ItemType // Set when @ is used with start() or end()
	LabelMatchers        []*labels.Matcher

	// The unexpanded seriesSet populated at query preparation time.
	UnexpandedSeriesSet storage.SeriesSet
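
An aside on intent, inferred from the new field's comment rather than stated elsewhere in this diff: a query that only inspects histogram counts and sums, for example

	histogram_count(rate(http_request_duration_seconds[2m]))

only needs the stats carried by the iterator above, so the selector can mark bucket decoding as skippable for the whole evaluation.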
@ -84,6 +84,7 @@ BUCKETS_DESC
NEGATIVE_BUCKETS_DESC
ZERO_BUCKET_DESC
ZERO_BUCKET_WIDTH_DESC
CUSTOM_VALUES_DESC
%token histogramDescEnd

// Operators.
@ -797,6 +798,11 @@ histogram_desc_item
                        $$ = yylex.(*parser).newMap()
                        $$["z_bucket_w"] = $3
                }
                | CUSTOM_VALUES_DESC COLON bucket_set
                {
                        $$ = yylex.(*parser).newMap()
                        $$["custom_values"] = $3
                }
                | BUCKETS_DESC COLON bucket_set
                {
                        $$ = yylex.(*parser).newMap()
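
For illustration (a hypothetical series description; metric values here are made up), the new CUSTOM_VALUES_DESC token lets histogram literals in test scripts declare custom bucket boundaries:

	{{schema:-53 count:5 sum:10 custom_values:[1 5 10] buckets:[1 3 1]}}

Here `custom_values` lists the boundaries and `buckets` the corresponding counts; schema -53 corresponds to histogram.CustomBucketsSchema, which appears later in this change.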
@ -67,62 +67,63 @@ const BUCKETS_DESC = 57375
const NEGATIVE_BUCKETS_DESC = 57376
const ZERO_BUCKET_DESC = 57377
const ZERO_BUCKET_WIDTH_DESC = 57378
const histogramDescEnd = 57379
const operatorsStart = 57380
const ADD = 57381
const DIV = 57382
const EQLC = 57383
const EQL_REGEX = 57384
const GTE = 57385
const GTR = 57386
const LAND = 57387
const LOR = 57388
const LSS = 57389
const LTE = 57390
const LUNLESS = 57391
const MOD = 57392
const MUL = 57393
const NEQ = 57394
const NEQ_REGEX = 57395
const POW = 57396
const SUB = 57397
const AT = 57398
const ATAN2 = 57399
const operatorsEnd = 57400
const aggregatorsStart = 57401
const AVG = 57402
const BOTTOMK = 57403
const COUNT = 57404
const COUNT_VALUES = 57405
const GROUP = 57406
const MAX = 57407
const MIN = 57408
const QUANTILE = 57409
const STDDEV = 57410
const STDVAR = 57411
const SUM = 57412
const TOPK = 57413
const aggregatorsEnd = 57414
const keywordsStart = 57415
const BOOL = 57416
const BY = 57417
const GROUP_LEFT = 57418
const GROUP_RIGHT = 57419
const IGNORING = 57420
const OFFSET = 57421
const ON = 57422
const WITHOUT = 57423
const keywordsEnd = 57424
const preprocessorStart = 57425
const START = 57426
const END = 57427
const preprocessorEnd = 57428
const startSymbolsStart = 57429
const START_METRIC = 57430
const START_SERIES_DESCRIPTION = 57431
const START_EXPRESSION = 57432
const START_METRIC_SELECTOR = 57433
const startSymbolsEnd = 57434
const CUSTOM_VALUES_DESC = 57379
const histogramDescEnd = 57380
const operatorsStart = 57381
const ADD = 57382
const DIV = 57383
const EQLC = 57384
const EQL_REGEX = 57385
const GTE = 57386
const GTR = 57387
const LAND = 57388
const LOR = 57389
const LSS = 57390
const LTE = 57391
const LUNLESS = 57392
const MOD = 57393
const MUL = 57394
const NEQ = 57395
const NEQ_REGEX = 57396
const POW = 57397
const SUB = 57398
const AT = 57399
const ATAN2 = 57400
const operatorsEnd = 57401
const aggregatorsStart = 57402
const AVG = 57403
const BOTTOMK = 57404
const COUNT = 57405
const COUNT_VALUES = 57406
const GROUP = 57407
const MAX = 57408
const MIN = 57409
const QUANTILE = 57410
const STDDEV = 57411
const STDVAR = 57412
const SUM = 57413
const TOPK = 57414
const aggregatorsEnd = 57415
const keywordsStart = 57416
const BOOL = 57417
const BY = 57418
const GROUP_LEFT = 57419
const GROUP_RIGHT = 57420
const IGNORING = 57421
const OFFSET = 57422
const ON = 57423
const WITHOUT = 57424
const keywordsEnd = 57425
const preprocessorStart = 57426
const START = 57427
const END = 57428
const preprocessorEnd = 57429
const startSymbolsStart = 57430
const START_METRIC = 57431
const START_SERIES_DESCRIPTION = 57432
const START_EXPRESSION = 57433
const START_METRIC_SELECTOR = 57434
const startSymbolsEnd = 57435

var yyToknames = [...]string{
	"$end",
@ -161,6 +162,7 @@ var yyToknames = [...]string{
	"NEGATIVE_BUCKETS_DESC",
	"ZERO_BUCKET_DESC",
	"ZERO_BUCKET_WIDTH_DESC",
	"CUSTOM_VALUES_DESC",
	"histogramDescEnd",
	"operatorsStart",
	"ADD",
@ -235,270 +237,273 @@ var yyExca = [...]int16{
	24, 134,
	-2, 0,
	-1, 58,
	2, 171,
	15, 171,
	75, 171,
	81, 171,
	-2, 100,
	-1, 59,
	2, 172,
	15, 172,
	75, 172,
	81, 172,
	-2, 101,
	-1, 60,
	76, 172,
	82, 172,
	-2, 100,
	-1, 59,
	2, 173,
	15, 173,
	75, 173,
	81, 173,
	-2, 103,
	-1, 61,
	76, 173,
	82, 173,
	-2, 101,
	-1, 60,
	2, 174,
	15, 174,
	75, 174,
	81, 174,
	-2, 104,
	-1, 62,
	76, 174,
	82, 174,
	-2, 103,
	-1, 61,
	2, 175,
	15, 175,
	75, 175,
	81, 175,
	-2, 105,
	-1, 63,
	76, 175,
	82, 175,
	-2, 104,
	-1, 62,
	2, 176,
	15, 176,
	75, 176,
	81, 176,
	-2, 110,
	-1, 64,
	76, 176,
	82, 176,
	-2, 105,
	-1, 63,
	2, 177,
	15, 177,
	75, 177,
	81, 177,
	-2, 112,
	-1, 65,
	76, 177,
	82, 177,
	-2, 110,
	-1, 64,
	2, 178,
	15, 178,
	75, 178,
	81, 178,
	-2, 114,
	-1, 66,
	76, 178,
	82, 178,
	-2, 112,
	-1, 65,
	2, 179,
	15, 179,
	75, 179,
	81, 179,
	-2, 115,
	-1, 67,
	76, 179,
	82, 179,
	-2, 114,
	-1, 66,
	2, 180,
	15, 180,
	75, 180,
	81, 180,
	-2, 116,
	-1, 68,
	76, 180,
	82, 180,
	-2, 115,
	-1, 67,
	2, 181,
	15, 181,
	75, 181,
	81, 181,
	-2, 117,
	-1, 69,
	76, 181,
	82, 181,
	-2, 116,
	-1, 68,
	2, 182,
	15, 182,
	75, 182,
	81, 182,
	76, 182,
	82, 182,
	-2, 117,
	-1, 69,
	2, 183,
	15, 183,
	76, 183,
	82, 183,
	-2, 118,
	-1, 195,
	12, 230,
	13, 230,
	18, 230,
	19, 230,
	25, 230,
	39, 230,
	45, 230,
	46, 230,
	49, 230,
	55, 230,
	60, 230,
	61, 230,
	62, 230,
	63, 230,
	64, 230,
	65, 230,
	66, 230,
	67, 230,
	68, 230,
	69, 230,
	70, 230,
	71, 230,
	75, 230,
	79, 230,
	81, 230,
	84, 230,
	85, 230,
	12, 231,
	13, 231,
	18, 231,
	19, 231,
	25, 231,
	40, 231,
	46, 231,
	47, 231,
	50, 231,
	56, 231,
	61, 231,
	62, 231,
	63, 231,
	64, 231,
	65, 231,
	66, 231,
	67, 231,
	68, 231,
	69, 231,
	70, 231,
	71, 231,
	72, 231,
	76, 231,
	80, 231,
	82, 231,
	85, 231,
	86, 231,
	-2, 0,
	-1, 196,
	12, 230,
	13, 230,
	18, 230,
	19, 230,
	25, 230,
	39, 230,
	45, 230,
	46, 230,
	49, 230,
	55, 230,
	60, 230,
	61, 230,
	62, 230,
	63, 230,
	64, 230,
	65, 230,
	66, 230,
	67, 230,
	68, 230,
	69, 230,
	70, 230,
	71, 230,
	75, 230,
	79, 230,
	81, 230,
	84, 230,
	85, 230,
	12, 231,
	13, 231,
	18, 231,
	19, 231,
	25, 231,
	40, 231,
	46, 231,
	47, 231,
	50, 231,
	56, 231,
	61, 231,
	62, 231,
	63, 231,
	64, 231,
	65, 231,
	66, 231,
	67, 231,
	68, 231,
	69, 231,
	70, 231,
	71, 231,
	72, 231,
	76, 231,
	80, 231,
	82, 231,
	85, 231,
	86, 231,
	-2, 0,
	-1, 217,
	21, 228,
	-2, 0,
	-1, 285,
	21, 229,
	-2, 0,
	-1, 286,
	21, 230,
	-2, 0,
}

const yyPrivate = 57344

const yyLast = 742
const yyLast = 778

var yyAct = [...]int16{
	151, 322, 320, 268, 327, 148, 221, 37, 187, 144,
	281, 280, 152, 113, 77, 173, 104, 102, 101, 6,
	128, 223, 105, 193, 155, 194, 195, 196, 339, 262,
	260, 233, 317, 316, 57, 100, 294, 239, 103, 146,
	300, 313, 263, 156, 156, 283, 147, 338, 259, 123,
	337, 106, 252, 311, 155, 299, 340, 301, 264, 157,
	157, 108, 298, 109, 235, 236, 292, 251, 237, 107,
	155, 292, 174, 191, 175, 96, 250, 99, 258, 224,
	151, 324, 322, 268, 329, 148, 221, 37, 187, 144,
	282, 281, 152, 113, 77, 173, 104, 102, 101, 6,
	223, 193, 105, 194, 195, 196, 128, 262, 260, 155,
	233, 103, 342, 293, 100, 319, 239, 116, 146, 318,
	315, 263, 156, 123, 106, 147, 284, 114, 295, 116,
	156, 341, 175, 259, 340, 253, 57, 264, 157, 114,
	117, 108, 313, 109, 235, 236, 157, 112, 237, 107,
	323, 174, 117, 175, 155, 96, 250, 99, 293, 224,
	226, 228, 229, 230, 238, 240, 243, 244, 245, 246,
	247, 110, 145, 225, 227, 231, 232, 234, 241, 242,
	98, 257, 321, 248, 249, 2, 3, 4, 5, 218,
	158, 104, 177, 217, 168, 162, 165, 105, 175, 160,
	164, 161, 176, 178, 189, 213, 106, 328, 216, 256,
	183, 179, 192, 163, 181, 100, 190, 197, 198, 199,
	247, 177, 145, 225, 227, 231, 232, 234, 241, 242,
	98, 176, 178, 248, 249, 104, 2, 3, 4, 5,
	158, 105, 177, 110, 168, 162, 165, 302, 150, 160,
	191, 161, 176, 178, 189, 155, 213, 343, 106, 330,
	72, 179, 192, 33, 181, 155, 190, 197, 198, 199,
	200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
	210, 211, 255, 182, 72, 212, 177, 214, 215, 33,
	82, 84, 85, 7, 86, 87, 176, 178, 90, 91,
	223, 93, 94, 95, 116, 96, 97, 99, 83, 147,
	233, 286, 289, 116, 114, 254, 239, 288, 147, 172,
	220, 124, 253, 114, 171, 310, 309, 117, 120, 261,
	98, 112, 287, 119, 278, 279, 117, 170, 282, 10,
	308, 159, 307, 235, 236, 312, 118, 237, 147, 74,
	306, 305, 304, 303, 302, 250, 81, 285, 224, 226,
	228, 229, 230, 238, 240, 243, 244, 245, 246, 247,
	79, 79, 225, 227, 231, 232, 234, 241, 242, 48,
	78, 78, 248, 249, 122, 73, 121, 150, 180, 76,
	290, 291, 293, 56, 295, 8, 9, 9, 34, 35,
	1, 284, 296, 297, 155, 129, 130, 131, 132, 133,
	210, 211, 185, 301, 258, 212, 156, 214, 215, 188,
	256, 183, 290, 191, 252, 164, 155, 289, 300, 218,
	223, 79, 157, 217, 7, 299, 312, 257, 163, 251,
	233, 78, 288, 255, 182, 254, 239, 156, 216, 180,
	220, 124, 172, 120, 147, 311, 314, 171, 119, 261,
	287, 153, 154, 157, 279, 280, 79, 147, 283, 310,
	170, 118, 159, 10, 235, 236, 78, 309, 237, 147,
	308, 307, 306, 74, 76, 305, 250, 286, 304, 224,
	226, 228, 229, 230, 238, 240, 243, 244, 245, 246,
	247, 303, 81, 225, 227, 231, 232, 234, 241, 242,
	48, 34, 1, 248, 249, 122, 73, 121, 285, 47,
	291, 292, 294, 56, 296, 8, 9, 9, 46, 35,
	45, 44, 297, 298, 127, 129, 130, 131, 132, 133,
	134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
	47, 46, 45, 44, 156, 314, 315, 127, 43, 42,
	41, 185, 319, 125, 166, 324, 325, 326, 188, 323,
	157, 329, 191, 331, 330, 155, 40, 126, 332, 333,
	100, 51, 72, 334, 53, 39, 38, 22, 52, 336,
	49, 167, 186, 335, 54, 156, 265, 80, 341, 153,
	154, 184, 219, 75, 115, 82, 84, 149, 70, 55,
	222, 157, 50, 111, 18, 19, 93, 94, 20, 0,
	96, 97, 99, 83, 71, 0, 0, 0, 0, 58,
	43, 42, 41, 125, 166, 40, 316, 317, 126, 39,
	38, 49, 186, 321, 338, 265, 326, 327, 328, 80,
	325, 184, 219, 332, 331, 334, 333, 75, 115, 149,
	335, 336, 100, 51, 72, 337, 53, 55, 222, 22,
	52, 339, 50, 167, 111, 0, 54, 0, 0, 0,
	0, 344, 0, 0, 0, 0, 0, 0, 82, 84,
	0, 70, 0, 0, 0, 0, 0, 18, 19, 93,
	94, 20, 0, 96, 97, 99, 83, 71, 0, 0,
	0, 0, 58, 59, 60, 61, 62, 63, 64, 65,
	66, 67, 68, 69, 0, 0, 0, 13, 98, 0,
	0, 24, 0, 30, 0, 0, 31, 32, 36, 100,
	51, 72, 0, 53, 267, 0, 22, 52, 0, 0,
	0, 266, 0, 54, 0, 270, 271, 269, 276, 278,
	275, 277, 272, 273, 274, 0, 84, 0, 70, 0,
	0, 0, 0, 0, 18, 19, 93, 94, 20, 0,
	96, 0, 99, 83, 71, 0, 0, 0, 0, 58,
	59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
	69, 0, 0, 0, 13, 98, 0, 0, 24, 0,
	30, 0, 0, 31, 32, 36, 100, 51, 72, 0,
	53, 267, 0, 22, 52, 0, 0, 0, 266, 0,
	54, 0, 270, 271, 269, 275, 277, 274, 276, 272,
	273, 0, 84, 0, 70, 0, 0, 0, 0, 0,
	18, 19, 93, 94, 20, 0, 96, 0, 99, 83,
	71, 0, 0, 0, 0, 58, 59, 60, 61, 62,
	63, 64, 65, 66, 67, 68, 69, 0, 0, 0,
	13, 98, 0, 0, 24, 0, 30, 0, 0, 31,
	32, 51, 72, 0, 53, 318, 0, 22, 52, 0,
	0, 0, 0, 0, 54, 0, 270, 271, 269, 275,
	277, 274, 276, 272, 273, 0, 0, 0, 70, 0,
	0, 0, 0, 0, 18, 19, 0, 0, 20, 0,
	0, 0, 17, 72, 71, 0, 0, 0, 22, 58,
	59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
	69, 0, 0, 0, 13, 0, 0, 0, 24, 0,
	30, 0, 0, 31, 32, 18, 19, 0, 0, 20,
	0, 0, 0, 17, 33, 0, 0, 0, 0, 22,
	11, 12, 14, 15, 16, 21, 23, 25, 26, 27,
	28, 29, 0, 0, 0, 13, 0, 0, 0, 24,
	0, 30, 0, 0, 31, 32, 18, 19, 0, 0,
	20, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 11, 12, 14, 15, 16, 21, 23, 25, 26,
	27, 28, 29, 100, 0, 0, 13, 0, 0, 0,
	24, 169, 30, 0, 0, 31, 32, 0, 0, 0,
	0, 0, 100, 0, 0, 0, 0, 0, 82, 84,
	85, 0, 86, 87, 88, 89, 90, 91, 92, 93,
	30, 0, 0, 31, 32, 51, 72, 0, 53, 320,
	0, 22, 52, 0, 0, 0, 0, 0, 54, 0,
	270, 271, 269, 276, 278, 275, 277, 272, 273, 274,
	0, 0, 0, 70, 0, 0, 17, 72, 0, 18,
	19, 0, 22, 20, 0, 0, 0, 0, 0, 71,
	0, 0, 0, 0, 58, 59, 60, 61, 62, 63,
	64, 65, 66, 67, 68, 69, 0, 0, 0, 13,
	18, 19, 0, 24, 20, 30, 0, 0, 31, 32,
	0, 0, 0, 0, 0, 11, 12, 14, 15, 16,
	21, 23, 25, 26, 27, 28, 29, 17, 33, 0,
	13, 0, 0, 22, 24, 0, 30, 0, 0, 31,
	32, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 18, 19, 0, 0, 20, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 11, 12, 14, 15,
	16, 21, 23, 25, 26, 27, 28, 29, 100, 0,
	0, 13, 0, 0, 0, 24, 169, 30, 0, 0,
	31, 32, 0, 0, 0, 0, 0, 100, 0, 0,
	0, 0, 0, 0, 82, 84, 85, 0, 86, 87,
	88, 89, 90, 91, 92, 93, 94, 95, 0, 96,
	97, 99, 83, 82, 84, 85, 0, 86, 87, 88,
	89, 90, 91, 92, 93, 94, 95, 0, 96, 97,
	99, 83, 100, 0, 98, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 100, 0, 98, 0, 0, 0, 0, 82, 84,
	85, 0, 86, 87, 88, 0, 90, 91, 92, 93,
	94, 95, 0, 96, 97, 99, 83, 82, 84, 85,
	0, 86, 87, 88, 89, 90, 91, 92, 93, 94,
	95, 0, 96, 97, 99, 83, 100, 0, 98, 0,
	0, 86, 87, 0, 0, 90, 91, 0, 93, 94,
	95, 0, 96, 97, 99, 83, 0, 0, 98, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 98, 0, 0,
	0, 82, 84, 85, 0, 86, 87, 88, 0, 90,
	91, 92, 93, 94, 95, 0, 96, 97, 99, 83,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 98,
	0, 0, 0, 0, 0, 0, 0, 98,
}

var yyPact = [...]int16{
	17, 153, 541, 541, 385, 500, -1000, -1000, -1000, 146,
	17, 164, 555, 555, 388, 494, -1000, -1000, -1000, 120,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, -1000, 239, -1000, 224, -1000, 618, -1000, -1000,
	-1000, -1000, -1000, 204, -1000, 240, -1000, 633, -1000, -1000,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
	36, 111, -1000, 459, -1000, 459, 141, -1000, -1000, -1000,
	29, 113, -1000, 463, -1000, 463, 117, -1000, -1000, -1000,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, 181, -1000, -1000, 196, -1000, -1000, 252, -1000,
	25, -1000, -54, -54, -54, -54, -54, -54, -54, -54,
	-54, -54, -54, -54, -54, -54, -54, -54, 37, 255,
	209, 111, -59, -1000, 118, 118, 309, -1000, 599, 21,
	-1000, 187, -1000, -1000, 70, 114, -1000, -1000, -1000, 238,
	-1000, 128, -1000, 296, 459, -1000, -55, -50, -1000, 459,
	459, 459, 459, 459, 459, 459, 459, 459, 459, 459,
	459, 459, 459, 459, -1000, 170, -1000, -1000, -1000, 110,
	-1000, -1000, -1000, -1000, -1000, -1000, 51, 51, 107, -1000,
	-1000, -1000, -1000, 168, -1000, -1000, 45, -1000, 618, -1000,
	-1000, 172, -1000, 127, -1000, -1000, -1000, -1000, -1000, 76,
	-1000, -1000, -1000, -1000, -1000, 22, 4, 3, -1000, -1000,
	-1000, 384, 382, 118, 118, 118, 118, 21, 21, 306,
	306, 306, 121, 662, 306, 306, 121, 21, 21, 306,
	21, 382, -1000, 23, -1000, -1000, -1000, 179, -1000, 180,
	-1000, -1000, 47, -1000, -1000, 191, -1000, -1000, 253, -1000,
	19, -1000, -49, -49, -49, -49, -49, -49, -49, -49,
	-49, -49, -49, -49, -49, -49, -49, -49, 36, 116,
	210, 113, -60, -1000, 163, 163, 311, -1000, 614, 20,
	-1000, 190, -1000, -1000, 69, 48, -1000, -1000, -1000, 169,
	-1000, 159, -1000, 147, 463, -1000, -58, -53, -1000, 463,
	463, 463, 463, 463, 463, 463, 463, 463, 463, 463,
	463, 463, 463, 463, -1000, 185, -1000, -1000, -1000, 111,
	-1000, -1000, -1000, -1000, -1000, -1000, 55, 55, 167, -1000,
	-1000, -1000, -1000, 168, -1000, -1000, 157, -1000, 633, -1000,
	-1000, 35, -1000, 158, -1000, -1000, -1000, -1000, -1000, 152,
	-1000, -1000, -1000, -1000, -1000, 27, 2, 1, -1000, -1000,
	-1000, 387, 385, 163, 163, 163, 163, 20, 20, 308,
	308, 308, 697, 678, 308, 308, 697, 20, 20, 308,
	20, 385, -1000, 24, -1000, -1000, -1000, 198, -1000, 160,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, 459, -1000, -1000, -1000, -1000, -1000, -1000, 52,
	52, 10, 52, 57, 57, 38, 40, -1000, -1000, 218,
	217, 216, 215, 214, 206, 204, 190, 189, -1000, -1000,
	-1000, -1000, -1000, -1000, 32, 213, -1000, -1000, 19, -1000,
	618, -1000, -1000, -1000, 52, -1000, 7, 6, 458, -1000,
	-1000, -1000, 47, 5, 51, 51, 51, 113, 47, 113,
	47, -1000, -1000, -1000, -1000, -1000, 52, 52, -1000, -1000,
	-1000, 52, -1000, -1000, -1000, -1000, -1000, -1000, 51, -1000,
	-1000, -1000, -1000, -1000, -1000, 26, -1000, 35, -1000, -1000,
	-1000, -1000,
	-1000, -1000, 463, -1000, -1000, -1000, -1000, -1000, -1000, 59,
	59, 22, 59, 104, 104, 151, 100, -1000, -1000, 235,
	222, 219, 216, 215, 214, 211, 203, 189, 170, -1000,
	-1000, -1000, -1000, -1000, -1000, 41, 194, -1000, -1000, 18,
	-1000, 633, -1000, -1000, -1000, 59, -1000, 13, 9, 462,
	-1000, -1000, -1000, 14, 10, 55, 55, 55, 115, 115,
	14, 115, 14, -1000, -1000, -1000, -1000, -1000, 59, 59,
	-1000, -1000, -1000, 59, -1000, -1000, -1000, -1000, -1000, -1000,
	55, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 30, -1000,
	106, -1000, -1000, -1000, -1000,
}

var yyPgo = [...]int16{
	0, 353, 13, 352, 6, 15, 350, 263, 349, 347,
	344, 209, 265, 343, 14, 342, 10, 11, 341, 337,
	8, 336, 3, 4, 333, 2, 1, 0, 332, 12,
	5, 330, 326, 18, 191, 325, 317, 7, 316, 304,
	17, 303, 34, 300, 299, 298, 297, 293, 292, 291,
	290, 249, 9, 271, 270, 268,
	0, 334, 13, 332, 6, 15, 328, 263, 327, 319,
	318, 213, 265, 317, 14, 312, 10, 11, 311, 309,
	8, 305, 3, 4, 304, 2, 1, 0, 302, 12,
	5, 301, 300, 18, 191, 299, 298, 7, 295, 294,
	17, 293, 56, 292, 291, 290, 274, 271, 270, 268,
	259, 250, 9, 258, 252, 251,
}

var yyR1 = [...]int8{
@ -518,14 +523,14 @@ var yyR1 = [...]int8{
	14, 14, 14, 55, 19, 19, 19, 19, 18, 18,
	18, 18, 18, 18, 18, 18, 18, 28, 28, 28,
	20, 20, 20, 20, 21, 21, 21, 22, 22, 22,
	22, 22, 22, 22, 22, 22, 23, 23, 24, 24,
	24, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 6, 6, 6, 6, 6, 6, 6,
	22, 22, 22, 22, 22, 22, 22, 23, 23, 24,
	24, 24, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	8, 8, 5, 5, 5, 5, 44, 27, 29, 29,
	30, 30, 26, 25, 25, 52, 48, 10, 53, 53,
	17, 17,
	6, 8, 8, 5, 5, 5, 5, 44, 27, 29,
	29, 30, 30, 26, 25, 25, 52, 48, 10, 53,
	53, 17, 17,
}

var yyR2 = [...]int8{
@ -545,52 +550,52 @@ var yyR2 = [...]int8{
	3, 2, 1, 2, 0, 3, 2, 1, 1, 3,
	1, 3, 4, 1, 3, 5, 5, 1, 1, 1,
	4, 3, 3, 2, 3, 1, 2, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 4, 3, 3, 1,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	3, 3, 3, 3, 3, 3, 3, 4, 3, 3,
	1, 2, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
	1, 1, 1, 2, 1, 1, 1, 1, 0, 1,
	0, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
	2, 1, 1, 1, 2, 1, 1, 1, 1, 0,
	1, 0, 1,
}

var yyChk = [...]int16{
	-1000, -54, 88, 89, 90, 91, 2, 10, -12, -7,
	-11, 60, 61, 75, 62, 63, 64, 12, 45, 46,
	49, 65, 18, 66, 79, 67, 68, 69, 70, 71,
	81, 84, 85, 13, -55, -12, 10, -37, -32, -35,
	-1000, -54, 89, 90, 91, 92, 2, 10, -12, -7,
	-11, 61, 62, 76, 63, 64, 65, 12, 46, 47,
	50, 66, 18, 67, 80, 68, 69, 70, 71, 72,
	82, 85, 86, 13, -55, -12, 10, -37, -32, -35,
	-38, -43, -44, -45, -47, -48, -49, -50, -51, -31,
	-3, 12, 19, 15, 25, -8, -7, -42, 60, 61,
	62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
	39, 55, 13, -51, -11, -13, 20, -14, 12, 2,
	-19, 2, 39, 57, 40, 41, 43, 44, 45, 46,
	47, 48, 49, 50, 51, 52, 54, 55, 79, 56,
	14, -33, -40, 2, 75, 81, 15, -40, -37, -37,
	-3, 12, 19, 15, 25, -8, -7, -42, 61, 62,
	63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
	40, 56, 13, -51, -11, -13, 20, -14, 12, 2,
	-19, 2, 40, 58, 41, 42, 44, 45, 46, 47,
	48, 49, 50, 51, 52, 53, 55, 56, 80, 57,
	14, -33, -40, 2, 76, 82, 15, -40, -37, -37,
	-42, -1, 20, -2, 12, -10, 2, 25, 20, 7,
	2, 4, 2, 24, -34, -41, -36, -46, 74, -34,
	2, 4, 2, 24, -34, -41, -36, -46, 75, -34,
	-34, -34, -34, -34, -34, -34, -34, -34, -34, -34,
	-34, -34, -34, -34, -52, 55, 2, 9, -30, -9,
	2, -27, -29, 84, 85, 19, 39, 55, -52, 2,
	-34, -34, -34, -34, -52, 56, 2, 9, -30, -9,
	2, -27, -29, 85, 86, 19, 40, 56, -52, 2,
	-40, -33, -16, 15, 2, -16, -39, 22, -37, 22,
	20, 7, 2, -5, 2, 4, 52, 42, 53, -5,
	20, 7, 2, -5, 2, 4, 53, 43, 54, -5,
	20, -14, 25, 2, -18, 5, -28, -20, 12, -27,
	-29, 16, -37, 78, 80, 76, 77, -37, -37, -37,
	-29, 16, -37, 79, 81, 77, 78, -37, -37, -37,
	-37, -37, -37, -37, -37, -37, -37, -37, -37, -37,
	-37, -37, -52, 15, -27, -27, 21, 6, 2, -15,
	22, -4, -6, 2, 60, 74, 61, 75, 62, 63,
	64, 76, 77, 12, 78, 45, 46, 49, 65, 18,
	66, 79, 80, 67, 68, 69, 70, 71, 84, 85,
	57, 22, 7, 20, -2, 25, 2, 25, 2, 26,
	26, -29, 26, 39, 55, -21, 24, 17, -22, 30,
	28, 29, 35, 36, 33, 31, 34, 32, -16, -16,
	-17, -16, -17, 22, -53, -52, 2, 22, 7, 2,
	-37, -26, 19, -26, 26, -26, -20, -20, 24, 17,
	2, 17, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 21, 2, 22, -4, -26, 26, 26, 17, -22,
	-25, 55, -26, -30, -27, -27, -27, -23, 14, -25,
	-23, -25, -26, -26, -26, -24, -27, 24, 21, 2,
	21, -27,
	22, -4, -6, 2, 61, 75, 62, 76, 63, 64,
	65, 77, 78, 12, 79, 46, 47, 50, 66, 18,
	67, 80, 81, 68, 69, 70, 71, 72, 85, 86,
	58, 22, 7, 20, -2, 25, 2, 25, 2, 26,
	26, -29, 26, 40, 56, -21, 24, 17, -22, 30,
	28, 29, 35, 36, 37, 33, 31, 34, 32, -16,
	-16, -17, -16, -17, 22, -53, -52, 2, 22, 7,
	2, -37, -26, 19, -26, 26, -26, -20, -20, 24,
	17, 2, 17, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 21, 2, 22, -4, -26, 26, 26,
	17, -22, -25, 56, -26, -30, -27, -27, -27, -23,
	14, -23, -25, -23, -25, -26, -26, -26, -24, -27,
	24, 21, 2, 21, -27,
}

var yyDef = [...]int16{

@ -599,36 +604,36 @@ var yyDef = [...]int16{
	109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
	119, 120, 121, 0, 2, -2, 3, 4, 8, 9,
	10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
	0, 106, 216, 0, 226, 0, 83, 84, -2, -2,
	0, 106, 217, 0, 227, 0, 83, 84, -2, -2,
	-2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	210, 211, 0, 5, 98, 0, 124, 127, 0, 132,
	211, 212, 0, 5, 98, 0, 124, 127, 0, 132,
	133, 137, 43, 43, 43, 43, 43, 43, 43, 43,
	43, 43, 43, 43, 43, 43, 43, 43, 0, 0,
	0, 0, 22, 23, 0, 0, 0, 60, 0, 81,
	82, 0, 87, 89, 0, 93, 97, 227, 122, 0,
	82, 0, 87, 89, 0, 93, 97, 228, 122, 0,
	128, 0, 131, 136, 0, 42, 47, 48, 44, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 67, 0, 69, 225, 70, 0,
	72, 220, 221, 73, 74, 217, 0, 0, 0, 80,
	0, 0, 0, 0, 67, 0, 69, 226, 70, 0,
	72, 221, 222, 73, 74, 218, 0, 0, 0, 80,
	20, 21, 24, 0, 54, 25, 0, 62, 64, 66,
	85, 0, 90, 0, 96, 212, 213, 214, 215, 0,
	85, 0, 90, 0, 96, 213, 214, 215, 216, 0,
	123, 126, 129, 130, 135, 138, 140, 143, 147, 148,
	149, 0, 26, 0, 0, -2, -2, 27, 28, 29,
	30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
	40, 41, 68, 0, 218, 219, 75, -2, 79, 0,
	53, 56, 58, 59, 183, 184, 185, 186, 187, 188,
	189, 190, 191, 192, 193, 194, 195, 196, 197, 198,
	199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
	209, 61, 65, 86, 88, 91, 95, 92, 94, 0,
	40, 41, 68, 0, 219, 220, 75, -2, 79, 0,
	53, 56, 58, 59, 184, 185, 186, 187, 188, 189,
	190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
	200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
	210, 61, 65, 86, 88, 91, 95, 92, 94, 0,
	0, 0, 0, 0, 0, 0, 0, 153, 155, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 45, 46,
	49, 231, 50, 71, 0, -2, 78, 51, 0, 57,
	63, 139, 222, 141, 0, 144, 0, 0, 0, 151,
	156, 152, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 76, 77, 52, 55, 142, 0, 0, 150, 154,
	157, 0, 224, 158, 159, 160, 161, 162, 0, 163,
	164, 165, 145, 146, 223, 0, 169, 0, 167, 170,
	166, 168,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 45,
	46, 49, 232, 50, 71, 0, -2, 78, 51, 0,
	57, 63, 139, 223, 141, 0, 144, 0, 0, 0,
	151, 156, 152, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 76, 77, 52, 55, 142, 0, 0,
	150, 154, 157, 0, 225, 158, 159, 160, 161, 162,
	0, 163, 164, 165, 166, 145, 146, 224, 0, 170,
	0, 168, 171, 167, 169,
}

var yyTok1 = [...]int8{
@ -645,7 +650,7 @@ var yyTok2 = [...]int8{
	62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
	72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
	82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
	92,
	92, 93,
}

var yyTok3 = [...]int8{
@ -1738,47 +1743,53 @@ yydefault:
		yyDollar = yyS[yypt-3 : yypt+1]
		{
			yyVAL.descriptors = yylex.(*parser).newMap()
			yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set
			yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set
		}
	case 163:
		yyDollar = yyS[yypt-3 : yypt+1]
		{
			yyVAL.descriptors = yylex.(*parser).newMap()
			yyVAL.descriptors["offset"] = yyDollar[3].int
			yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set
		}
	case 164:
		yyDollar = yyS[yypt-3 : yypt+1]
		{
			yyVAL.descriptors = yylex.(*parser).newMap()
			yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set
			yyVAL.descriptors["offset"] = yyDollar[3].int
		}
	case 165:
		yyDollar = yyS[yypt-3 : yypt+1]
		{
			yyVAL.descriptors = yylex.(*parser).newMap()
			yyVAL.descriptors["n_offset"] = yyDollar[3].int
			yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set
		}
	case 166:
		yyDollar = yyS[yypt-4 : yypt+1]
		yyDollar = yyS[yypt-3 : yypt+1]
		{
			yyVAL.bucket_set = yyDollar[2].bucket_set
			yyVAL.descriptors = yylex.(*parser).newMap()
			yyVAL.descriptors["n_offset"] = yyDollar[3].int
		}
	case 167:
		yyDollar = yyS[yypt-3 : yypt+1]
		yyDollar = yyS[yypt-4 : yypt+1]
		{
			yyVAL.bucket_set = yyDollar[2].bucket_set
		}
	case 168:
		yyDollar = yyS[yypt-3 : yypt+1]
		{
			yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float)
			yyVAL.bucket_set = yyDollar[2].bucket_set
		}
	case 169:
		yyDollar = yyS[yypt-3 : yypt+1]
		{
			yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float)
		}
	case 170:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			yyVAL.bucket_set = []float64{yyDollar[1].float}
		}
	case 216:
	case 217:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			yyVAL.node = &NumberLiteral{

@ -1786,22 +1797,22 @@ yydefault:
				PosRange: yyDollar[1].item.PositionRange(),
			}
		}
	case 217:
	case 218:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val)
		}
	case 218:
	case 219:
		yyDollar = yyS[yypt-2 : yypt+1]
		{
			yyVAL.float = yyDollar[2].float
		}
	case 219:
	case 220:
		yyDollar = yyS[yypt-2 : yypt+1]
		{
			yyVAL.float = -yyDollar[2].float
		}
	case 222:
	case 223:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			var err error

@ -1810,17 +1821,17 @@ yydefault:
				yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err)
			}
		}
	case 223:
	case 224:
		yyDollar = yyS[yypt-2 : yypt+1]
		{
			yyVAL.int = -int64(yyDollar[2].uint)
		}
	case 224:
	case 225:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			yyVAL.int = int64(yyDollar[1].uint)
		}
	case 225:
	case 226:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			var err error

@ -1829,7 +1840,7 @@ yydefault:
				yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err)
			}
		}
	case 226:
	case 227:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			yyVAL.node = &StringLiteral{

@ -1837,7 +1848,7 @@ yydefault:
				PosRange: yyDollar[1].item.PositionRange(),
			}
		}
	case 227:
	case 228:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			yyVAL.item = Item{

@ -1846,12 +1857,12 @@ yydefault:
				Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val),
			}
		}
	case 228:
	case 229:
		yyDollar = yyS[yypt-0 : yypt+1]
		{
			yyVAL.duration = 0
		}
	case 230:
	case 231:
		yyDollar = yyS[yypt-0 : yypt+1]
		{
			yyVAL.strings = nil
@ -135,15 +135,16 @@ var key = map[string]ItemType{
}

var histogramDesc = map[string]ItemType{
	"sum":        SUM_DESC,
	"count":      COUNT_DESC,
	"schema":     SCHEMA_DESC,
	"offset":     OFFSET_DESC,
	"n_offset":   NEGATIVE_OFFSET_DESC,
	"buckets":    BUCKETS_DESC,
	"n_buckets":  NEGATIVE_BUCKETS_DESC,
	"z_bucket":   ZERO_BUCKET_DESC,
	"z_bucket_w": ZERO_BUCKET_WIDTH_DESC,
	"sum":           SUM_DESC,
	"count":         COUNT_DESC,
	"schema":        SCHEMA_DESC,
	"offset":        OFFSET_DESC,
	"n_offset":      NEGATIVE_OFFSET_DESC,
	"buckets":       BUCKETS_DESC,
	"n_buckets":     NEGATIVE_BUCKETS_DESC,
	"z_bucket":      ZERO_BUCKET_DESC,
	"z_bucket_w":    ZERO_BUCKET_WIDTH_DESC,
	"custom_values": CUSTOM_VALUES_DESC,
}

// ItemTypeStr is the default string representations for common Items. It does not
@ -313,6 +314,11 @@ func (l *Lexer) accept(valid string) bool {
	return false
}

// is peeks and returns true if the next rune is contained in the provided string.
func (l *Lexer) is(valid string) bool {
	return strings.ContainsRune(valid, l.peek())
}

// acceptRun consumes a run of runes from the valid set.
func (l *Lexer) acceptRun(valid string) {
	for strings.ContainsRune(valid, l.next()) {
@ -901,19 +907,78 @@ func acceptRemainingDuration(l *Lexer) bool {
// scanNumber scans numbers of different formats. The scanned Item is
// not necessarily a valid number. This case is caught by the parser.
func (l *Lexer) scanNumber() bool {
	digits := "0123456789"
	// Modify the digit pattern if the number is hexadecimal.
	digitPattern := "0123456789"
	// Disallow hexadecimal in series descriptions as the syntax is ambiguous.
	if !l.seriesDesc && l.accept("0") && l.accept("xX") {
		digits = "0123456789abcdefABCDEF"
	if !l.seriesDesc &&
		l.accept("0") && l.accept("xX") {
		l.accept("_") // eg., 0X_1FFFP-16 == 0.1249847412109375
		digitPattern = "0123456789abcdefABCDEF"
	}
	l.acceptRun(digits)
	if l.accept(".") {
		l.acceptRun(digits)
	}
	if l.accept("eE") {
		l.accept("+-")
		l.acceptRun("0123456789")
	const (
		// Define dot, exponent, and underscore patterns.
		dotPattern        = "."
		exponentPattern   = "eE"
		underscorePattern = "_"
		// Anti-patterns are rune sets that cannot follow their respective rune.
		dotAntiPattern        = "_."
		exponentAntiPattern   = "._eE" // and EOL.
		underscoreAntiPattern = "._eE" // and EOL.
	)
	// All numbers follow the prefix: [.][d][d._eE]*
	l.accept(dotPattern)
	l.accept(digitPattern)
	// [d._eE]* hereon.
	dotConsumed := false
	exponentConsumed := false
	for l.is(digitPattern + dotPattern + underscorePattern + exponentPattern) {
		// "." cannot repeat.
		if l.is(dotPattern) {
			if dotConsumed {
				l.accept(dotPattern)
				return false
			}
		}
		// "eE" cannot repeat.
		if l.is(exponentPattern) {
			if exponentConsumed {
				l.accept(exponentPattern)
				return false
			}
		}
		// Handle dots.
		if l.accept(dotPattern) {
			dotConsumed = true
			if l.accept(dotAntiPattern) {
				return false
			}
			// Fractional hexadecimal literals are not allowed.
			if len(digitPattern) > 10 /* 0x[\da-fA-F].[\d]+p[\d] */ {
				return false
			}
			continue
		}
		// Handle exponents.
		if l.accept(exponentPattern) {
			exponentConsumed = true
			l.accept("+-")
			if l.accept(exponentAntiPattern) || l.peek() == eof {
				return false
			}
			continue
		}
		// Handle underscores.
		if l.accept(underscorePattern) {
			if l.accept(underscoreAntiPattern) || l.peek() == eof {
				return false
			}

			continue
		}
		// Handle digits at the end since we already consumed before this loop.
		l.acceptRun(digitPattern)
	}

	// Next thing must not be alphanumeric unless it's the times token
	// for series repetitions.
	if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) {
@ -132,6 +132,84 @@ var tests = []struct {
		}, {
			input:    "0x123",
			expected: []Item{{NUMBER, 0, "0x123"}},
		}, {
			input: "1..2",
			fail:  true,
		}, {
			input: "1.2.",
			fail:  true,
		}, {
			input:    "00_1_23_4.56_7_8",
			expected: []Item{{NUMBER, 0, "00_1_23_4.56_7_8"}},
		}, {
			input: "00_1_23__4.56_7_8",
			fail:  true,
		}, {
			input: "00_1_23_4._56_7_8",
			fail:  true,
		}, {
			input: "00_1_23_4_.56_7_8",
			fail:  true,
		}, {
			input:    "0x1_2_34",
			expected: []Item{{NUMBER, 0, "0x1_2_34"}},
		}, {
			input: "0x1_2__34",
			fail:  true,
		}, {
			input: "0x1_2__34.5_6p1", // "0x1.1p1"-based formats are not supported yet.
			fail:  true,
		}, {
			input: "0x1_2__34.5_6",
			fail:  true,
		}, {
			input: "0x1_2__34.56",
			fail:  true,
		}, {
			input: "1_e2",
			fail:  true,
		}, {
			input:    "1.e2",
			expected: []Item{{NUMBER, 0, "1.e2"}},
		}, {
			input: "1e.2",
			fail:  true,
		}, {
			input: "1e+.2",
			fail:  true,
		}, {
			input: "1ee2",
			fail:  true,
		}, {
			input: "1e+e2",
			fail:  true,
		}, {
			input: "1e",
			fail:  true,
		}, {
			input: "1e+",
			fail:  true,
		}, {
			input:    "1e1_2_34",
			expected: []Item{{NUMBER, 0, "1e1_2_34"}},
		}, {
			input: "1e_1_2_34",
			fail:  true,
		}, {
			input: "1e1_2__34",
			fail:  true,
		}, {
			input: "1e+_1_2_34",
			fail:  true,
		}, {
			input: "1e-_1_2_34",
			fail:  true,
		}, {
			input: "12_",
			fail:  true,
		}, {
			input:    "_1_2",
			expected: []Item{{IDENTIFIER, 0, "_1_2"}},
		},
		},
	},
@ -481,19 +481,19 @@ func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string
}

func (p *parser) histogramsIncreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
	return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram {
	return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
		return a.Add(b)
	})
}

func (p *parser) histogramsDecreaseSeries(base, inc *histogram.FloatHistogram, times uint64) ([]SequenceValue, error) {
	return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) *histogram.FloatHistogram {
	return p.histogramsSeries(base, inc, times, func(a, b *histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
		return a.Sub(b)
	})
}

func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint64,
	combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) *histogram.FloatHistogram,
	combine func(*histogram.FloatHistogram, *histogram.FloatHistogram) (*histogram.FloatHistogram, error),
) ([]SequenceValue, error) {
	ret := make([]SequenceValue, times+1)
	// Add an additional value (the base) for time 0, which we ignore in tests.

@ -504,7 +504,11 @@ func (p *parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uin
			return nil, fmt.Errorf("error combining histograms: cannot merge from schema %d to %d", inc.Schema, cur.Schema)
		}

		cur = combine(cur.Copy(), inc)
		var err error
		cur, err = combine(cur.Copy(), inc)
		if err != nil {
			return ret, err
		}
		ret[i] = SequenceValue{Histogram: cur}
	}

@ -562,6 +566,15 @@ func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.
			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing z_bucket_w number: %v", val)
		}
	}
	val, ok = (*desc)["custom_values"]
	if ok {
		customValues, ok := val.([]float64)
		if ok {
			output.CustomValues = customValues
		} else {
			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing custom_values: %v", val)
		}
	}

	buckets, spans := p.buildHistogramBucketsAndSpans(desc, "buckets", "offset")
	output.PositiveBuckets = buckets
@ -513,12 +513,12 @@ var testExpr = []struct {
	{
		input:  "2.5.",
		fail:   true,
		errMsg: "unexpected character: '.'",
		errMsg: `1:1: parse error: bad number or duration syntax: "2.5."`,
	},
	{
		input:  "100..4",
		fail:   true,
		errMsg: `unexpected number ".4"`,
		errMsg: `1:1: parse error: bad number or duration syntax: "100.."`,
	},
	{
		input: "0deadbeef",
@ -148,6 +148,13 @@ func TestExprString(t *testing.T) {
			in:  `{"_0"="1"}`,
			out: `{_0="1"}`,
		},
		{
			in: `{""="0"}`,
		},
		{
			in:  "{``=\"0\"}",
			out: `{""="0"}`,
		},
	}

	for _, test := range inputs {
132
promql/promqltest/README.md
Normal file
@ -0,0 +1,132 @@
# The PromQL test scripting language

This package contains two things:

* an implementation of a test scripting language for PromQL engines
* a predefined set of tests written in that scripting language

The predefined set of tests can be run against any PromQL engine implementation by calling `promqltest.RunBuiltinTests()`.
Any other test script can be run with `promqltest.RunTest()`.
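
For orientation, a minimal sketch of wiring the built-in suite into a Go test; `newTestEngine()` is a hypothetical constructor for the engine under test, and the exact `RunBuiltinTests` signature should be checked against the package:

```
package promql_test

import (
	"testing"

	"github.com/prometheus/prometheus/promql/promqltest"
)

func TestBuiltinPromQLScripts(t *testing.T) {
	// Run every predefined test script shipped with promqltest
	// against the engine returned by newTestEngine (hypothetical).
	promqltest.RunBuiltinTests(t, newTestEngine())
}
```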
The rest of this document explains the test scripting language.

Each test script is written in plain text.

Comments can be given by prefixing the comment with a `#`, for example:

```
# This is a comment.
```

Each test file contains a series of commands. There are three kinds of commands:

* `load`
* `clear`
* `eval`

Each command is executed in the order given in the file.

## `load` command

`load` adds some data to the test environment.

The syntax is as follows:

```
load <interval>
    <series> <points>
    ...
    <series> <points>
```

* `<interval>` is the step between points (eg. `1m` or `30s`)
* `<series>` is a Prometheus series name in the usual `metric{label="value"}` syntax
* `<points>` is a specification of the points to add for that series, following the same expanding syntax as for `promtool unittest` documented [here](../../docs/configuration/unit_testing_rules.md#series)

For example:

```
load 1m
    my_metric{env="prod"} 5 2+3x2 _ stale {{schema:1 sum:-3 count:22 buckets:[5 10 7]}}
```

...will create a single series with labels `my_metric{env="prod"}`, with the following points:

* t=0: value is 5
* t=1m: value is 2
* t=2m: value is 5
* t=3m: value is 8
* t=4m: no point
* t=5m: stale marker
* t=6m: native histogram with schema 1, sum -3, count 22 and bucket counts 5, 10 and 7

Each `load` command is additive - it does not replace any data loaded in a previous `load` command.
Use `clear` to remove all loaded data.

### Native histograms with custom buckets (NHCB)

When loading a batch of classic histogram float series, you can optionally append the suffix `_with_nhcb` to convert them to native histograms with custom buckets and load both the original float series and the new histogram series.
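
For example (an illustrative script; the metric name and values here are made up), the following loads three classic histogram series and also synthesizes the equivalent native histogram with custom buckets:

```
load_with_nhcb 5m
    request_duration_seconds_bucket{le="0.1"} 0+1x10
    request_duration_seconds_bucket{le="1"} 0+2x10
    request_duration_seconds_bucket{le="+Inf"} 0+3x10
```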
## `clear` command

`clear` removes all data previously loaded with `load` commands.

## `eval` command

`eval` runs a query against the test environment and asserts that the result is as expected.

Both instant and range queries are supported.

The syntax is as follows:

```
# Instant query
eval instant at <time> <query>
    <series> <points>
    ...
    <series> <points>

# Range query
eval range from <start> to <end> step <step> <query>
    <series> <points>
    ...
    <series> <points>
```

* `<time>` is the timestamp to evaluate the instant query at (eg. `1m`)
* `<start>` and `<end>` specify the time range of the range query, and use the same syntax as `<time>`
* `<step>` is the step of the range query, and uses the same syntax as `<time>` (eg. `30s`)
* `<series>` and `<points>` specify the expected values, and follow the same syntax as for `load` above

For example:

```
eval instant at 1m sum by (env) (my_metric)
    {env="prod"} 5
    {env="test"} 20

eval range from 0 to 3m step 1m sum by (env) (my_metric)
    {env="prod"} 2 5 10 20
    {env="test"} 10 20 30 45
```

Instant queries also support asserting that the series are returned in exactly the order specified: use `eval_ordered instant ...` instead of `eval instant ...`.
This is not supported for range queries.

It is also possible to test that queries fail: use `eval_fail instant ...` or `eval_fail range ...`.
`eval_fail` optionally takes an expected error message string or regexp to assert that the error message is as expected.

For example:

```
# Assert that the query fails for any reason without asserting on the error message.
eval_fail instant at 1m ceil({__name__=~'testmetric1|testmetric2'})

# Assert that the query fails with exactly the provided error message string.
eval_fail instant at 1m ceil({__name__=~'testmetric1|testmetric2'})
    expected_fail_message vector cannot contain metrics with the same labelset

# Assert that the query fails with an error message matching the regexp provided.
eval_fail instant at 1m ceil({__name__=~'testmetric1|testmetric2'})
    expected_fail_regexp (vector cannot contain metrics .*|something else went wrong)
```
@ -19,6 +19,8 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"math"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
@ -43,9 +45,9 @@ import (
|
|||
|
||||
var (
|
||||
patSpace = regexp.MustCompile("[\t ]+")
|
||||
patLoad = regexp.MustCompile(`^load\s+(.+?)$`)
|
||||
patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
|
||||
patEvalRange = regexp.MustCompile(`^eval(?:_(fail))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`)
|
||||
patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`)
|
||||
patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
|
||||
patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`)
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -177,15 +179,18 @@ func raise(line int, format string, v ...interface{}) error {
|
|||
|
||||
func parseLoad(lines []string, i int) (int, *loadCmd, error) {
|
||||
if !patLoad.MatchString(lines[i]) {
|
||||
return i, nil, raise(i, "invalid load command. (load <step:duration>)")
|
||||
return i, nil, raise(i, "invalid load command. (load[_with_nhcb] <step:duration>)")
|
||||
}
|
||||
parts := patLoad.FindStringSubmatch(lines[i])
|
||||
|
||||
gap, err := model.ParseDuration(parts[1])
|
||||
var (
|
||||
withNHCB = parts[1] == "with_nhcb"
|
||||
step = parts[2]
|
||||
)
|
||||
gap, err := model.ParseDuration(step)
|
||||
if err != nil {
|
||||
return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err)
|
||||
return i, nil, raise(i, "invalid step definition %q: %s", step, err)
|
||||
}
|
||||
cmd := newLoadCmd(time.Duration(gap))
|
||||
cmd := newLoadCmd(time.Duration(gap), withNHCB)
|
||||
for i+1 < len(lines) {
|
||||
i++
|
||||
defLine := lines[i]
|
||||
|
@ -218,7 +223,7 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
|
|||
rangeParts := patEvalRange.FindStringSubmatch(lines[i])
|
||||
|
||||
if instantParts == nil && rangeParts == nil {
|
||||
return i, nil, raise(i, "invalid evaluation command. Must be either 'eval[_fail|_ordered] instant [at <offset:duration>] <query>' or 'eval[_fail] range from <from> to <to> step <step> <query>'")
|
||||
return i, nil, raise(i, "invalid evaluation command. Must be either 'eval[_fail|_warn|_ordered] instant [at <offset:duration>] <query>' or 'eval[_fail|_warn] range from <from> to <to> step <step> <query>'")
|
||||
}
|
||||
|
||||
isInstant := instantParts != nil
|
||||
|
@ -297,6 +302,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
|
|||
cmd.ordered = true
|
||||
case "fail":
|
||||
cmd.fail = true
|
||||
case "warn":
|
||||
cmd.warn = true
|
||||
}
|
||||
|
||||
for j := 1; i+1 < len(lines); j++ {
|
||||
|
@ -306,6 +313,21 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
|
|||
i--
|
||||
break
|
||||
}
|
||||
|
||||
if cmd.fail && strings.HasPrefix(defLine, "expected_fail_message") {
|
||||
cmd.expectedFailMessage = strings.TrimSpace(strings.TrimPrefix(defLine, "expected_fail_message"))
|
||||
break
|
||||
}
|
||||
|
||||
if cmd.fail && strings.HasPrefix(defLine, "expected_fail_regexp") {
|
||||
pattern := strings.TrimSpace(strings.TrimPrefix(defLine, "expected_fail_regexp"))
|
||||
cmd.expectedFailRegexp, err = regexp.Compile(pattern)
|
||||
if err != nil {
|
||||
return i, nil, formatErr("invalid regexp '%s' for expected_fail_regexp: %w", pattern, err)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if f, err := parseNumber(defLine); err == nil {
|
||||
cmd.expect(0, parser.SequenceValue{Value: f})
|
||||
break
|
||||
|
@ -352,7 +374,7 @@ func (t *test) parse(input string) error {
|
|||
switch c := strings.ToLower(patSpace.Split(l, 2)[0]); {
|
||||
case c == "clear":
|
||||
cmd = &clearCmd{}
|
||||
case c == "load":
|
||||
case strings.HasPrefix(c, "load"):
|
||||
i, cmd, err = parseLoad(lines, i)
|
||||
case strings.HasPrefix(c, "eval"):
|
||||
i, cmd, err = t.parseEval(lines, i)
|
||||
|
@ -384,14 +406,16 @@ type loadCmd struct {
|
|||
metrics map[uint64]labels.Labels
|
||||
defs map[uint64][]promql.Sample
|
||||
exemplars map[uint64][]exemplar.Exemplar
|
||||
withNHCB bool
|
||||
}
|
||||
|
||||
func newLoadCmd(gap time.Duration) *loadCmd {
|
||||
func newLoadCmd(gap time.Duration, withNHCB bool) *loadCmd {
|
||||
return &loadCmd{
|
||||
gap: gap,
|
||||
metrics: map[uint64]labels.Labels{},
|
||||
defs: map[uint64][]promql.Sample{},
|
||||
exemplars: map[uint64][]exemplar.Exemplar{},
|
||||
withNHCB: withNHCB,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -430,6 +454,167 @@ func (cmd *loadCmd) append(a storage.Appender) error {
|
|||
}
|
||||
}
|
||||
}
|
||||
if cmd.withNHCB {
|
||||
return cmd.appendCustomHistogram(a)
|
||||
}
|
||||
return nil
|
||||
}
|
||||

func getHistogramMetricBase(m labels.Labels, suffix string) (labels.Labels, uint64) {
    mName := m.Get(labels.MetricName)
    baseM := labels.NewBuilder(m).
        Set(labels.MetricName, strings.TrimSuffix(mName, suffix)).
        Del(labels.BucketLabel).
        Labels()
    hash := baseM.Hash()
    return baseM, hash
}

type tempHistogramWrapper struct {
    metric        labels.Labels
    upperBounds   []float64
    histogramByTs map[int64]tempHistogram
}

func newTempHistogramWrapper() tempHistogramWrapper {
    return tempHistogramWrapper{
        upperBounds:   []float64{},
        histogramByTs: map[int64]tempHistogram{},
    }
}

type tempHistogram struct {
    bucketCounts map[float64]float64
    count        float64
    sum          float64
}

func newTempHistogram() tempHistogram {
    return tempHistogram{
        bucketCounts: map[float64]float64{},
    }
}

func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogramWrapper func(*tempHistogramWrapper), updateHistogram func(*tempHistogram, float64)) {
    m2, m2hash := getHistogramMetricBase(m, suffix)
    histogramWrapper, exists := histogramMap[m2hash]
    if !exists {
        histogramWrapper = newTempHistogramWrapper()
    }
    histogramWrapper.metric = m2
    if updateHistogramWrapper != nil {
        updateHistogramWrapper(&histogramWrapper)
    }
    for _, s := range smpls {
        if s.H != nil {
            continue
        }
        histogram, exists := histogramWrapper.histogramByTs[s.T]
        if !exists {
            histogram = newTempHistogram()
        }
        updateHistogram(&histogram, s.F)
        histogramWrapper.histogramByTs[s.T] = histogram
    }
    histogramMap[m2hash] = histogramWrapper
}

func processUpperBoundsAndCreateBaseHistogram(upperBounds0 []float64) ([]float64, *histogram.FloatHistogram) {
    sort.Float64s(upperBounds0)
    upperBounds := make([]float64, 0, len(upperBounds0))
    prevLE := math.Inf(-1)
    for _, le := range upperBounds0 {
        if le != prevLE { // deduplicate
            upperBounds = append(upperBounds, le)
            prevLE = le
        }
    }
    var customBounds []float64
    if upperBounds[len(upperBounds)-1] == math.Inf(1) {
        customBounds = upperBounds[:len(upperBounds)-1]
    } else {
        customBounds = upperBounds
    }
    return upperBounds, &histogram.FloatHistogram{
        Count:  0,
        Sum:    0,
        Schema: histogram.CustomBucketsSchema,
        PositiveSpans: []histogram.Span{
            {Offset: 0, Length: uint32(len(upperBounds))},
        },
        PositiveBuckets: make([]float64, len(upperBounds)),
        CustomValues:    customBounds,
    }
}

// If classic histograms are defined, convert them into native histograms with custom
// bounds and append the defined time series to the storage.
func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error {
    histogramMap := map[uint64]tempHistogramWrapper{}

    // Go through all the time series to collate classic histogram data
    // and organise them by timestamp.
    for hash, smpls := range cmd.defs {
        m := cmd.metrics[hash]
        mName := m.Get(labels.MetricName)
        switch {
        case strings.HasSuffix(mName, "_bucket") && m.Has(labels.BucketLabel):
            le, err := strconv.ParseFloat(m.Get(labels.BucketLabel), 64)
            if err != nil || math.IsNaN(le) {
                continue
            }
            processClassicHistogramSeries(m, "_bucket", histogramMap, smpls, func(histogramWrapper *tempHistogramWrapper) {
                histogramWrapper.upperBounds = append(histogramWrapper.upperBounds, le)
            }, func(histogram *tempHistogram, f float64) {
                histogram.bucketCounts[le] = f
            })
        case strings.HasSuffix(mName, "_count"):
            processClassicHistogramSeries(m, "_count", histogramMap, smpls, nil, func(histogram *tempHistogram, f float64) {
                histogram.count = f
            })
        case strings.HasSuffix(mName, "_sum"):
            processClassicHistogramSeries(m, "_sum", histogramMap, smpls, nil, func(histogram *tempHistogram, f float64) {
                histogram.sum = f
            })
        }
    }

    // Convert the collated classic histogram data into native histograms
    // with custom bounds and append them to the storage.
    for _, histogramWrapper := range histogramMap {
        upperBounds, fhBase := processUpperBoundsAndCreateBaseHistogram(histogramWrapper.upperBounds)
        samples := make([]promql.Sample, 0, len(histogramWrapper.histogramByTs))
        for t, histogram := range histogramWrapper.histogramByTs {
            fh := fhBase.Copy()
            var prevCount, total float64
            for i, le := range upperBounds {
                currCount, exists := histogram.bucketCounts[le]
                if !exists {
                    currCount = 0
                }
                count := currCount - prevCount
                fh.PositiveBuckets[i] = count
                total += count
                prevCount = currCount
            }
            fh.Sum = histogram.sum
            if histogram.count != 0 {
                total = histogram.count
            }
            fh.Count = total
            s := promql.Sample{T: t, H: fh.Compact(0)}
            if err := s.H.Validate(); err != nil {
                return err
            }
            samples = append(samples, s)
        }
        sort.Slice(samples, func(i, j int) bool { return samples[i].T < samples[j].T })
        for _, s := range samples {
            if err := appendSample(a, s, histogramWrapper.metric); err != nil {
                return err
            }
        }
    }
    return nil
}

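A worked example of the conversion above, with hypothetical values at a single timestamp: cumulative classic buckets le=0.1 → 5, le=1 → 11, le=+Inf → 12 yield per-bucket deltas 5, 11−5=6 and 12−11=1, so the appended native histogram is {{schema:-53 count:12 custom_values:[0.1 1] buckets:[5 6 1]}}. The +Inf bound is dropped from custom_values by processUpperBoundsAndCreateBaseHistogram but keeps its own bucket, and count falls back to the _count series when one was loaded.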
@@ -455,8 +640,10 @@ type evalCmd struct {
    step time.Duration
    line int

    isRange       bool // if false, instant query
    fail, ordered bool
    isRange             bool // if false, instant query
    fail, warn, ordered bool
    expectedFailMessage string
    expectedFailRegexp  *regexp.Regexp

    metrics  map[uint64]labels.Labels
    expected map[uint64]entry
@@ -645,6 +832,24 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
    return nil
}

func (ev *evalCmd) checkExpectedFailure(actual error) error {
    if ev.expectedFailMessage != "" {
        if ev.expectedFailMessage != actual.Error() {
            return fmt.Errorf("expected error %q evaluating query %q (line %d), but got: %s", ev.expectedFailMessage, ev.expr, ev.line, actual.Error())
        }
    }

    if ev.expectedFailRegexp != nil {
        if !ev.expectedFailRegexp.MatchString(actual.Error()) {
            return fmt.Errorf("expected error matching pattern %q evaluating query %q (line %d), but got: %s", ev.expectedFailRegexp.String(), ev.expr, ev.line, actual.Error())
        }
    }

    // We're not expecting a particular error, or we got the error we expected.
    // This test passes.
    return nil
}

func formatSeriesResult(s promql.Series) string {
    floatPlural := "s"
    histogramPlural := "s"
@@ -793,9 +998,16 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
        return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
    }
    res := q.Exec(t.context)
    countWarnings, _ := res.Warnings.CountWarningsAndInfo()
    if !cmd.warn && countWarnings > 0 {
        return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings)
    }
    if cmd.warn && countWarnings == 0 {
        return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
    }
    if res.Err != nil {
        if cmd.fail {
            return nil
            return cmd.checkExpectedFailure(res.Err)
        }

        return fmt.Errorf("error evaluating query %q (line %d): %w", cmd.expr, cmd.line, res.Err)
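The warning contract is symmetric: plain eval (and eval_fail) now fails the test if the engine emits any PromQL warning, and eval_warn fails if it emits none. For example, taken from the test-data changes further down:

  eval_warn instant at 1m quantile_over_time(2, data[1m])
    {test="two samples"} +Inf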
|
@ -819,72 +1031,89 @@ func (t *test) execInstantEval(cmd *evalCmd, engine promql.QueryEngine) error {
|
|||
}
|
||||
queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...)
|
||||
for _, iq := range queries {
|
||||
q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating instant query for %q (line %d): %w", cmd.expr, cmd.line, err)
|
||||
}
|
||||
defer q.Close()
|
||||
res := q.Exec(t.context)
|
||||
if res.Err != nil {
|
||||
if cmd.fail {
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("error evaluating query %q (line %d): %w", iq.expr, cmd.line, res.Err)
|
||||
}
|
||||
if res.Err == nil && cmd.fail {
|
||||
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
|
||||
}
|
||||
err = cmd.compareResult(res.Value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
|
||||
}
|
||||
|
||||
// Check query returns same result in range mode,
|
||||
// by checking against the middle step.
|
||||
q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
|
||||
}
|
||||
rangeRes := q.Exec(t.context)
|
||||
if rangeRes.Err != nil {
|
||||
return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err)
|
||||
}
|
||||
defer q.Close()
|
||||
if cmd.ordered {
|
||||
// Range queries are always sorted by labels, so skip this test case that expects results in a particular order.
|
||||
continue
|
||||
}
|
||||
mat := rangeRes.Value.(promql.Matrix)
|
||||
if err := assertMatrixSorted(mat); err != nil {
|
||||
if err := t.runInstantQuery(iq, cmd, engine); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
vec := make(promql.Vector, 0, len(mat))
|
||||
for _, series := range mat {
|
||||
// We expect either Floats or Histograms.
|
||||
for _, point := range series.Floats {
|
||||
if point.T == timeMilliseconds(iq.evalTime) {
|
||||
vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, F: point.F})
|
||||
break
|
||||
}
|
||||
}
|
||||
for _, point := range series.Histograms {
|
||||
if point.T == timeMilliseconds(iq.evalTime) {
|
||||
vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, H: point.H})
|
||||
break
|
||||
}
|
||||
func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promql.QueryEngine) error {
|
||||
q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating instant query for %q (line %d): %w", cmd.expr, cmd.line, err)
|
||||
}
|
||||
defer q.Close()
|
||||
res := q.Exec(t.context)
|
||||
countWarnings, _ := res.Warnings.CountWarningsAndInfo()
|
||||
if !cmd.warn && countWarnings > 0 {
|
||||
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings)
|
||||
}
|
||||
if cmd.warn && countWarnings == 0 {
|
||||
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line)
|
||||
}
|
||||
if res.Err != nil {
|
||||
if cmd.fail {
|
||||
if err := cmd.checkExpectedFailure(res.Err); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
if _, ok := res.Value.(promql.Scalar); ok {
|
||||
err = cmd.compareResult(promql.Scalar{V: vec[0].F})
|
||||
} else {
|
||||
err = cmd.compareResult(vec)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("error in %s %s (line %d) range mode: %w", cmd, iq.expr, cmd.line, err)
|
||||
}
|
||||
return fmt.Errorf("error evaluating query %q (line %d): %w", iq.expr, cmd.line, res.Err)
|
||||
}
|
||||
if res.Err == nil && cmd.fail {
|
||||
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
|
||||
}
|
||||
err = cmd.compareResult(res.Value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
|
||||
}
|
||||
|
||||
// Check query returns same result in range mode,
|
||||
// by checking against the middle step.
|
||||
q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
|
||||
}
|
||||
rangeRes := q.Exec(t.context)
|
||||
if rangeRes.Err != nil {
|
||||
return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err)
|
||||
}
|
||||
defer q.Close()
|
||||
if cmd.ordered {
|
||||
// Range queries are always sorted by labels, so skip this test case that expects results in a particular order.
|
||||
return nil
|
||||
}
|
||||
mat := rangeRes.Value.(promql.Matrix)
|
||||
if err := assertMatrixSorted(mat); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vec := make(promql.Vector, 0, len(mat))
|
||||
for _, series := range mat {
|
||||
// We expect either Floats or Histograms.
|
||||
for _, point := range series.Floats {
|
||||
if point.T == timeMilliseconds(iq.evalTime) {
|
||||
vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, F: point.F})
|
||||
break
|
||||
}
|
||||
}
|
||||
for _, point := range series.Histograms {
|
||||
if point.T == timeMilliseconds(iq.evalTime) {
|
||||
vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, H: point.H})
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, ok := res.Value.(promql.Scalar); ok {
|
||||
err = cmd.compareResult(promql.Scalar{V: vec[0].F})
|
||||
} else {
|
||||
err = cmd.compareResult(vec)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("error in %s %s (line %d) range mode: %w", cmd, iq.expr, cmd.line, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@@ -975,7 +1204,7 @@ func (ll *LazyLoader) parse(input string) error {
        if len(l) == 0 {
            continue
        }
        if strings.ToLower(patSpace.Split(l, 2)[0]) == "load" {
        if strings.HasPrefix(strings.ToLower(patSpace.Split(l, 2)[0]), "load") {
            _, cmd, err := parseLoad(lines, i)
            if err != nil {
                return err
@@ -263,6 +263,60 @@ eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
            input:         `eval_fail instant at 0s vector(0)`,
            expectedError: `expected error evaluating query "vector(0)" (line 1) but got none`,
        },
        "instant query expected to fail with specific error message, and query fails with that error": {
            input: `
load 5m
  testmetric1{src="a",dst="b"} 0
  testmetric2{src="a",dst="b"} 1

eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
  expected_fail_message vector cannot contain metrics with the same labelset
`,
        },
        "instant query expected to fail with specific error message, and query fails with a different error": {
            input: `
load 5m
  testmetric1{src="a",dst="b"} 0
  testmetric2{src="a",dst="b"} 1

eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
  expected_fail_message something else went wrong
`,
            expectedError: `expected error "something else went wrong" evaluating query "ceil({__name__=~'testmetric1|testmetric2'})" (line 6), but got: vector cannot contain metrics with the same labelset`,
        },

        "instant query expected to fail with error matching pattern, and query fails with that error": {
            input: `
load 5m
  testmetric1{src="a",dst="b"} 0
  testmetric2{src="a",dst="b"} 1

eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
  expected_fail_regexp vector .* contain metrics
`,
        },
        "instant query expected to fail with error matching pattern, and query fails with a different error": {
            input: `
load 5m
  testmetric1{src="a",dst="b"} 0
  testmetric2{src="a",dst="b"} 1

eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
  expected_fail_regexp something else went wrong
`,
            expectedError: `expected error matching pattern "something else went wrong" evaluating query "ceil({__name__=~'testmetric1|testmetric2'})" (line 6), but got: vector cannot contain metrics with the same labelset`,
        },
        "instant query expected to fail with error matching pattern, and pattern is not a valid regexp": {
            input: `
load 5m
  testmetric1{src="a",dst="b"} 0
  testmetric2{src="a",dst="b"} 1

eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
  expected_fail_regexp [
`,
            expectedError: `error in eval ceil({__name__=~'testmetric1|testmetric2'}) (line 7): invalid regexp '[' for expected_fail_regexp: error parsing regexp: missing closing ]: ` + "`[`",
        },
        "instant query with results expected to match provided order, and result is in expected order": {
            input: testData + `
eval_ordered instant at 50m sort(http_requests)

@@ -384,6 +438,59 @@ eval_fail range from 0 to 10m step 5m ceil({__name__=~'testmetric1|testmetric2'})
            input:         `eval_fail range from 0 to 10m step 5m vector(0)`,
            expectedError: `expected error evaluating query "vector(0)" (line 1) but got none`,
        },
        "range query expected to fail with specific error message, and query fails with that error": {
            input: `
load 5m
  testmetric1{src="a",dst="b"} 0
  testmetric2{src="a",dst="b"} 1

eval_fail range from 0 to 10m step 5m ceil({__name__=~'testmetric1|testmetric2'})
  expected_fail_message vector cannot contain metrics with the same labelset
`,
        },
        "range query expected to fail with specific error message, and query fails with a different error": {
            input: `
load 5m
  testmetric1{src="a",dst="b"} 0
  testmetric2{src="a",dst="b"} 1

eval_fail range from 0 to 10m step 5m ceil({__name__=~'testmetric1|testmetric2'})
  expected_fail_message something else went wrong
`,
            expectedError: `expected error "something else went wrong" evaluating query "ceil({__name__=~'testmetric1|testmetric2'})" (line 6), but got: vector cannot contain metrics with the same labelset`,
        },
        "range query expected to fail with error matching pattern, and query fails with that error": {
            input: `
load 5m
  testmetric1{src="a",dst="b"} 0
  testmetric2{src="a",dst="b"} 1

eval_fail range from 0 to 10m step 5m ceil({__name__=~'testmetric1|testmetric2'})
  expected_fail_regexp vector .* contain metrics
`,
        },
        "range query expected to fail with error matching pattern, and query fails with a different error": {
            input: `
load 5m
  testmetric1{src="a",dst="b"} 0
  testmetric2{src="a",dst="b"} 1

eval_fail range from 0 to 10m step 5m ceil({__name__=~'testmetric1|testmetric2'})
  expected_fail_regexp something else went wrong
`,
            expectedError: `expected error matching pattern "something else went wrong" evaluating query "ceil({__name__=~'testmetric1|testmetric2'})" (line 6), but got: vector cannot contain metrics with the same labelset`,
        },
        "range query expected to fail with error matching pattern, and pattern is not a valid regexp": {
            input: `
load 5m
  testmetric1{src="a",dst="b"} 0
  testmetric2{src="a",dst="b"} 1

eval_fail range from 0 to 10m step 5m ceil({__name__=~'testmetric1|testmetric2'})
  expected_fail_regexp [
`,
            expectedError: `error in eval ceil({__name__=~'testmetric1|testmetric2'}) (line 7): invalid regexp '[' for expected_fail_regexp: error parsing regexp: missing closing ]: ` + "`[`",
        },
        "range query with from and to timestamps in wrong order": {
            input:         `eval range from 10m to 9m step 5m vector(0)`,
            expectedError: `error in eval vector(0) (line 1): invalid test definition, end timestamp (9m) is before start timestamp (10m)`,
14  promql/promqltest/testdata/aggregators.test  vendored

@@ -399,7 +399,7 @@ eval instant at 1m quantile without(point)((scalar(foo)), data)
  {test="three samples"} 1.6
  {test="uneven samples"} 2.8

eval instant at 1m quantile without(point)(NaN, data)
eval_warn instant at 1m quantile without(point)(NaN, data)
  {test="two samples"} NaN
  {test="three samples"} NaN
  {test="uneven samples"} NaN

@@ -503,6 +503,18 @@ eval instant at 1m avg(data{test="-big"})
eval instant at 1m avg(data{test="bigzero"})
  {} 0

# Test summing extreme values.
clear

load 10s
  data{test="ten",point="a"} 2
  data{test="ten",point="b"} 8
  data{test="ten",point="c"} 1e+100
  data{test="ten",point="d"} -1e100

eval instant at 1m sum(data{test="ten"})
  {} 10

clear

# Test that aggregations are deterministic.
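The extreme-values case guards summation accuracy: naive left-to-right float64 addition computes 2 + 8 = 10, collapses 10 + 1e100 to 1e100, and then 1e100 − 1e100 = 0, losing the answer entirely, whereas compensated summation preserves the low-order 10. A minimal sketch of the technique (Neumaier's variant of Kahan summation, not the exact Prometheus implementation; assumes "math" is imported):

func kahanSum(values []float64) float64 {
    var sum, c float64 // running sum and compensation for lost low-order bits
    for _, v := range values {
        t := sum + v
        if math.Abs(sum) >= math.Abs(v) {
            c += (sum - t) + v // low-order bits of v were lost in t
        } else {
            c += (v - t) + sum // low-order bits of sum were lost in t
        }
        sum = t
    }
    return sum + c
}

kahanSum([]float64{2, 8, 1e100, -1e100}) returns 10, matching the expected result above.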
14  promql/promqltest/testdata/functions.test  vendored

@@ -838,17 +838,17 @@ eval instant at 1m quantile_over_time(1, data[1m])
  {test="three samples"} 2
  {test="uneven samples"} 4

eval instant at 1m quantile_over_time(-1, data[1m])
eval_warn instant at 1m quantile_over_time(-1, data[1m])
  {test="two samples"} -Inf
  {test="three samples"} -Inf
  {test="uneven samples"} -Inf

eval instant at 1m quantile_over_time(2, data[1m])
eval_warn instant at 1m quantile_over_time(2, data[1m])
  {test="two samples"} +Inf
  {test="three samples"} +Inf
  {test="uneven samples"} +Inf

eval instant at 1m (quantile_over_time(2, (data[1m])))
eval_warn instant at 1m (quantile_over_time(2, (data[1m])))
  {test="two samples"} +Inf
  {test="three samples"} +Inf
  {test="uneven samples"} +Inf

@@ -1213,3 +1213,11 @@ eval instant at 5m log10(exp_root_log - 20)
  {l="y"} -Inf

clear

# Test that timestamp() handles the scenario where there are more steps than samples.
load 1m
  metric 0+1x1000

# We expect the value to be 0 for t=0s to t=59s (inclusive), then 60 for t=60s and t=61s.
eval range from 0 to 61s step 1s timestamp(metric)
  {} 0x59 60 60
110  promql/promqltest/testdata/histograms.test  vendored

@@ -5,7 +5,7 @@
# server has to cope with it.

# Test histogram.
load 5m
load_with_nhcb 5m
  testhistogram_bucket{le="0.1", start="positive"} 0+5x10
  testhistogram_bucket{le=".2", start="positive"} 0+7x10
  testhistogram_bucket{le="1e0", start="positive"} 0+11x10

@@ -18,15 +18,33 @@ load 5m
# Another test histogram, where q(1/6), q(1/2), and q(5/6) are each in
# the middle of a bucket and should therefore be 1, 3, and 5,
# respectively.
load 5m
load_with_nhcb 5m
  testhistogram2_bucket{le="0"} 0+0x10
  testhistogram2_bucket{le="2"} 0+1x10
  testhistogram2_bucket{le="4"} 0+2x10
  testhistogram2_bucket{le="6"} 0+3x10
  testhistogram2_bucket{le="+Inf"} 0+3x10

# Another test histogram, this time without any observations in the +Inf bucket.
# This enables a meaningful calculation of standard deviation and variance.
load_with_nhcb 5m
  testhistogram3_bucket{le="0", start="positive"} 0+0x10
  testhistogram3_bucket{le="0.1", start="positive"} 0+5x10
  testhistogram3_bucket{le=".2", start="positive"} 0+7x10
  testhistogram3_bucket{le="1e0", start="positive"} 0+11x10
  testhistogram3_bucket{le="+Inf", start="positive"} 0+11x10
  testhistogram3_sum{start="positive"} 0+33x10
  testhistogram3_count{start="positive"} 0+11x10
  testhistogram3_bucket{le="-.25", start="negative"} 0+0x10
  testhistogram3_bucket{le="-.2", start="negative"} 0+1x10
  testhistogram3_bucket{le="-0.1", start="negative"} 0+2x10
  testhistogram3_bucket{le="0.3", start="negative"} 0+2x10
  testhistogram3_bucket{le="+Inf", start="negative"} 0+2x10
  testhistogram3_sum{start="negative"} 0+8x10
  testhistogram3_count{start="negative"} 0+2x10

# Now a more realistic histogram per job and instance to test aggregation.
load 5m
load_with_nhcb 5m
  request_duration_seconds_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
  request_duration_seconds_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
  request_duration_seconds_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10

@@ -41,7 +59,7 @@ load 5m
  request_duration_seconds_bucket{job="job2", instance="ins2", le="+Inf"} 0+9x10

# Different le representations in one histogram.
load 5m
load_with_nhcb 5m
  mixed_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
  mixed_bucket{job="job1", instance="ins1", le="0.2"} 0+1x10
  mixed_bucket{job="job1", instance="ins1", le="2e-1"} 0+1x10

@@ -50,27 +68,81 @@ load 5m
  mixed_bucket{job="job1", instance="ins2", le="+inf"} 0+0x10
  mixed_bucket{job="job1", instance="ins2", le="+Inf"} 0+0x10

# Test histogram_count.
eval instant at 50m histogram_count(testhistogram3)
  {start="positive"} 110
  {start="negative"} 20

# Test histogram_sum.
eval instant at 50m histogram_sum(testhistogram3)
  {start="positive"} 330
  {start="negative"} 80

# Test histogram_avg.
eval instant at 50m histogram_avg(testhistogram3)
  {start="positive"} 3
  {start="negative"} 4

# Test histogram_stddev.
eval instant at 50m histogram_stddev(testhistogram3)
  {start="positive"} 2.8189265757336734
  {start="negative"} 4.182715937754936

# Test histogram_stdvar.
eval instant at 50m histogram_stdvar(testhistogram3)
  {start="positive"} 7.946347039377573
  {start="negative"} 17.495112615949154

# Test histogram_fraction.

eval instant at 50m histogram_fraction(0, 0.2, testhistogram3)
  {start="positive"} 0.6363636363636364
  {start="negative"} 0

eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m]))
  {start="positive"} 0.6363636363636364
  {start="negative"} 0

# Test histogram_quantile.

eval instant at 50m histogram_quantile(0, testhistogram3_bucket)
  {start="positive"} 0
  {start="negative"} -0.25

eval instant at 50m histogram_quantile(0.25, testhistogram3_bucket)
  {start="positive"} 0.055
  {start="negative"} -0.225

eval instant at 50m histogram_quantile(0.5, testhistogram3_bucket)
  {start="positive"} 0.125
  {start="negative"} -0.2

eval instant at 50m histogram_quantile(0.75, testhistogram3_bucket)
  {start="positive"} 0.45
  {start="negative"} -0.15

eval instant at 50m histogram_quantile(1, testhistogram3_bucket)
  {start="positive"} 1
  {start="negative"} -0.1

# Quantile too low.
eval instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
eval_warn instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
  {start="positive"} -Inf
  {start="negative"} -Inf

# Quantile too high.
eval instant at 50m histogram_quantile(1.01, testhistogram_bucket)
eval_warn instant at 50m histogram_quantile(1.01, testhistogram_bucket)
  {start="positive"} +Inf
  {start="negative"} +Inf

# Quantile invalid.
eval instant at 50m histogram_quantile(NaN, testhistogram_bucket)
eval_warn instant at 50m histogram_quantile(NaN, testhistogram_bucket)
  {start="positive"} NaN
  {start="negative"} NaN

# Quantile value in lowest bucket, which is positive.
eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="positive"})
# Quantile value in lowest bucket.
eval instant at 50m histogram_quantile(0, testhistogram_bucket)
  {start="positive"} 0

# Quantile value in lowest bucket, which is negative.
eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="negative"})
  {start="negative"} -0.2

# Quantile value in highest bucket.

@@ -83,7 +155,6 @@ eval instant at 50m histogram_quantile(0.2, testhistogram_bucket)
  {start="positive"} 0.048
  {start="negative"} -0.2

eval instant at 50m histogram_quantile(0.5, testhistogram_bucket)
  {start="positive"} 0.15
  {start="negative"} -0.15

@@ -182,6 +253,9 @@ eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m]))
  {instance="ins1", job="job2"} 0.1
  {instance="ins2", job="job2"} 0.11666666666666667

eval instant at 50m sum(request_duration_seconds)
  {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}}

# A histogram with nonmonotonic bucket counts. This may happen when recording
# rule evaluation or federation races scrape ingestion, causing some buckets
# counts to be derived from fewer samples.

@@ -209,6 +283,10 @@ eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m]))
  {instance="ins1", job="job1"} 0.15
  {instance="ins2", job="job1"} NaN

eval instant at 50m histogram_quantile(0.5, rate(mixed[5m]))
  {instance="ins1", job="job1"} 0.2
  {instance="ins2", job="job1"} NaN

eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m]))
  {instance="ins1", job="job1"} 0.2
  {instance="ins2", job="job1"} NaN

@@ -217,7 +295,7 @@ eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m]))
  {instance="ins1", job="job1"} 0.2
  {instance="ins2", job="job1"} NaN

load 5m
load_with_nhcb 5m
  empty_bucket{le="0.1", job="job1", instance="ins1"} 0x10
  empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10
  empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10

@@ -227,9 +305,9 @@ eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m]))

# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set
# https://github.com/prometheus/prometheus/issues/9910
load 5m
load_with_nhcb 5m
  request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
  request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
  request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10

eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration.*"})
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket$"})
@@ -364,7 +364,7 @@ eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7)
load 10m
  histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1

eval instant at 10m histogram_quantile(1.001, histogram_quantile_1)
eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_1)
  {} Inf

eval instant at 10m histogram_quantile(1, histogram_quantile_1)

@@ -388,14 +388,14 @@ eval instant at 10m histogram_quantile(0.1, histogram_quantile_1)
eval instant at 10m histogram_quantile(0, histogram_quantile_1)
  {} 0

eval instant at 10m histogram_quantile(-1, histogram_quantile_1)
eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_1)
  {} -Inf

# Apply quantile function to histogram with all negative buckets with zero bucket.
load 10m
  histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1

eval instant at 10m histogram_quantile(1.001, histogram_quantile_2)
eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_2)
  {} Inf

eval instant at 10m histogram_quantile(1, histogram_quantile_2)

@@ -416,14 +416,14 @@ eval instant at 10m histogram_quantile(0.1, histogram_quantile_2)
eval instant at 10m histogram_quantile(0, histogram_quantile_2)
  {} -16

eval instant at 10m histogram_quantile(-1, histogram_quantile_2)
eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_2)
  {} -Inf

# Apply quantile function to histogram with both positive and negative buckets with zero bucket.
load 10m
  histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1

eval instant at 10m histogram_quantile(1.001, histogram_quantile_3)
eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_3)
  {} Inf

eval instant at 10m histogram_quantile(1, histogram_quantile_3)

@@ -459,7 +459,7 @@ eval instant at 10m histogram_quantile(0.01, histogram_quantile_3)
eval instant at 10m histogram_quantile(0, histogram_quantile_3)
  {} -16

eval instant at 10m histogram_quantile(-1, histogram_quantile_3)
eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_3)
  {} -Inf

# Apply fraction function to empty histogram.

@@ -714,3 +714,34 @@ eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_4)

eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4)
  {} 1

clear

# Counter reset only noticeable in a single bucket.
load 5m
  reset_in_bucket {{schema:0 count:4 sum:5 buckets:[1 2 1]}} {{schema:0 count:5 sum:6 buckets:[1 1 3]}} {{schema:0 count:6 sum:7 buckets:[1 2 3]}}

eval instant at 10m increase(reset_in_bucket[15m])
  {} {{count:9 sum:10.5 buckets:[1.5 3 4.5]}}

# The following two test the "fast path" where only sum and count is decoded.
eval instant at 10m histogram_count(increase(reset_in_bucket[15m]))
  {} 9

eval instant at 10m histogram_sum(increase(reset_in_bucket[15m]))
  {} 10.5

clear

# Test native histograms with custom buckets.
load 5m
  custom_buckets_histogram {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}}x10

eval instant at 5m histogram_fraction(5, 10, custom_buckets_histogram)
  {} 0.5

eval instant at 5m histogram_quantile(0.5, custom_buckets_histogram)
  {} 7.5

eval instant at 5m sum(custom_buckets_histogram)
  {} {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}}
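As a worked check of the custom-bucket quantile above: custom_buckets_histogram has count 4, so q=0.5 targets rank 0.5 × 4 = 2. The cumulative counts are 1 (≤5), 3 (≤10) and 4 (+Inf), so the rank lands in the (5, 10] bucket, and linear interpolation gives 5 + (10 − 5) × (2 − 1) / 2 = 7.5, matching the expected result.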
73  promql/promqltest/testdata/range_queries.test  vendored (new file)

@@ -0,0 +1,73 @@
# sum_over_time with all values
load 30s
  bar 0 1 10 100 1000

eval range from 0 to 2m step 1m sum_over_time(bar[30s])
  {} 0 11 1100

clear

# sum_over_time with trailing values
load 30s
  bar 0 1 10 100 1000 0 0 0 0

eval range from 0 to 2m step 1m sum_over_time(bar[30s])
  {} 0 11 1100

clear

# sum_over_time with all values long
load 30s
  bar 0 1 10 100 1000 10000 100000 1000000 10000000

eval range from 0 to 4m step 1m sum_over_time(bar[30s])
  {} 0 11 1100 110000 11000000

clear

# sum_over_time with all values random
load 30s
  bar 5 17 42 2 7 905 51

eval range from 0 to 3m step 1m sum_over_time(bar[30s])
  {} 5 59 9 956

clear

# metric query
load 30s
  metric 1+1x4

eval range from 0 to 2m step 1m metric
  metric 1 3 5

clear

# metric query with trailing values
load 30s
  metric 1+1x8

eval range from 0 to 2m step 1m metric
  metric 1 3 5

clear

# short-circuit
load 30s
  foo{job="1"} 1+1x4
  bar{job="2"} 1+1x4

eval range from 0 to 2m step 1m foo > 2 or bar
  foo{job="1"} _ 3 5
  bar{job="2"} 1 3 5

clear

# Drop metric name
load 30s
  requests{job="1", __address__="bar"} 100

eval range from 0 to 2m step 1m requests * 2
  {job="1", __address__="bar"} 200 200 200

clear
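To see how the "random" case lines up: samples sit 30s apart (5 at 0s, 17 at 30s, 42 at 60s, 2 at 90s, 7 at 120s, 905 at 150s, 51 at 180s), and the [30s] window at each 1m step covers the preceding 30 seconds inclusive, so the four steps evaluate to 5, 17 + 42 = 59, 2 + 7 = 9, and 905 + 51 = 956.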
@@ -206,12 +206,15 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 {

    for it.Next() {
        bucket = it.At()
        if bucket.Count == 0 {
            continue
        }
        count += bucket.Count
        if count >= rank {
            break
        }
    }
    if bucket.Lower < 0 && bucket.Upper > 0 {
    if !h.UsesCustomBuckets() && bucket.Lower < 0 && bucket.Upper > 0 {
        switch {
        case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0:
            // The result is in the zero bucket and the histogram has only

@@ -222,6 +225,17 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
            // negative buckets. So we consider 0 to be the upper bound.
            bucket.Upper = 0
        }
    } else if h.UsesCustomBuckets() {
        if bucket.Lower == math.Inf(-1) {
            // first bucket, with lower bound -Inf
            if bucket.Upper <= 0 {
                return bucket.Upper
            }
            bucket.Lower = 0
        } else if bucket.Upper == math.Inf(1) {
            // last bucket, with upper bound +Inf
            return bucket.Lower
        }
    }
    // Due to numerical inaccuracies, we could end up with a higher count
    // than h.Count. Thus, make sure count is never higher than h.Count.
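The net effect for custom-bucket (NHCB) histograms: the zero-bucket widening logic is skipped, and the two open-ended buckets are clamped instead of interpolated. A rank that falls into the first bucket has its lower bound treated as 0 (or returns the upper bound outright if that bound is ≤ 0), and a rank in the final +Inf bucket returns the bucket's finite lower bound rather than extrapolating to infinity.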
@@ -338,10 +338,9 @@ const resolvedRetention = 15 * time.Minute

// Eval evaluates the rule expression and then creates pending alerts and fires
// or removes previously pending alerts accordingly.
func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error) {
func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts time.Time, query QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error) {
    ctx = NewOriginContext(ctx, NewRuleDetail(r))

    res, err := query(ctx, r.vector.String(), ts)
    res, err := query(ctx, r.vector.String(), ts.Add(-queryOffset))
    if err != nil {
        return nil, err
    }

@@ -484,8 +483,8 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
        }

        if r.restored.Load() {
            vec = append(vec, r.sample(a, ts))
            vec = append(vec, r.forStateSample(a, ts, float64(a.ActiveAt.Unix())))
            vec = append(vec, r.sample(a, ts.Add(-queryOffset)))
            vec = append(vec, r.forStateSample(a, ts.Add(-queryOffset), float64(a.ActiveAt.Unix())))
        }
    }

@@ -123,7 +123,7 @@ func TestAlertingRuleTemplateWithHistogram(t *testing.T) {
    )

    evalTime := time.Now()
    res, err := rule.Eval(context.TODO(), evalTime, q, nil, 0)
    res, err := rule.Eval(context.TODO(), 0, evalTime, q, nil, 0)
    require.NoError(t, err)

    require.Len(t, res, 2)

@@ -230,7 +230,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
        t.Logf("case %d", i)
        evalTime := baseTime.Add(time.Duration(i) * time.Minute)
        result[0].T = timestamp.FromTime(evalTime)
        res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
        res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
        require.NoError(t, err)

        var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.

@@ -247,7 +247,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
        testutil.RequireEqual(t, result, filteredRes)
    }
    evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute)
    res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
    res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
    require.NoError(t, err)
    require.Empty(t, res)
}

@@ -315,7 +315,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {

    var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
    res, err := ruleWithoutExternalLabels.Eval(
        context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
        context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
    )
    require.NoError(t, err)
    for _, smpl := range res {

@@ -329,7 +329,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
    }

    res, err = ruleWithExternalLabels.Eval(
        context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
        context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
    )
    require.NoError(t, err)
    for _, smpl := range res {

@@ -408,7 +408,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {

    var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
    res, err := ruleWithoutExternalURL.Eval(
        context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
        context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
    )
    require.NoError(t, err)
    for _, smpl := range res {

@@ -422,7 +422,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
    }

    res, err = ruleWithExternalURL.Eval(
        context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
        context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
    )
    require.NoError(t, err)
    for _, smpl := range res {

@@ -477,7 +477,7 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {

    var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
    res, err := rule.Eval(
        context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
        context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0,
    )
    require.NoError(t, err)
    for _, smpl := range res {

@@ -544,7 +544,7 @@ instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }};
        close(getDoneCh)
    }()
    _, err = ruleWithQueryInTemplate.Eval(
        context.TODO(), evalTime, slowQueryFunc, nil, 0,
        context.TODO(), 0, evalTime, slowQueryFunc, nil, 0,
    )
    require.NoError(t, err)
}

@@ -596,7 +596,7 @@ func TestAlertingRuleDuplicate(t *testing.T) {
        "",
        true, log.NewNopLogger(),
    )
    _, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, 0)
    _, err := rule.Eval(ctx, 0, now, EngineQueryFunc(engine, storage), nil, 0)
    require.Error(t, err)
    require.EqualError(t, err, "vector contains metrics with the same labelset after applying alert labels")
}

@@ -644,7 +644,7 @@ func TestAlertingRuleLimit(t *testing.T) {
    evalTime := time.Unix(0, 0)

    for _, test := range tests {
        switch _, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); {
        switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); {
        case err != nil:
            require.EqualError(t, err, test.err)
        case test.err != "":

@@ -871,7 +871,7 @@ func TestKeepFiringFor(t *testing.T) {
        t.Logf("case %d", i)
        evalTime := baseTime.Add(time.Duration(i) * time.Minute)
        result[0].T = timestamp.FromTime(evalTime)
        res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
        res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
        require.NoError(t, err)

        var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.

@@ -888,7 +888,7 @@ func TestKeepFiringFor(t *testing.T) {
        testutil.RequireEqual(t, result, filteredRes)
    }
    evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute)
    res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
    res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
    require.NoError(t, err)
    require.Empty(t, res)
}

@@ -925,7 +925,7 @@ func TestPendingAndKeepFiringFor(t *testing.T) {

    baseTime := time.Unix(0, 0)
    result.T = timestamp.FromTime(baseTime)
    res, err := rule.Eval(context.TODO(), baseTime, EngineQueryFunc(testEngine, storage), nil, 0)
    res, err := rule.Eval(context.TODO(), 0, baseTime, EngineQueryFunc(testEngine, storage), nil, 0)
    require.NoError(t, err)

    require.Len(t, res, 2)

@@ -940,7 +940,7 @@ func TestPendingAndKeepFiringFor(t *testing.T) {
    }

    evalTime := baseTime.Add(time.Minute)
    res, err = rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
    res, err = rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
    require.NoError(t, err)
    require.Empty(t, res)
}

@@ -974,7 +974,7 @@ func TestAlertingEvalWithOrigin(t *testing.T) {
        true, log.NewNopLogger(),
    )

    _, err = rule.Eval(ctx, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) {
    _, err = rule.Eval(ctx, 0, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) {
        detail = FromOriginContext(ctx)
        return nil, nil
    }, nil, 0)

@@ -47,6 +47,7 @@ type Group struct {
    name                 string
    file                 string
    interval             time.Duration
    queryOffset          *time.Duration
    limit                int
    rules                []Rule
    seriesInPreviousEval []map[string]labels.Labels // One per Rule.

@@ -90,6 +91,7 @@ type GroupOptions struct {
    Rules             []Rule
    ShouldRestore     bool
    Opts              *ManagerOptions
    QueryOffset       *time.Duration
    done              chan struct{}
    EvalIterationFunc GroupEvalIterationFunc
}

@@ -126,6 +128,7 @@ func NewGroup(o GroupOptions) *Group {
        name:          o.Name,
        file:          o.File,
        interval:      o.Interval,
        queryOffset:   o.QueryOffset,
        limit:         o.Limit,
        rules:         o.Rules,
        shouldRestore: o.ShouldRestore,

@@ -443,6 +446,8 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
        wg sync.WaitGroup
    )

    ruleQueryOffset := g.QueryOffset()

    for i, rule := range g.rules {
        select {
        case <-g.done:

@@ -473,7 +478,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {

            g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()

            vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit())
            vector, err := rule.Eval(ctx, ruleQueryOffset, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit())
            if err != nil {
                rule.SetHealth(HealthBad)
                rule.SetLastError(err)

@@ -562,7 +567,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
            for metric, lset := range g.seriesInPreviousEval[i] {
                if _, ok := seriesReturned[metric]; !ok {
                    // Series no longer exposed, mark it stale.
                    _, err = app.Append(0, lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
                    _, err = app.Append(0, lset, timestamp.FromTime(ts.Add(-ruleQueryOffset)), math.Float64frombits(value.StaleNaN))
                    unwrappedErr := errors.Unwrap(err)
                    if unwrappedErr == nil {
                        unwrappedErr = err

@@ -601,14 +606,27 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
    g.cleanupStaleSeries(ctx, ts)
}

func (g *Group) QueryOffset() time.Duration {
    if g.queryOffset != nil {
        return *g.queryOffset
    }

    if g.opts.DefaultRuleQueryOffset != nil {
        return g.opts.DefaultRuleQueryOffset()
    }

    return time.Duration(0)
}
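QueryOffset resolves per group first and falls back to the manager-wide default. In rule-file terms a group would opt in roughly like this (a sketch; the YAML key query_offset is inferred from rg.QueryOffset in the LoadGroups change below):

groups:
  - name: example
    interval: 1m
    query_offset: 1m    # rules in this group query data as of ts minus 1m
    rules:
      - record: job:http_requests:rate5m
        expr: sum by (job) (rate(http_requests_total[5m]))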

func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) {
    if len(g.staleSeries) == 0 {
        return
    }
    app := g.opts.Appendable.Appender(ctx)
    queryOffset := g.QueryOffset()
    for _, s := range g.staleSeries {
        // Rule that produced series no longer configured, mark it stale.
        _, err := app.Append(0, s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
        _, err := app.Append(0, s, timestamp.FromTime(ts.Add(-queryOffset)), math.Float64frombits(value.StaleNaN))
        unwrappedErr := errors.Unwrap(err)
        if unwrappedErr == nil {
            unwrappedErr = err

@@ -775,6 +793,10 @@ func (g *Group) Equals(ng *Group) bool {
        return false
    }

    if ((g.queryOffset == nil) != (ng.queryOffset == nil)) || (g.queryOffset != nil && ng.queryOffset != nil && *g.queryOffset != *ng.queryOffset) {
        return false
    }

    if len(g.rules) != len(ng.rules) {
        return false
    }
98  rules/group_test.go  (new file)

@@ -0,0 +1,98 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rules

import (
    "testing"
    "time"

    "github.com/stretchr/testify/require"
)

func TestGroup_Equals(t *testing.T) {
    tests := map[string]struct {
        first    *Group
        second   *Group
        expected bool
    }{
        "no query offset set on both groups": {
            first: &Group{
                name:     "group-1",
                file:     "file-1",
                interval: time.Minute,
            },
            second: &Group{
                name:     "group-1",
                file:     "file-1",
                interval: time.Minute,
            },
            expected: true,
        },
        "query offset set only on the first group": {
            first: &Group{
                name:        "group-1",
                file:        "file-1",
                interval:    time.Minute,
                queryOffset: pointerOf[time.Duration](time.Minute),
            },
            second: &Group{
                name:     "group-1",
                file:     "file-1",
                interval: time.Minute,
            },
            expected: false,
        },
        "query offset set on both groups to the same value": {
            first: &Group{
                name:        "group-1",
                file:        "file-1",
                interval:    time.Minute,
                queryOffset: pointerOf[time.Duration](time.Minute),
            },
            second: &Group{
                name:        "group-1",
                file:        "file-1",
                interval:    time.Minute,
                queryOffset: pointerOf[time.Duration](time.Minute),
            },
            expected: true,
        },
        "query offset set on both groups to different value": {
            first: &Group{
                name:        "group-1",
                file:        "file-1",
                interval:    time.Minute,
                queryOffset: pointerOf[time.Duration](time.Minute),
            },
            second: &Group{
                name:        "group-1",
                file:        "file-1",
                interval:    time.Minute,
                queryOffset: pointerOf[time.Duration](2 * time.Minute),
            },
            expected: false,
        },
    }

    for testName, testData := range tests {
        t.Run(testName, func(t *testing.T) {
            require.Equal(t, testData.expected, testData.first.Equals(testData.second))
            require.Equal(t, testData.expected, testData.second.Equals(testData.first))
        })
    }
}

func pointerOf[T any](value T) *T {
    return &value
}
@@ -116,6 +116,7 @@ type ManagerOptions struct {
    ForGracePeriod            time.Duration
    ResendDelay               time.Duration
    GroupLoader               GroupLoader
    DefaultRuleQueryOffset    func() time.Duration
    MaxConcurrentEvals        int64
    ConcurrentEvalsEnabled    bool
    RuleConcurrencyController RuleConcurrencyController

@@ -189,10 +190,18 @@ func (m *Manager) Stop() {

// Update the rule manager's state as the config requires. If
// loading the new rules failed the old rule set is restored.
// This method will no-op in case the manager is already stopped.
func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc) error {
    m.mtx.Lock()
    defer m.mtx.Unlock()

    // We cannot update a stopped manager
    select {
    case <-m.done:
        return nil
    default:
    }

    groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, files...)

    if errs != nil {

@@ -336,6 +345,7 @@ func (m *Manager) LoadGroups(
            Rules:             rules,
            ShouldRestore:     shouldRestore,
            Opts:              m.opts,
            QueryOffset:       (*time.Duration)(rg.QueryOffset),
            done:              m.done,
            EvalIterationFunc: groupEvalIterationFunc,
        })
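The diff doesn't show where DefaultRuleQueryOffset is populated; a plausible wiring (purely a sketch, with a hypothetical RuleQueryOffset field on the global config) would close over the live configuration so config reloads take effect without rebuilding groups:

opts := &rules.ManagerOptions{
    // ... other options ...
    DefaultRuleQueryOffset: func() time.Duration {
        return time.Duration(cfg.GlobalConfig.RuleQueryOffset) // hypothetical config field
    },
}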
|
|
|
@ -16,8 +16,10 @@ package rules
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
@ -162,7 +164,7 @@ func TestAlertingRule(t *testing.T) {
|
|||
|
||||
evalTime := baseTime.Add(test.time)
|
||||
|
||||
res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
|
||||
res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
|
||||
|
@@ -192,152 +194,156 @@ func TestAlertingRule(t *testing.T) {
}

func TestForStateAddSamples(t *testing.T) {
	storage := promqltest.LoadedStorage(t, `
	for _, queryOffset := range []time.Duration{0, time.Minute} {
		t.Run(fmt.Sprintf("queryOffset %s", queryOffset.String()), func(t *testing.T) {
			storage := promqltest.LoadedStorage(t, `
		load 5m
			http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 95 105 105 95 85
			http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 80 90 100 110 120 130 140
	`)
	t.Cleanup(func() { storage.Close() })
			t.Cleanup(func() { storage.Close() })

	expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
	require.NoError(t, err)
			expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
			require.NoError(t, err)

	rule := NewAlertingRule(
		"HTTPRequestRateLow",
		expr,
		time.Minute,
		0,
		labels.FromStrings("severity", "{{\"c\"}}ritical"),
		labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
	)
	result := promql.Vector{
		promql.Sample{
			Metric: labels.FromStrings(
				"__name__", "ALERTS_FOR_STATE",
				"alertname", "HTTPRequestRateLow",
				"group", "canary",
				"instance", "0",
				"job", "app-server",
				"severity", "critical",
			),
			F: 1,
		},
		promql.Sample{
			Metric: labels.FromStrings(
				"__name__", "ALERTS_FOR_STATE",
				"alertname", "HTTPRequestRateLow",
				"group", "canary",
				"instance", "1",
				"job", "app-server",
				"severity", "critical",
			),
			F: 1,
		},
		promql.Sample{
			Metric: labels.FromStrings(
				"__name__", "ALERTS_FOR_STATE",
				"alertname", "HTTPRequestRateLow",
				"group", "canary",
				"instance", "0",
				"job", "app-server",
				"severity", "critical",
			),
			F: 1,
		},
		promql.Sample{
			Metric: labels.FromStrings(
				"__name__", "ALERTS_FOR_STATE",
				"alertname", "HTTPRequestRateLow",
				"group", "canary",
				"instance", "1",
				"job", "app-server",
				"severity", "critical",
			),
			F: 1,
		},
	}

	baseTime := time.Unix(0, 0)

	tests := []struct {
		time            time.Duration
		result          promql.Vector
		persistThisTime bool // If true, it means this 'time' is persisted for 'for'.
	}{
		{
			time:            0,
			result:          append(promql.Vector{}, result[:2]...),
			persistThisTime: true,
		},
		{
			time:   5 * time.Minute,
			result: append(promql.Vector{}, result[2:]...),
		},
		{
			time:   10 * time.Minute,
			result: append(promql.Vector{}, result[2:3]...),
		},
		{
			time:   15 * time.Minute,
			result: nil,
		},
		{
			time:   20 * time.Minute,
			result: nil,
		},
		{
			time:            25 * time.Minute,
			result:          append(promql.Vector{}, result[:1]...),
			persistThisTime: true,
		},
		{
			time:   30 * time.Minute,
			result: append(promql.Vector{}, result[2:3]...),
		},
	}

	var forState float64
	for i, test := range tests {
		t.Logf("case %d", i)
		evalTime := baseTime.Add(test.time)

		if test.persistThisTime {
			forState = float64(evalTime.Unix())
		}
		if test.result == nil {
			forState = float64(value.StaleNaN)
		}

		res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
		require.NoError(t, err)

		var filteredRes promql.Vector // After removing 'ALERTS' samples.
		for _, smpl := range res {
			smplName := smpl.Metric.Get("__name__")
			if smplName == "ALERTS_FOR_STATE" {
				filteredRes = append(filteredRes, smpl)
			} else {
				// If not 'ALERTS_FOR_STATE', it has to be 'ALERTS'.
				require.Equal(t, "ALERTS", smplName)
			rule := NewAlertingRule(
				"HTTPRequestRateLow",
				expr,
				time.Minute,
				0,
				labels.FromStrings("severity", "{{\"c\"}}ritical"),
				labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
			)
			result := promql.Vector{
				promql.Sample{
					Metric: labels.FromStrings(
						"__name__", "ALERTS_FOR_STATE",
						"alertname", "HTTPRequestRateLow",
						"group", "canary",
						"instance", "0",
						"job", "app-server",
						"severity", "critical",
					),
					F: 1,
				},
				promql.Sample{
					Metric: labels.FromStrings(
						"__name__", "ALERTS_FOR_STATE",
						"alertname", "HTTPRequestRateLow",
						"group", "canary",
						"instance", "1",
						"job", "app-server",
						"severity", "critical",
					),
					F: 1,
				},
				promql.Sample{
					Metric: labels.FromStrings(
						"__name__", "ALERTS_FOR_STATE",
						"alertname", "HTTPRequestRateLow",
						"group", "canary",
						"instance", "0",
						"job", "app-server",
						"severity", "critical",
					),
					F: 1,
				},
				promql.Sample{
					Metric: labels.FromStrings(
						"__name__", "ALERTS_FOR_STATE",
						"alertname", "HTTPRequestRateLow",
						"group", "canary",
						"instance", "1",
						"job", "app-server",
						"severity", "critical",
					),
					F: 1,
				},
			}
		}
		for i := range test.result {
			test.result[i].T = timestamp.FromTime(evalTime)
			// Updating the expected 'for' state.
			if test.result[i].F >= 0 {
				test.result[i].F = forState
			}
		}
		require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))

		sort.Slice(filteredRes, func(i, j int) bool {
			return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
			baseTime := time.Unix(0, 0)

			tests := []struct {
				time            time.Duration
				result          promql.Vector
				persistThisTime bool // If true, it means this 'time' is persisted for 'for'.
			}{
				{
					time:            0,
					result:          append(promql.Vector{}, result[:2]...),
					persistThisTime: true,
				},
				{
					time:   5 * time.Minute,
					result: append(promql.Vector{}, result[2:]...),
				},
				{
					time:   10 * time.Minute,
					result: append(promql.Vector{}, result[2:3]...),
				},
				{
					time:   15 * time.Minute,
					result: nil,
				},
				{
					time:   20 * time.Minute,
					result: nil,
				},
				{
					time:            25 * time.Minute,
					result:          append(promql.Vector{}, result[:1]...),
					persistThisTime: true,
				},
				{
					time:   30 * time.Minute,
					result: append(promql.Vector{}, result[2:3]...),
				},
			}

			var forState float64
			for i, test := range tests {
				t.Logf("case %d", i)
				evalTime := baseTime.Add(test.time).Add(queryOffset)

				if test.persistThisTime {
					forState = float64(evalTime.Unix())
				}
				if test.result == nil {
					forState = float64(value.StaleNaN)
				}

				res, err := rule.Eval(context.TODO(), queryOffset, evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
				require.NoError(t, err)

				var filteredRes promql.Vector // After removing 'ALERTS' samples.
				for _, smpl := range res {
					smplName := smpl.Metric.Get("__name__")
					if smplName == "ALERTS_FOR_STATE" {
						filteredRes = append(filteredRes, smpl)
					} else {
						// If not 'ALERTS_FOR_STATE', it has to be 'ALERTS'.
						require.Equal(t, "ALERTS", smplName)
					}
				}
				for i := range test.result {
					test.result[i].T = timestamp.FromTime(evalTime.Add(-queryOffset))
					// Updating the expected 'for' state.
					if test.result[i].F >= 0 {
						test.result[i].F = forState
					}
				}
				require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))

				sort.Slice(filteredRes, func(i, j int) bool {
					return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
				})
				prom_testutil.RequireEqual(t, test.result, filteredRes)

				for _, aa := range rule.ActiveAlerts() {
					require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
				}
			}
		})
		prom_testutil.RequireEqual(t, test.result, filteredRes)

		for _, aa := range rule.ActiveAlerts() {
			require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
		}
	}
}

@@ -349,243 +355,251 @@ func sortAlerts(items []*Alert) {
}

func TestForStateRestore(t *testing.T) {
	storage := promqltest.LoadedStorage(t, `
	for _, queryOffset := range []time.Duration{0, time.Minute} {
		t.Run(fmt.Sprintf("queryOffset %s", queryOffset.String()), func(t *testing.T) {
			storage := promqltest.LoadedStorage(t, `
		load 5m
			http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 50 0 0 25 0 0 40 0 120
			http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 125 90 60 0 0 25 0 0 40 0 130
	`)
	t.Cleanup(func() { storage.Close() })
			t.Cleanup(func() { storage.Close() })

	expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
	require.NoError(t, err)
			expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
			require.NoError(t, err)

	opts := &ManagerOptions{
		QueryFunc:       EngineQueryFunc(testEngine, storage),
		Appendable:      storage,
		Queryable:       storage,
		Context:         context.Background(),
		Logger:          log.NewNopLogger(),
		NotifyFunc:      func(ctx context.Context, expr string, alerts ...*Alert) {},
		OutageTolerance: 30 * time.Minute,
		ForGracePeriod:  10 * time.Minute,
	}

	alertForDuration := 25 * time.Minute
	// Initial run before prometheus goes down.
	rule := NewAlertingRule(
		"HTTPRequestRateLow",
		expr,
		alertForDuration,
		0,
		labels.FromStrings("severity", "critical"),
		labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
	)

	group := NewGroup(GroupOptions{
		Name:          "default",
		Interval:      time.Second,
		Rules:         []Rule{rule},
		ShouldRestore: true,
		Opts:          opts,
	})
	groups := make(map[string]*Group)
	groups["default;"] = group

	initialRuns := []time.Duration{0, 5 * time.Minute}

	baseTime := time.Unix(0, 0)
	for _, duration := range initialRuns {
		evalTime := baseTime.Add(duration)
		group.Eval(context.TODO(), evalTime)
	}

	// Prometheus goes down here. We create new rules and groups.
	type testInput struct {
		name            string
		restoreDuration time.Duration
		expectedAlerts  []*Alert

		num          int
		noRestore    bool
		gracePeriod  bool
		downDuration time.Duration
		before       func()
	}

	tests := []testInput{
		{
			name:            "normal restore (alerts were not firing)",
			restoreDuration: 15 * time.Minute,
			expectedAlerts:  rule.ActiveAlerts(),
			downDuration:    10 * time.Minute,
		},
		{
			name:            "outage tolerance",
			restoreDuration: 40 * time.Minute,
			noRestore:       true,
			num:             2,
		},
		{
			name:            "no active alerts",
			restoreDuration: 50 * time.Minute,
			expectedAlerts:  []*Alert{},
		},
		{
			name:            "test the grace period",
			restoreDuration: 25 * time.Minute,
			expectedAlerts:  []*Alert{},
			gracePeriod:     true,
			before: func() {
				for _, duration := range []time.Duration{10 * time.Minute, 15 * time.Minute, 20 * time.Minute} {
					evalTime := baseTime.Add(duration)
					group.Eval(context.TODO(), evalTime)
				}
			},
			num: 2,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.before != nil {
				tt.before()
			opts := &ManagerOptions{
				QueryFunc:       EngineQueryFunc(testEngine, storage),
				Appendable:      storage,
				Queryable:       storage,
				Context:         context.Background(),
				Logger:          log.NewNopLogger(),
				NotifyFunc:      func(ctx context.Context, expr string, alerts ...*Alert) {},
				OutageTolerance: 30 * time.Minute,
				ForGracePeriod:  10 * time.Minute,
			}

			newRule := NewAlertingRule(
			alertForDuration := 25 * time.Minute
			// Initial run before prometheus goes down.
			rule := NewAlertingRule(
				"HTTPRequestRateLow",
				expr,
				alertForDuration,
				0,
				labels.FromStrings("severity", "critical"),
				labels.EmptyLabels(), labels.EmptyLabels(), "", false, nil,
				labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
			)
			newGroup := NewGroup(GroupOptions{

			group := NewGroup(GroupOptions{
				Name:          "default",
				Interval:      time.Second,
				Rules:         []Rule{newRule},
				Rules:         []Rule{rule},
				ShouldRestore: true,
				Opts:          opts,
			})
			groups := make(map[string]*Group)
			groups["default;"] = group

			newGroups := make(map[string]*Group)
			newGroups["default;"] = newGroup
			initialRuns := []time.Duration{0, 5 * time.Minute}

			restoreTime := baseTime.Add(tt.restoreDuration)
			// First eval before restoration.
			newGroup.Eval(context.TODO(), restoreTime)
			// Restore happens here.
			newGroup.RestoreForState(restoreTime)

			got := newRule.ActiveAlerts()
			for _, aa := range got {
				require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
			baseTime := time.Unix(0, 0)
			for _, duration := range initialRuns {
				evalTime := baseTime.Add(duration)
				group.Eval(context.TODO(), evalTime)
			}
			sort.Slice(got, func(i, j int) bool {
				return labels.Compare(got[i].Labels, got[j].Labels) < 0
			})

			// In all cases, we expect the restoration process to have completed.
			require.Truef(t, newRule.Restored(), "expected the rule restoration process to have completed")
			// Prometheus goes down here. We create new rules and groups.
			type testInput struct {
				name            string
				restoreDuration time.Duration
				expectedAlerts  []*Alert

			// Checking if we have restored it correctly.
			switch {
			case tt.noRestore:
				require.Len(t, got, tt.num)
				for _, e := range got {
					require.Equal(t, e.ActiveAt, restoreTime)
				}
			case tt.gracePeriod:
				num          int
				noRestore    bool
				gracePeriod  bool
				downDuration time.Duration
				before       func()
			}

				require.Len(t, got, tt.num)
				for _, e := range got {
					require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime))
				}
			default:
				exp := tt.expectedAlerts
				require.Equal(t, len(exp), len(got))
				sortAlerts(exp)
				sortAlerts(got)
				for i, e := range exp {
					require.Equal(t, e.Labels, got[i].Labels)
			tests := []testInput{
				{
					name:            "normal restore (alerts were not firing)",
					restoreDuration: 15 * time.Minute,
					expectedAlerts:  rule.ActiveAlerts(),
					downDuration:    10 * time.Minute,
				},
				{
					name:            "outage tolerance",
					restoreDuration: 40 * time.Minute,
					noRestore:       true,
					num:             2,
				},
				{
					name:            "no active alerts",
					restoreDuration: 50 * time.Minute,
					expectedAlerts:  []*Alert{},
				},
				{
					name:            "test the grace period",
					restoreDuration: 25 * time.Minute,
					expectedAlerts:  []*Alert{},
					gracePeriod:     true,
					before: func() {
						for _, duration := range []time.Duration{10 * time.Minute, 15 * time.Minute, 20 * time.Minute} {
							evalTime := baseTime.Add(duration)
							group.Eval(context.TODO(), evalTime)
						}
					},
					num: 2,
				},
			}

					// Difference in time should be within 1e6 ns, i.e. 1ms
					// (due to conversion between ns & ms, float64 & int64).
					activeAtDiff := float64(e.ActiveAt.Unix() + int64(tt.downDuration/time.Second) - got[i].ActiveAt.Unix())
					require.Equal(t, 0.0, math.Abs(activeAtDiff), "'for' state restored time is wrong")
				}
			for _, tt := range tests {
				t.Run(tt.name, func(t *testing.T) {
					if tt.before != nil {
						tt.before()
					}

					newRule := NewAlertingRule(
						"HTTPRequestRateLow",
						expr,
						alertForDuration,
						0,
						labels.FromStrings("severity", "critical"),
						labels.EmptyLabels(), labels.EmptyLabels(), "", false, nil,
					)
					newGroup := NewGroup(GroupOptions{
						Name:          "default",
						Interval:      time.Second,
						Rules:         []Rule{newRule},
						ShouldRestore: true,
						Opts:          opts,
						QueryOffset:   &queryOffset,
					})

					newGroups := make(map[string]*Group)
					newGroups["default;"] = newGroup

					restoreTime := baseTime.Add(tt.restoreDuration).Add(queryOffset)
					// First eval before restoration.
					newGroup.Eval(context.TODO(), restoreTime)
					// Restore happens here.
					newGroup.RestoreForState(restoreTime)

					got := newRule.ActiveAlerts()
					for _, aa := range got {
						require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
					}
					sort.Slice(got, func(i, j int) bool {
						return labels.Compare(got[i].Labels, got[j].Labels) < 0
					})

					// In all cases, we expect the restoration process to have completed.
					require.Truef(t, newRule.Restored(), "expected the rule restoration process to have completed")

					// Checking if we have restored it correctly.
					switch {
					case tt.noRestore:
						require.Len(t, got, tt.num)
						for _, e := range got {
							require.Equal(t, e.ActiveAt, restoreTime)
						}
					case tt.gracePeriod:

						require.Len(t, got, tt.num)
						for _, e := range got {
							require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime))
						}
					default:
						exp := tt.expectedAlerts
						require.Equal(t, len(exp), len(got))
						sortAlerts(exp)
						sortAlerts(got)
						for i, e := range exp {
							require.Equal(t, e.Labels, got[i].Labels)

							// Difference in time should be within 1e6 ns, i.e. 1ms
							// (due to conversion between ns & ms, float64 & int64).
							activeAtDiff := queryOffset.Seconds() + float64(e.ActiveAt.Unix()+int64(tt.downDuration/time.Second)-got[i].ActiveAt.Unix())
							require.Equal(t, 0.0, math.Abs(activeAtDiff), "'for' state restored time is wrong")
						}
					}
				})
			}
		})
	}
}

func TestStaleness(t *testing.T) {
	st := teststorage.New(t)
	defer st.Close()
	engineOpts := promql.EngineOpts{
		Logger:     nil,
		Reg:        nil,
		MaxSamples: 10,
		Timeout:    10 * time.Second,
	for _, queryOffset := range []time.Duration{0, time.Minute} {
		st := teststorage.New(t)
		defer st.Close()
		engineOpts := promql.EngineOpts{
			Logger:     nil,
			Reg:        nil,
			MaxSamples: 10,
			Timeout:    10 * time.Second,
		}
		engine := promql.NewEngine(engineOpts)
		opts := &ManagerOptions{
			QueryFunc:  EngineQueryFunc(engine, st),
			Appendable: st,
			Queryable:  st,
			Context:    context.Background(),
			Logger:     log.NewNopLogger(),
		}

		expr, err := parser.ParseExpr("a + 1")
		require.NoError(t, err)
		rule := NewRecordingRule("a_plus_one", expr, labels.Labels{})
		group := NewGroup(GroupOptions{
			Name:          "default",
			Interval:      time.Second,
			Rules:         []Rule{rule},
			ShouldRestore: true,
			Opts:          opts,
			QueryOffset:   &queryOffset,
		})

		// A time series that has two samples and then goes stale.
		app := st.Appender(context.Background())
		app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 0, 1)
		app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 1000, 2)
		app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 2000, math.Float64frombits(value.StaleNaN))

		err = app.Commit()
		require.NoError(t, err)

		ctx := context.Background()

		// Execute 3 times, 1 second apart.
		group.Eval(ctx, time.Unix(0, 0).Add(queryOffset))
		group.Eval(ctx, time.Unix(1, 0).Add(queryOffset))
		group.Eval(ctx, time.Unix(2, 0).Add(queryOffset))

		querier, err := st.Querier(0, 2000)
		require.NoError(t, err)
		defer querier.Close()

		matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a_plus_one")
		require.NoError(t, err)

		set := querier.Select(ctx, false, nil, matcher)
		samples, err := readSeriesSet(set)
		require.NoError(t, err)

		metric := labels.FromStrings(model.MetricNameLabel, "a_plus_one").String()
		metricSample, ok := samples[metric]

		require.True(t, ok, "Series %s not returned.", metric)
		require.True(t, value.IsStaleNaN(metricSample[2].F), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[2].F))
		metricSample[2].F = 42 // require.Equal cannot handle NaN.

		want := map[string][]promql.FPoint{
			metric: {{T: 0, F: 2}, {T: 1000, F: 3}, {T: 2000, F: 42}},
		}

		require.Equal(t, want, samples)
	}
	engine := promql.NewEngine(engineOpts)
	opts := &ManagerOptions{
		QueryFunc:  EngineQueryFunc(engine, st),
		Appendable: st,
		Queryable:  st,
		Context:    context.Background(),
		Logger:     log.NewNopLogger(),
	}

	expr, err := parser.ParseExpr("a + 1")
	require.NoError(t, err)
	rule := NewRecordingRule("a_plus_one", expr, labels.Labels{})
	group := NewGroup(GroupOptions{
		Name:          "default",
		Interval:      time.Second,
		Rules:         []Rule{rule},
		ShouldRestore: true,
		Opts:          opts,
	})

	// A time series that has two samples and then goes stale.
	app := st.Appender(context.Background())
	app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 0, 1)
	app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 1000, 2)
	app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 2000, math.Float64frombits(value.StaleNaN))

	err = app.Commit()
	require.NoError(t, err)

	ctx := context.Background()

	// Execute 3 times, 1 second apart.
	group.Eval(ctx, time.Unix(0, 0))
	group.Eval(ctx, time.Unix(1, 0))
	group.Eval(ctx, time.Unix(2, 0))

	querier, err := st.Querier(0, 2000)
	require.NoError(t, err)
	defer querier.Close()

	matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a_plus_one")
	require.NoError(t, err)

	set := querier.Select(ctx, false, nil, matcher)
	samples, err := readSeriesSet(set)
	require.NoError(t, err)

	metric := labels.FromStrings(model.MetricNameLabel, "a_plus_one").String()
	metricSample, ok := samples[metric]

	require.True(t, ok, "Series %s not returned.", metric)
	require.True(t, value.IsStaleNaN(metricSample[2].F), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[2].F))
	metricSample[2].F = 42 // require.Equal cannot handle NaN.

	want := map[string][]promql.FPoint{
		metric: {{T: 0, F: 2}, {T: 1000, F: 3}, {T: 2000, F: 42}},
	}

	require.Equal(t, want, samples)
}

// Convert a SeriesSet into a form usable with require.Equal.

@@ -609,6 +623,46 @@ func readSeriesSet(ss storage.SeriesSet) (map[string][]promql.FPoint, error) {
	return result, ss.Err()
}

func TestGroup_QueryOffset(t *testing.T) {
	config := `
groups:
  - name: group1
    query_offset: 2m
  - name: group2
    query_offset: 0s
  - name: group3
`

	dir := t.TempDir()
	fname := path.Join(dir, "rules.yaml")
	err := os.WriteFile(fname, []byte(config), fs.ModePerm)
	require.NoError(t, err)

	m := NewManager(&ManagerOptions{
		Logger: log.NewNopLogger(),
		DefaultRuleQueryOffset: func() time.Duration {
			return time.Minute
		},
	})
	m.start()
	err = m.Update(time.Second, []string{fname}, labels.EmptyLabels(), "", nil)
	require.NoError(t, err)

	rgs := m.RuleGroups()
	sort.Slice(rgs, func(i, j int) bool {
		return rgs[i].Name() < rgs[j].Name()
	})

	// From config.
	require.Equal(t, 2*time.Minute, rgs[0].QueryOffset())
	// Setting 0 in config is detected.
	require.Equal(t, time.Duration(0), rgs[1].QueryOffset())
	// Default when nothing is set.
	require.Equal(t, time.Minute, rgs[2].QueryOffset())

	m.Stop()
}

func TestCopyState(t *testing.T) {
	oldGroup := &Group{
		rules: []Rule{
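
The assertions above pin down how a group's effective offset is resolved: an explicit query_offset wins (an explicit 0s included), and DefaultRuleQueryOffset applies only when the field is absent. A hedged sketch of reading the resolved values back, assuming a manager m configured as in the test:

	for _, rg := range m.RuleGroups() {
		// QueryOffset() already reflects the per-group override or the
		// manager-wide default, so callers need no fallback logic.
		fmt.Printf("group %s evaluates at eval time minus %s\n", rg.Name(), rg.QueryOffset())
	}
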
@@ -1401,7 +1455,8 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {

	expHist := hists[0].ToFloat(nil)
	for _, h := range hists[1:] {
		expHist = expHist.Add(h.ToFloat(nil))
		expHist, err = expHist.Add(h.ToFloat(nil))
		require.NoError(t, err)
	}

	it := s.Iterator(nil)

|
|||
}
|
||||
|
||||
func TestAsyncRuleEvaluation(t *testing.T) {
|
||||
storage := teststorage.New(t)
|
||||
t.Cleanup(func() { storage.Close() })
|
||||
|
||||
var (
|
||||
inflightQueries atomic.Int32
|
||||
maxInflight atomic.Int32
|
||||
)
|
||||
|
||||
t.Run("synchronous evaluation with independent rules", func(t *testing.T) {
|
||||
// Reset.
|
||||
inflightQueries.Store(0)
|
||||
maxInflight.Store(0)
|
||||
t.Parallel()
|
||||
storage := teststorage.New(t)
|
||||
t.Cleanup(func() { storage.Close() })
|
||||
inflightQueries := atomic.Int32{}
|
||||
maxInflight := atomic.Int32{}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
t.Cleanup(cancel)
|
||||
|
@@ -1895,9 +1944,11 @@ func TestAsyncRuleEvaluation(t *testing.T) {
	})

	t.Run("asynchronous evaluation with independent and dependent rules", func(t *testing.T) {
		// Reset.
		inflightQueries.Store(0)
		maxInflight.Store(0)
		t.Parallel()
		storage := teststorage.New(t)
		t.Cleanup(func() { storage.Close() })
		inflightQueries := atomic.Int32{}
		maxInflight := atomic.Int32{}

		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

@@ -1931,9 +1982,11 @@ func TestAsyncRuleEvaluation(t *testing.T) {
	})

	t.Run("asynchronous evaluation of all independent rules, insufficient concurrency", func(t *testing.T) {
		// Reset.
		inflightQueries.Store(0)
		maxInflight.Store(0)
		t.Parallel()
		storage := teststorage.New(t)
		t.Cleanup(func() { storage.Close() })
		inflightQueries := atomic.Int32{}
		maxInflight := atomic.Int32{}

		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

@@ -1967,9 +2020,11 @@ func TestAsyncRuleEvaluation(t *testing.T) {
	})

	t.Run("asynchronous evaluation of all independent rules, sufficient concurrency", func(t *testing.T) {
		// Reset.
		inflightQueries.Store(0)
		maxInflight.Store(0)
		t.Parallel()
		storage := teststorage.New(t)
		t.Cleanup(func() { storage.Close() })
		inflightQueries := atomic.Int32{}
		maxInflight := atomic.Int32{}

		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(cancel)

@@ -2044,7 +2099,24 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) {
	require.EqualValues(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount))
}

const artificialDelay = 15 * time.Millisecond
func TestUpdateWhenStopped(t *testing.T) {
	files := []string{"fixtures/rules.yaml"}
	ruleManager := NewManager(&ManagerOptions{
		Context: context.Background(),
		Logger:  log.NewNopLogger(),
	})
	ruleManager.start()
	err := ruleManager.Update(10*time.Second, files, labels.EmptyLabels(), "", nil)
	require.NoError(t, err)
	require.NotEmpty(t, ruleManager.groups)

	ruleManager.Stop()
	// Updates following a stop are no-op.
	err = ruleManager.Update(10*time.Second, []string{}, labels.EmptyLabels(), "", nil)
	require.NoError(t, err)
}

const artificialDelay = 250 * time.Millisecond

func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.Int32, maxConcurrent int64) *ManagerOptions {
	var inflightMu sync.Mutex

@@ -31,7 +31,7 @@ type unknownRule struct{}

func (u unknownRule) Name() string          { return "" }
func (u unknownRule) Labels() labels.Labels { return labels.EmptyLabels() }
func (u unknownRule) Eval(context.Context, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error) {
func (u unknownRule) Eval(context.Context, time.Duration, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error) {
	return nil, nil
}
func (u unknownRule) String() string { return "" }

@@ -77,10 +77,9 @@ func (rule *RecordingRule) Labels() labels.Labels {
}

// Eval evaluates the rule and then overrides the metric names and labels accordingly.
func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, _ *url.URL, limit int) (promql.Vector, error) {
func (rule *RecordingRule) Eval(ctx context.Context, queryOffset time.Duration, ts time.Time, query QueryFunc, _ *url.URL, limit int) (promql.Vector, error) {
	ctx = NewOriginContext(ctx, NewRuleDetail(rule))

	vector, err := query(ctx, rule.vector.String(), ts)
	vector, err := query(ctx, rule.vector.String(), ts.Add(-queryOffset))
	if err != nil {
		return nil, err
	}
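
The widened Eval signature threads the per-group offset down to individual rules: the produced samples keep the evaluation timestamp ts, while the underlying PromQL query runs at ts.Add(-queryOffset). A minimal sketch of the effect, assuming rule, ctx and queryFunc are in scope and a one-minute offset is configured:

	queryOffset := time.Minute
	ts := time.Now()
	// Samples are stamped with ts, but the expression is evaluated against
	// the TSDB state at ts - 1m, e.g. to tolerate ingestion delay.
	vector, err := rule.Eval(ctx, queryOffset, ts, queryFunc, nil, 0)
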
Some files were not shown because too many files have changed in this diff.