Commit 4c15750193 ("WIP")

.github/workflows/ci.yml (65 lines changed)
@@ -11,12 +11,14 @@ jobs:
     container:
       # Whenever the Go version is updated here, .promu.yml
       # should also be updated.
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.23-base
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
-      - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
+        with:
+          enable_npm: true
+      - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 test-flags=""
       - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
       - run: make -C documentation/examples/remote_storage
       - run: make -C documentation/examples
@@ -25,10 +27,10 @@ jobs:
     name: More Go tests
     runs-on: ubuntu-latest
     container:
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.23-base
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
       - run: go test --tags=dedupelabels ./...
       - run: GOARCH=386 go test ./cmd/prometheus
@@ -39,9 +41,12 @@ jobs:
   test_go_oldest:
     name: Go tests with previous Go version
     runs-on: ubuntu-latest
+    env:
+      # Enforce the Go version.
+      GOTOOLCHAIN: local
     container:
       # The go version in this image should be N-1 wrt test_go.
-      image: quay.io/prometheus/golang-builder:1.21-base
+      image: quay.io/prometheus/golang-builder:1.22-base
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - run: make build
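The new `env` block pins the toolchain: since Go 1.21, `GOTOOLCHAIN=local` makes the `go` command use only the locally installed toolchain instead of auto-downloading the version requested in `go.mod`, so this job genuinely exercises the N-1 Go release shipped in the container image. A quick sketch of the effect (error text approximate):

```bash
# With GOTOOLCHAIN=local, builds fail loudly if go.mod demands a newer Go
# than the one installed, rather than silently downloading it:
GOTOOLCHAIN=local go build ./...
# go: go.mod requires go >= 1.23 (running go 1.22; GOTOOLCHAIN=local)
```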
@@ -54,11 +59,11 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.23-base

     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
         with:
           enable_go: false
@@ -77,7 +82,7 @@ jobs:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
      - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
        with:
-          go-version: 1.22.x
+          go-version: 1.23.x
      - run: |
          $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
          go test $TestTargets -vet=off -v
@@ -89,7 +94,7 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.23-base
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - run: go install ./cmd/promtool/.
@@ -107,6 +112,8 @@ jobs:
     if: |
       !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+      &&
+      !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
       &&
       !(github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-'))
       &&
       !(github.event_name == 'push' && github.event.ref == 'refs/heads/main')
@@ -115,7 +122,7 @@ jobs:
         thread: [ 0, 1, 2 ]
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/build
         with:
           promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
@@ -127,6 +134,8 @@ jobs:
     if: |
       (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+      ||
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
       ||
       (github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-'))
       ||
       (github.event_name == 'push' && github.event.ref == 'refs/heads/main')
@@ -138,7 +147,7 @@ jobs:
     # should also be updated.
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/build
         with:
           parallelism: 12
@@ -165,7 +174,7 @@ jobs:
         uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:
           cache: false
-          go-version: 1.22.x
+          go-version: 1.23.x
       - name: Run goyacc and check for diff
         run: make install-goyacc check-generated-parser
   golangci:
@@ -177,7 +186,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:
-          go-version: 1.22.x
+          go-version: 1.23.x
       - name: Install snmp_exporter/generator dependencies
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
@@ -200,7 +209,7 @@ jobs:
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/publish_main
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -211,10 +220,13 @@ jobs:
     name: Publish release artefacts
     runs-on: ubuntu-latest
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
-    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
+    if: |
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+      ||
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/publish_release
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -229,7 +241,7 @@ jobs:
     steps:
       - name: Checkout
         uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - name: Install nodejs
         uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3
         with:
@@ -242,17 +254,26 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-node-
       - name: Check libraries version
-        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
-        run: ./scripts/ui_release.sh --check-package "$(echo ${{ github.ref_name }}|sed s/v2/v0/)"
+        if: |
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+          ||
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
+        run: ./scripts/ui_release.sh --check-package "$(./scripts/get_module_version.sh ${{ github.ref_name }})"
       - name: build
         run: make assets
       - name: Copy files before publishing libs
         run: ./scripts/ui_release.sh --copy
       - name: Publish dry-run libraries
-        if: "!(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))"
+        if: |
+          !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+          &&
+          !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
         run: ./scripts/ui_release.sh --publish dry-run
       - name: Publish libraries
-        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
+        if: |
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+          ||
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
         run: ./scripts/ui_release.sh --publish
         env:
           # The setup-node action writes an .npmrc file with this env variable
.github/workflows/fuzzing.yml (2 lines changed)
@@ -21,7 +21,7 @@ jobs:
           fuzz-seconds: 600
           dry-run: false
       - name: Upload Crash
-        uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
+        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
         if: failure() && steps.build.outcome == 'success'
         with:
           name: artifacts
.github/workflows/scorecards.yml (2 lines changed)
@@ -37,7 +37,7 @@ jobs:
       # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
       # format to the repository Actions tab.
       - name: "Upload artifact"
-        uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # tag=v4.3.4
+        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # tag=v4.4.0
         with:
           name: SARIF file
           path: results.sarif
.gitignore (2 lines changed)
@@ -22,7 +22,7 @@ benchmark.txt
 /documentation/examples/remote_storage/example_write_adapter/example_write_adapter

 npm_licenses.tar.bz2
-/web/ui/static/react
+/web/ui/static

 /vendor
 /.build
.promu.yml

@@ -1,7 +1,7 @@
 go:
   # Whenever the Go version is updated here,
   # .github/workflows should also be updated.
-  version: 1.22
+  version: 1.23
 repository:
   path: github.com/prometheus/prometheus
 build:
@@ -28,8 +28,6 @@ tarball:
   # Whenever there are new files to include in the tarball,
   # remember to make sure the new files will be generated after `make build`.
   files:
-    - consoles
-    - console_libraries
     - documentation/examples/prometheus.yml
     - LICENSE
     - NOTICE
CHANGELOG.md (47 lines changed)
@@ -1,9 +1,46 @@
 # Changelog

-## unreleased
+## 3.0.0-beta.0 / 2024-09-05
+
+Release 3.0.0-beta.0 includes new features such as a brand-new UI and UTF-8 support enabled by default. As a new major version, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. Most users should be able to try this release out of the box without any configuration changes.
+
+As is traditional with a beta release, we do **not** recommend users install 3.0.0-beta on critical production systems, but we do want everyone to test it out and find bugs.
+
+* [CHANGE] UI: The old web UI has been replaced by a completely new one that is less cluttered and adds a few new features (PromLens-style tree view, better metrics explorer, "Explain" tab). However, it is still missing some features of the old UI (notably, exemplar display and heatmaps). To switch back to the old UI, you can use the feature flag `--enable-feature=old-ui` for the time being. #14872
+* [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. #13904
+* [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365
+* [CHANGE] Kubernetes SD: Remove support for `networking.k8s.io/v1beta1` API version of Ingress. This version is no longer served as of Kubernetes v1.22. #14365
+* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-name` feature flag has been removed. #14705
+* [CHANGE] Console: Remove example files for the console feature. Users can continue using the console feature by supplying their own JavaScript and templates. #14807
+* [CHANGE] SD: Enable the new service discovery manager by default. This SD manager does not restart unchanged discoveries upon reloading. This makes reloads faster and reduces pressure on service discoveries' sources. The corresponding `new-service-discovery-manager` feature flag has been removed. #14770
+* [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747
+* [CHANGE] Remove deprecated `remote-write-receiver`, `promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526
+* [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643
+* [FEATURE] Promtool: Allow additional labels to be added to blocks created from openmetrics. #14402
+* [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200
+* [FEATURE] Automatic reloading of the Prometheus configuration file at a specified interval. #14769
+* [ENHANCEMENT] OTLP receiver: Warn when encountering exponential histograms with zero count and non-zero sum. #14706
+* [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612
+* [ENHANCEMENT] Scrape: Only parse created timestamp if the `created-timestamp-zero-ingestion` feature flag is enabled, because a lot of memory is used when parsing the created timestamp in the OM text format. #14815
+* [ENHANCEMENT] Scrape: Add support for logging scrape failures to a specified file. #14734
+* [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379
+* [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature flag `promql-delayed-name-removal`. #14477
+* [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875
+* [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816
+* [ENHANCEMENT] Add support for multiple listening addresses. #14665
+* [ENHANCEMENT] Add the ability to set custom HTTP headers. #14817
+* [BUGFIX] TSDB: Fix shard initialization after WAL repair. #14731
+* [BUGFIX] UTF-8: Ensure correct validation when legacy mode is turned on. #14736
+* [BUGFIX] SD: Make discovery manager notify consumers of dropped targets for still-defined jobs. #13147
+* [BUGFIX] SD: Prevent the new service discovery manager from storing stale targets. #13622
+* [BUGFIX] Remote Write 2.0: Ensure metadata records are sent from the WAL to remote write during WAL replay. #14766
+* [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029
+* [BUGFIX] Scrape: Reset exemplar position when scraping histograms in protobuf. #14810
+* [BUGFIX] Native Histograms: Do not re-use spans between histograms. #14771
+* [BUGFIX] Scrape: Only parse created timestamp if the `created-timestamp-zero-ingestion` feature flag is enabled, because a lot of memory is used when parsing the created timestamp in the OM text format. #14815
+* [BUGFIX] TSDB: Fix panic in query during truncation with OOO head. #14831
+* [BUGFIX] TSDB: Fix panic in chunk querier. #14874
+* [BUGFIX] promql.Engine.Close: No-op if nil. #14861
+* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against the record_decode_failures_total metric. #14042

 ## 2.54.1 / 2024-08-27
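Two of the flag changes above are easy to miss when upgrading; as a quick sketch of the before/after invocations described in this changelog:

```bash
# Agent mode: the feature flag is gone, replaced by a dedicated flag.
prometheus --enable-feature=agent --config.file=agent.yml   # 2.x
prometheus --agent --config.file=agent.yml                  # 3.0

# The old web UI remains available behind a feature flag for now.
prometheus --enable-feature=old-ui --config.file=prometheus.yml
```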
@@ -103,7 +140,7 @@ This release changes the default for GOGC, the Go runtime control for the trade-
 * [ENHANCEMENT] TSDB: Pause regular block compactions if the head needs to be compacted (prioritize head as it increases memory consumption). #13754
 * [ENHANCEMENT] Observability: Improved logging during signal handling termination. #13772
 * [ENHANCEMENT] Observability: All log lines for drop series use "num_dropped" key consistently. #13823
-* [ENHANCEMENT] Observability: Log chunk snapshot and mmaped chunk replay duration during WAL replay. #13838
+* [ENHANCEMENT] Observability: Log chunk snapshot and mmapped chunk replay duration during WAL replay. #13838
 * [ENHANCEMENT] Observability: Log if the block is being created from WBL during compaction. #13846
 * [BUGFIX] PromQL: Fix inaccurate sample number statistic when querying histograms. #13667
 * [BUGFIX] PromQL: Fix `histogram_stddev` and `histogram_stdvar` for cases where the histogram has negative buckets. #13852
@@ -640,7 +677,7 @@ The binaries published with this release are built with Go1.17.8 to avoid [CVE-2

 ## 2.33.0 / 2022-01-29

-* [CHANGE] PromQL: Promote negative offset and `@` modifer to stable features. #10121
+* [CHANGE] PromQL: Promote negative offset and `@` modifier to stable features. #10121
 * [CHANGE] Web: Promote remote-write-receiver to stable. #10119
 * [FEATURE] Config: Add `stripPort` template function. #10002
 * [FEATURE] Promtool: Add cardinality analysis to `check metrics`, enabled by flag `--extended`. #10045
@@ -877,7 +914,7 @@ This vulnerability has been reported by Aaron Devaney from MDSec.
 * [ENHANCEMENT] Templating: Enable parsing strings in `humanize` functions. #8682
 * [BUGFIX] UI: Provide errors instead of blank page on TSDB Status Page. #8654 #8659
 * [BUGFIX] TSDB: Do not panic when writing very large records to the WAL. #8790
-* [BUGFIX] TSDB: Avoid panic when mmaped memory is referenced after the file is closed. #8723
+* [BUGFIX] TSDB: Avoid panic when mmapped memory is referenced after the file is closed. #8723
 * [BUGFIX] Scaleway Discovery: Fix nil pointer dereference. #8737
 * [BUGFIX] Consul Discovery: Restart no longer required after config update with no targets. #8766
@@ -1803,7 +1840,7 @@ information, read the announcement blog post and migration guide.
 ## 1.7.0 / 2017-06-06

 * [CHANGE] Compress remote storage requests and responses with unframed/raw snappy.
-* [CHANGE] Properly ellide secrets in config.
+* [CHANGE] Properly elide secrets in config.
 * [FEATURE] Add OpenStack service discovery.
 * [FEATURE] Add ability to limit Kubernetes service discovery to certain namespaces.
 * [FEATURE] Add metric for discovered number of Alertmanagers.
Dockerfile

@@ -8,21 +8,16 @@ ARG OS="linux"
 COPY .build/${OS}-${ARCH}/prometheus /bin/prometheus
 COPY .build/${OS}-${ARCH}/promtool /bin/promtool
 COPY documentation/examples/prometheus.yml /etc/prometheus/prometheus.yml
-COPY console_libraries/ /usr/share/prometheus/console_libraries/
-COPY consoles/ /usr/share/prometheus/consoles/
 COPY LICENSE /LICENSE
 COPY NOTICE /NOTICE
 COPY npm_licenses.tar.bz2 /npm_licenses.tar.bz2

 WORKDIR /prometheus
-RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/ && \
-    chown -R nobody:nobody /etc/prometheus /prometheus
+RUN chown -R nobody:nobody /etc/prometheus /prometheus

 USER nobody
 EXPOSE 9090
 VOLUME [ "/prometheus" ]
 ENTRYPOINT [ "/bin/prometheus" ]
 CMD [ "--config.file=/etc/prometheus/prometheus.yml", \
-      "--storage.tsdb.path=/prometheus", \
-      "--web.console.libraries=/usr/share/prometheus/console_libraries", \
-      "--web.console.templates=/usr/share/prometheus/consoles" ]
+      "--storage.tsdb.path=/prometheus" ]
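With the console templates gone from the image, `CMD` shrinks to the config file and storage path. A typical invocation is unaffected (a sketch; the image tag is assumed, and any `--web.console.*` flags previously passed should simply be dropped):

```bash
docker run -p 9090:9090 \
  -v /path/to/prometheus.yml:/etc/prometheus/prometheus.yml \
  prom/prometheus:v3.0.0-beta.0
```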
MAINTAINERS.md

@@ -13,13 +13,12 @@ Maintainers for specific parts of the codebase:
 * `k8s`: Frederic Branczyk (<fbranczyk@gmail.com> / @brancz)
 * `documentation`
   * `prometheus-mixin`: Matthias Loibl (<mail@matthiasloibl.com> / @metalmatze)
-* `model/histogram` and other code related to native histograms: Björn Rabenstein (<beorn@grafana.com> / @beorn7),
+* `model/histogram` and other code related to native histograms: Björn Rabenstein (<beorn@grafana.com> / @beorn7),
   George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
 * `storage`
   * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos (<npazosmendez@gmail.com> / @npazosmendez), Alex Greenbank (<alex.greenbank@grafana.com> / @alexgreenbank)
   * `otlptranslator`: Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto)
 * `web`
   * `ui`: Julius Volz (<julius.volz@gmail.com> / @juliusv)
   * `module`: Augustin Husson (<husson.augustin@gmail.com> @nexucis)
Makefile (10 lines changed)
@@ -42,13 +42,17 @@ upgrade-npm-deps:

 .PHONY: ui-bump-version
 ui-bump-version:
-	version=$$(sed s/2/0/ < VERSION) && ./scripts/ui_release.sh --bump-version "$${version}"
+	version=$$(./scripts/get_module_version.sh) && ./scripts/ui_release.sh --bump-version "$${version}"
 	cd web/ui && npm install
 	git add "./web/ui/package-lock.json" "./**/package.json"

 .PHONY: ui-install
 ui-install:
 	cd $(UI_PATH) && npm install
+	# The old React app has been separated from the npm workspaces setup to avoid
+	# issues with conflicting dependencies. This is a temporary solution until the
+	# new Mantine-based UI is fully integrated and the old app can be removed.
+	cd $(UI_PATH)/react-app && npm install

 .PHONY: ui-build
 ui-build:
@@ -65,6 +69,10 @@ ui-test:
 .PHONY: ui-lint
 ui-lint:
 	cd $(UI_PATH) && npm run lint
+	# The old React app has been separated from the npm workspaces setup to avoid
+	# issues with conflicting dependencies. This is a temporary solution until the
+	# new Mantine-based UI is fully integrated and the old app can be removed.
+	cd $(UI_PATH)/react-app && npm run lint

 .PHONY: assets
 assets: ui-install ui-build
README.md

@@ -115,7 +115,7 @@ The Makefile provides several targets:

 Prometheus is bundled with many service discovery plugins.
 When building Prometheus from source, you can edit the [plugins.yml](./plugins.yml)
-file to disable some service discoveries. The file is a yaml-formated list of go
+file to disable some service discoveries. The file is a yaml-formatted list of go
 import path that will be built into the Prometheus binary.

 After you have changed the file, you
RELEASE.md

@@ -187,7 +187,7 @@ the Prometheus server, we use major version zero releases for the libraries.
 Tag the new library release via the following commands:

 ```bash
-tag="v$(sed s/2/0/ < VERSION)"
+tag="v$(./scripts/get_module_version.sh)"
 git tag -s "${tag}" -m "${tag}"
 git push origin "${tag}"
 ```
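The old `sed` trick only maps 2.x server versions onto the 0.x module/UI versions, for example:

```bash
$ cat VERSION
2.54.1
$ sed s/2/0/ < VERSION   # rewrites only the first "2"
0.54.1
```

That substitution breaks for v3 tags, which is presumably why both call sites now delegate to `scripts/get_module_version.sh`; the script itself is not shown in this diff.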
cmd/prometheus/main.go

@@ -58,8 +58,6 @@ import (

 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery"
-	"github.com/prometheus/prometheus/discovery/legacymanager"
-	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
@@ -103,6 +101,8 @@ var (
 )

 func init() {
+	// This can be removed when the default validation scheme in common is updated.
+	model.NameValidationScheme = model.UTF8Validation
 	prometheus.MustRegister(versioncollector.NewCollector(strings.ReplaceAll(appName, "-", "_")))

 	var err error
@@ -154,12 +154,14 @@ type flagConfig struct {
 	RemoteFlushDeadline model.Duration
 	nameEscapingScheme  string

+	enableAutoReload   bool
+	autoReloadInterval model.Duration
+
 	featureList   []string
 	memlimitRatio float64
 	// These options are extracted from featureList
 	// for ease of use.
 	enableExpandExternalLabels bool
-	enableNewSDManager         bool
 	enablePerStepStats         bool
 	enableAutoGOMAXPROCS       bool
 	enableAutoGOMEMLIMIT       bool
@@ -179,9 +181,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		opts := strings.Split(f, ",")
 		for _, o := range opts {
 			switch o {
-			case "remote-write-receiver":
-				c.web.EnableRemoteWriteReceiver = true
-				level.Warn(logger).Log("msg", "Remote write receiver enabled via feature flag remote-write-receiver. This is DEPRECATED. Use --web.enable-remote-write-receiver.")
 			case "otlp-write-receiver":
 				c.web.EnableOTLPWriteReceiver = true
 				level.Info(logger).Log("msg", "Experimental OTLP write receiver enabled")
@@ -200,18 +199,18 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 			case "metadata-wal-records":
 				c.scrape.AppendMetadata = true
 				level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0")
-			case "new-service-discovery-manager":
-				c.enableNewSDManager = true
-				level.Info(logger).Log("msg", "Experimental service discovery manager")
-			case "agent":
-				agentMode = true
-				level.Info(logger).Log("msg", "Experimental agent mode enabled.")
 			case "promql-per-step-stats":
 				c.enablePerStepStats = true
 				level.Info(logger).Log("msg", "Experimental per-step statistics reporting")
 			case "auto-gomaxprocs":
 				c.enableAutoGOMAXPROCS = true
 				level.Info(logger).Log("msg", "Automatically set GOMAXPROCS to match Linux container CPU quota")
+			case "auto-reload-config":
+				c.enableAutoReload = true
+				if s := time.Duration(c.autoReloadInterval).Seconds(); s > 0 && s < 1 {
+					c.autoReloadInterval, _ = model.ParseDuration("1s")
+				}
+				level.Info(logger).Log("msg", fmt.Sprintf("Enabled automatic configuration file reloading. Checking for configuration changes every %s.", c.autoReloadInterval))
 			case "auto-gomemlimit":
 				c.enableAutoGOMEMLIMIT = true
 				level.Info(logger).Log("msg", "Automatically set GOMEMLIMIT to match Linux container or system memory limit")
@@ -243,13 +242,11 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 			case "promql-delayed-name-removal":
 				c.promqlEnableDelayedNameRemoval = true
 				level.Info(logger).Log("msg", "Experimental PromQL delayed name removal enabled.")
-			case "utf8-names":
-				model.NameValidationScheme = model.UTF8Validation
-				level.Info(logger).Log("msg", "Experimental UTF-8 support enabled")
 			case "":
 				continue
-			case "promql-at-modifier", "promql-negative-offset":
-				level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o)
+			case "old-ui":
+				c.web.UseOldUI = true
+				level.Info(logger).Log("msg", "Serving previous version of the Prometheus web UI.")
 			default:
 				level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o)
 			}
@@ -265,11 +262,6 @@ func main() {
 		runtime.SetMutexProfileFraction(20)
 	}

-	var (
-		oldFlagRetentionDuration model.Duration
-		newFlagRetentionDuration model.Duration
-	)
-
 	// Unregister the default GoCollector, and reregister with our defaults.
 	if prometheus.Unregister(collectors.NewGoCollector()) {
 		prometheus.MustRegister(
@@ -302,6 +294,9 @@ func main() {
 	a.Flag("config.file", "Prometheus configuration file path.").
 		Default("prometheus.yml").StringVar(&cfg.configFile)

+	a.Flag("config.auto-reload-interval", "Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes.").
+		Default("30s").SetValue(&cfg.autoReloadInterval)
+
 	a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry. Can be repeated.").
 		Default("0.0.0.0:9090").StringsVar(&cfg.web.ListenAddresses)
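Combined with the `auto-reload-config` feature flag handled earlier in this file, the new flag is used like this (a sketch; the interval value is illustrative, and per the feature-flag code an interval below 1s is raised to 1s):

```bash
prometheus \
  --config.file=/etc/prometheus/prometheus.yml \
  --enable-feature=auto-reload-config \
  --config.auto-reload-interval=60s
```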
@@ -376,11 +371,8 @@ func main() {
 		"Size at which to split the tsdb WAL segment files. Example: 100MB").
 		Hidden().PlaceHolder("<bytes>").BytesVar(&cfg.tsdb.WALSegmentSize)

-	serverOnlyFlag(a, "storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead.").
-		SetValue(&oldFlagRetentionDuration)
-
-	serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
-		SetValue(&newFlagRetentionDuration)
+	serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. If neither this flag nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
+		SetValue(&cfg.tsdb.RetentionDuration)

 	serverOnlyFlag(a, "storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\". Based on powers-of-2, so 1KB is 1024B.").
 		BytesVar(&cfg.tsdb.MaxBytes)
@@ -388,14 +380,6 @@ func main() {
 	serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
 		Default("false").BoolVar(&cfg.tsdb.NoLockfile)

-	// TODO: Remove in Prometheus 3.0.
-	var b bool
-	serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "[DEPRECATED] This flag has no effect. Overlapping blocks are enabled by default now.").
-		Default("true").Hidden().BoolVar(&b)
-
 	serverOnlyFlag(a, "storage.tsdb.allow-overlapping-compaction", "Allow compaction of overlapping blocks. If set to false, TSDB stops vertical compaction and leaves overlapping blocks there. The use case is to let another component handle the compaction of overlapping blocks.").
 		Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction)

 	serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL.").
 		Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)
@@ -472,9 +456,6 @@ func main() {
 	serverOnlyFlag(a, "alertmanager.drain-notification-queue-on-shutdown", "Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down.").
 		Default("true").BoolVar(&cfg.notifier.DrainOnShutdown)

-	// TODO: Remove in Prometheus 3.0.
-	alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String()
-
 	serverOnlyFlag(a, "query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations and federation.").
 		Default("5m").SetValue(&cfg.lookbackDelta)
@@ -490,11 +471,11 @@ func main() {
 	a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
 		Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)

 	a.Flag("scrape.name-escaping-scheme", `Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots".`).Default(scrape.DefaultNameEscapingScheme.String()).StringVar(&cfg.nameEscapingScheme)

-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, no-default-scrape-port, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)

+	a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)
+
 	promlogflag.AddFlags(a, &cfg.promlogConfig)

 	a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error {
@@ -561,10 +542,6 @@ func main() {
 		os.Exit(2)
 	}

-	if *alertmanagerTimeout != "" {
-		level.Warn(logger).Log("msg", "The flag --alertmanager.timeout has no effect and will be removed in the future.")
-	}
-
 	// Throw error for invalid config before starting other components.
 	var cfgFile *config.Config
 	if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, log.NewNopLogger()); err != nil {
@@ -611,17 +588,6 @@ func main() {
 	cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/")

 	if !agentMode {
 		// Time retention settings.
-		if oldFlagRetentionDuration != 0 {
-			level.Warn(logger).Log("deprecation_notice", "'storage.tsdb.retention' flag is deprecated use 'storage.tsdb.retention.time' instead.")
-			cfg.tsdb.RetentionDuration = oldFlagRetentionDuration
-		}
-
-		// When the new flag is set it takes precedence.
-		if newFlagRetentionDuration != 0 {
-			cfg.tsdb.RetentionDuration = newFlagRetentionDuration
-		}
-
 		if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 {
 			cfg.tsdb.RetentionDuration = defaultRetentionDuration
 			level.Info(logger).Log("msg", "No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration)
@@ -693,8 +659,8 @@ func main() {

 		ctxScrape, cancelScrape = context.WithCancel(context.Background())
 		ctxNotify, cancelNotify = context.WithCancel(context.Background())
-		discoveryManagerScrape  discoveryManager
-		discoveryManagerNotify  discoveryManager
+		discoveryManagerScrape  *discovery.Manager
+		discoveryManagerNotify  *discovery.Manager
 	)

 	// Kubernetes client metrics are used by Kubernetes SD.
@@ -714,47 +680,22 @@ func main() {
 		os.Exit(1)
 	}

-	if cfg.enableNewSDManager {
-		{
-			discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
-			if discMgr == nil {
-				level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
-				os.Exit(1)
-			}
-			discoveryManagerScrape = discMgr
-		}
-
-		{
-			discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
-			if discMgr == nil {
-				level.Error(logger).Log("msg", "failed to create a discovery manager notify")
-				os.Exit(1)
-			}
-			discoveryManagerNotify = discMgr
-		}
-	} else {
-		{
-			discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("scrape"))
-			if discMgr == nil {
-				level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
-				os.Exit(1)
-			}
-			discoveryManagerScrape = discMgr
-		}
-
-		{
-			discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("notify"))
-			if discMgr == nil {
-				level.Error(logger).Log("msg", "failed to create a discovery manager notify")
-				os.Exit(1)
-			}
-			discoveryManagerNotify = discMgr
-		}
-	}
+	discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
+	if discoveryManagerScrape == nil {
+		level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
+		os.Exit(1)
+	}
+
+	discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
+	if discoveryManagerNotify == nil {
+		level.Error(logger).Log("msg", "failed to create a discovery manager notify")
+		os.Exit(1)
+	}

 	scrapeManager, err := scrape.NewManager(
 		&cfg.scrape,
 		log.With(logger, "component", "scrape manager"),
+		func(s string) (log.Logger, error) { return logging.NewJSONFileLogger(s) },
 		fanoutStorage,
 		prometheus.DefaultRegisterer,
 	)
@@ -999,12 +940,18 @@ func main() {
 	listeners, err := webHandler.Listeners()
 	if err != nil {
 		level.Error(logger).Log("msg", "Unable to start web listeners", "err", err)
+		if err := queryEngine.Close(); err != nil {
+			level.Warn(logger).Log("msg", "Closing query engine failed", "err", err)
+		}
 		os.Exit(1)
 	}

 	err = toolkit_web.Validate(*webConfig)
 	if err != nil {
 		level.Error(logger).Log("msg", "Unable to validate web configuration file", "err", err)
+		if err := queryEngine.Close(); err != nil {
+			level.Warn(logger).Log("msg", "Closing query engine failed", "err", err)
+		}
 		os.Exit(1)
 	}
@@ -1026,6 +973,9 @@ func main() {
 			case <-cancel:
 				reloadReady.Close()
 			}
+			if err := queryEngine.Close(); err != nil {
+				level.Warn(logger).Log("msg", "Closing query engine failed", "err", err)
+			}
 			return nil
 		},
 		func(err error) {
@@ -1120,6 +1070,15 @@ func main() {
 		hup := make(chan os.Signal, 1)
 		signal.Notify(hup, syscall.SIGHUP)
 		cancel := make(chan struct{})
+
+		var checksum string
+		if cfg.enableAutoReload {
+			checksum, err = config.GenerateChecksum(cfg.configFile)
+			if err != nil {
+				level.Error(logger).Log("msg", "Failed to generate initial checksum for configuration file", "err", err)
+			}
+		}
+
 		g.Add(
 			func() error {
 				<-reloadReady.C
@@ -1129,6 +1088,12 @@ func main() {
 					case <-hup:
 						if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil {
 							level.Error(logger).Log("msg", "Error reloading config", "err", err)
+						} else if cfg.enableAutoReload {
+							if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil {
+								checksum = currentChecksum
+							} else {
+								level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err)
+							}
 						}
 					case rc := <-webHandler.Reload():
 						if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil {
@@ -1136,6 +1101,32 @@ func main() {
 							rc <- err
 						} else {
 							rc <- nil
+							if cfg.enableAutoReload {
+								if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil {
+									checksum = currentChecksum
+								} else {
+									level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err)
+								}
+							}
 						}
+					case <-time.Tick(time.Duration(cfg.autoReloadInterval)):
+						if !cfg.enableAutoReload {
+							continue
+						}
+						currentChecksum, err := config.GenerateChecksum(cfg.configFile)
+						if err != nil {
+							level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err)
+							continue
+						}
+						if currentChecksum == checksum {
+							continue
+						}
+						level.Info(logger).Log("msg", "Configuration file change detected, reloading the configuration.")
+
+						if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil {
+							level.Error(logger).Log("msg", "Error reloading config", "err", err)
+						} else {
+							checksum = currentChecksum
+						}
 					case <-cancel:
 						return nil
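The new `time.Tick` case only complements the existing reload paths: a SIGHUP or, with `--web.enable-lifecycle`, a POST to the reload endpoint still triggers an immediate reload, and both now refresh the stored checksum. For example:

```bash
# Either of these still forces an immediate configuration reload:
kill -HUP "$(pidof prometheus)"
curl -X POST http://127.0.0.1:9090/-/reload   # requires --web.enable-lifecycle
```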
@@ -1798,15 +1789,6 @@ func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Options {
 	}
 }

-// discoveryManager interfaces the discovery manager. This is used to keep using
-// the manager that restarts SD's on reload for a few releases until we feel
-// the new manager can be enabled for all users.
-type discoveryManager interface {
-	ApplyConfig(cfg map[string]discovery.Configs) error
-	Run() error
-	SyncCh() <-chan map[string][]*targetgroup.Group
-}
-
 // rwProtoMsgFlagParser is a custom parser for config.RemoteWriteProtoMsg enum.
 type rwProtoMsgFlagParser struct {
 	msgs *[]config.RemoteWriteProtoMsg
cmd/prometheus/main_test.go

@@ -42,6 +42,11 @@ import (
 	"github.com/prometheus/prometheus/rules"
 )

+func init() {
+	// This can be removed when the default validation scheme in common is updated.
+	model.NameValidationScheme = model.UTF8Validation
+}
+
 const startupTime = 10 * time.Second

 var (
@@ -348,7 +353,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
 }

 func TestAgentSuccessfulStartup(t *testing.T) {
-	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig)
+	prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig)
 	require.NoError(t, prom.Start())

 	actualExitStatus := 0
@@ -366,7 +371,7 @@ func TestAgentSuccessfulStartup(t *testing.T) {
 }

 func TestAgentFailedStartupWithServerFlag(t *testing.T) {
-	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
+	prom := exec.Command(promPath, "-test.main", "--agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)

 	output := bytes.Buffer{}
 	prom.Stderr = &output
@@ -393,7 +398,7 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) {
 }

 func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
-	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
+	prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
 	require.NoError(t, prom.Start())

 	actualExitStatus := 0
@@ -431,7 +436,7 @@ func TestModeSpecificFlags(t *testing.T) {
 			args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"}

 			if tc.mode == "agent" {
-				args = append(args, "--enable-feature=agent", "--config.file="+agentConfig)
+				args = append(args, "--agent", "--config.file="+agentConfig)
 			} else {
 				args = append(args, "--config.file="+promConfig)
 			}
cmd/prometheus/scrape_failure_log_test.go (new file, 193 lines)
@@ -0,0 +1,193 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"go.uber.org/atomic"
+
+	"github.com/prometheus/prometheus/util/testutil"
+)
+
+func TestScrapeFailureLogFile(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+
+	// Tracks the number of requests made to the mock server.
+	var requestCount atomic.Int32
+
+	// Starts a server that always returns HTTP 500 errors.
+	mockServerAddress := startGarbageServer(t, &requestCount)
+
+	// Create a temporary directory for Prometheus configuration and logs.
+	tempDir := t.TempDir()
+
+	// Define file paths for the scrape failure log and Prometheus configuration.
+	// Like other files, the scrape failure log file should be relative to the
+	// config file. Therefore, we split the name we put in the file and the full
+	// path used to check the content of the file.
+	scrapeFailureLogFileName := "scrape_failure.log"
+	scrapeFailureLogFile := filepath.Join(tempDir, scrapeFailureLogFileName)
+	promConfigFile := filepath.Join(tempDir, "prometheus.yml")
+
+	// Step 1: Set up an initial Prometheus configuration that globally
+	// specifies a scrape failure log file.
+	promConfig := fmt.Sprintf(`
+global:
+  scrape_interval: 500ms
+  scrape_failure_log_file: %s
+
+scrape_configs:
+  - job_name: 'test_job'
+    static_configs:
+      - targets: ['%s']
+`, scrapeFailureLogFileName, mockServerAddress)
+
+	err := os.WriteFile(promConfigFile, []byte(promConfig), 0o644)
+	require.NoError(t, err, "Failed to write Prometheus configuration file")
+
+	// Start Prometheus with the generated configuration and a random port, enabling the lifecycle API.
+	port := testutil.RandomUnprivilegedPort(t)
+	params := []string{
+		"-test.main",
+		"--config.file=" + promConfigFile,
+		"--storage.tsdb.path=" + filepath.Join(tempDir, "data"),
+		fmt.Sprintf("--web.listen-address=127.0.0.1:%d", port),
+		"--web.enable-lifecycle",
+	}
+	prometheusProcess := exec.Command(promPath, params...)
+	prometheusProcess.Stdout = os.Stdout
+	prometheusProcess.Stderr = os.Stderr
+
+	err = prometheusProcess.Start()
+	require.NoError(t, err, "Failed to start Prometheus")
+	defer prometheusProcess.Process.Kill()
+
+	// Wait until the mock server receives at least two requests from Prometheus.
+	require.Eventually(t, func() bool {
+		return requestCount.Load() >= 2
+	}, 30*time.Second, 500*time.Millisecond, "Expected at least two requests to the mock server")
+
+	// Verify that the scrape failures have been logged to the specified file.
+	content, err := os.ReadFile(scrapeFailureLogFile)
+	require.NoError(t, err, "Failed to read scrape failure log")
+	require.Contains(t, string(content), "server returned HTTP status 500 Internal Server Error", "Expected scrape failure log entry not found")
+
+	// Step 2: Update the Prometheus configuration to remove the scrape failure
+	// log file setting.
+	promConfig = fmt.Sprintf(`
+global:
+  scrape_interval: 1s
+
+scrape_configs:
+  - job_name: 'test_job'
+    static_configs:
+      - targets: ['%s']
+`, mockServerAddress)
+
+	err = os.WriteFile(promConfigFile, []byte(promConfig), 0o644)
+	require.NoError(t, err, "Failed to update Prometheus configuration file")
+
+	// Reload Prometheus with the updated configuration.
+	reloadPrometheus(t, port)
+
+	// Count the number of lines in the scrape failure log file before any
+	// further requests.
+	preReloadLogLineCount := countLinesInFile(scrapeFailureLogFile)
+
+	// Wait for at least two more requests to the mock server to ensure
+	// Prometheus continues scraping.
+	requestsBeforeReload := requestCount.Load()
+	require.Eventually(t, func() bool {
+		return requestCount.Load() >= requestsBeforeReload+2
+	}, 30*time.Second, 500*time.Millisecond, "Expected two more requests to the mock server after configuration reload")
+
+	// Ensure that no new lines were added to the scrape failure log file after
+	// the configuration change.
+	require.Equal(t, preReloadLogLineCount, countLinesInFile(scrapeFailureLogFile), "No new lines should be added to the scrape failure log file after removing the log setting")
+
+	// Step 3: Re-add the scrape failure log file setting, but this time under
+	// scrape_configs, and reload Prometheus.
+	promConfig = fmt.Sprintf(`
+global:
+  scrape_interval: 1s
+
+scrape_configs:
+  - job_name: 'test_job'
+    scrape_failure_log_file: %s
+    static_configs:
+      - targets: ['%s']
+`, scrapeFailureLogFileName, mockServerAddress)
+
+	err = os.WriteFile(promConfigFile, []byte(promConfig), 0o644)
+	require.NoError(t, err, "Failed to update Prometheus configuration file")
+
+	// Reload Prometheus with the updated configuration.
+	reloadPrometheus(t, port)
+
+	// Wait for at least two more requests to the mock server and verify that
+	// new log entries are created.
+	postReloadLogLineCount := countLinesInFile(scrapeFailureLogFile)
+	requestsBeforeReAddingLog := requestCount.Load()
+	require.Eventually(t, func() bool {
+		return requestCount.Load() >= requestsBeforeReAddingLog+2
+	}, 30*time.Second, 500*time.Millisecond, "Expected two additional requests after re-adding the log setting")
+
+	// Confirm that new lines were added to the scrape failure log file.
+	require.Greater(t, countLinesInFile(scrapeFailureLogFile), postReloadLogLineCount, "New lines should be added to the scrape failure log file after re-adding the log setting")
+}
+
+// reloadPrometheus sends a reload request to the Prometheus server to apply
+// updated configurations.
+func reloadPrometheus(t *testing.T, port int) {
+	resp, err := http.Post(fmt.Sprintf("http://127.0.0.1:%d/-/reload", port), "", nil)
+	require.NoError(t, err, "Failed to reload Prometheus")
+	require.Equal(t, http.StatusOK, resp.StatusCode, "Unexpected status code when reloading Prometheus")
+}
+
+// startGarbageServer sets up a mock server that returns a 500 Internal Server Error
+// for all requests. It also increments the request count each time it's hit.
+func startGarbageServer(t *testing.T, requestCount *atomic.Int32) string {
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		requestCount.Inc()
+		w.WriteHeader(http.StatusInternalServerError)
+	}))
+	t.Cleanup(server.Close)
+
+	parsedURL, err := url.Parse(server.URL)
+	require.NoError(t, err, "Failed to parse mock server URL")
+
+	return parsedURL.Host
+}
+
+// countLinesInFile counts and returns the number of lines in the specified file.
+func countLinesInFile(filePath string) int {
+	data, err := os.ReadFile(filePath)
+	if err != nil {
+		return 0 // Return 0 if the file doesn't exist or can't be read.
+	}
+	return bytes.Count(data, []byte{'\n'})
+}
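Distilled from the test above, the new setting can live globally or per scrape config, and relative paths resolve against the configuration file (a minimal sketch; job name and target are illustrative):

```bash
cat > prometheus.yml <<'EOF'
global:
  scrape_interval: 15s
  scrape_failure_log_file: scrape_failure.log   # global default
scrape_configs:
  - job_name: 'example'
    scrape_failure_log_file: scrape_failure.log # or override per job
    static_configs:
      - targets: ['localhost:9091']
EOF
```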
cmd/promtool/main.go

@@ -62,6 +62,11 @@ import (
 	"github.com/prometheus/prometheus/util/documentcli"
 )

+func init() {
+	// This can be removed when the default validation scheme in common is updated.
+	model.NameValidationScheme = model.UTF8Validation
+}
+
 const (
 	successExitCode = 0
 	failureExitCode = 1
@@ -236,14 +241,14 @@ func main() {

 	tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.")
 	dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
-	dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
+	dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String()
 	dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
 	dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
 	dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()

 	tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.")
 	dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
-	dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
+	dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String()
 	dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
 	dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
 	dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
@ -325,8 +330,6 @@ func main() {
|
|||
noDefaultScrapePort = true
|
||||
case "":
|
||||
continue
|
||||
case "promql-at-modifier", "promql-negative-offset":
|
||||
fmt.Printf(" WARNING: Option for --enable-feature is a no-op after promotion to a stable feature: %q\n", o)
|
||||
default:
|
||||
fmt.Printf(" WARNING: Unknown option for --enable-feature: %q\n", o)
|
||||
}
|
||||
|
|
|
@@ -31,12 +31,19 @@ import (
	"testing"
	"time"

	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/rulefmt"
	"github.com/prometheus/prometheus/promql/promqltest"
)

func init() {
	// This can be removed when the default validation scheme in common is updated.
	model.NameValidationScheme = model.UTF8Validation
}

var promtoolPath = os.Args[0]

func TestMain(m *testing.M) {
@@ -549,3 +556,46 @@ func TestCheckRulesWithRuleFiles(t *testing.T) {
		require.Equal(t, lintErrExitCode, exitCode, "")
	})
}

func TestTSDBDumpCommand(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	storage := promqltest.LoadedStorage(t, `
	load 1m
		metric{foo="bar"} 1 2 3
	`)
	t.Cleanup(func() { storage.Close() })

	for _, c := range []struct {
		name           string
		subCmd         string
		sandboxDirRoot string
	}{
		{
			name:   "dump",
			subCmd: "dump",
		},
		{
			name:           "dump with sandbox dir root",
			subCmd:         "dump",
			sandboxDirRoot: t.TempDir(),
		},
		{
			name:   "dump-openmetrics",
			subCmd: "dump-openmetrics",
		},
		{
			name:           "dump-openmetrics with sandbox dir root",
			subCmd:         "dump-openmetrics",
			sandboxDirRoot: t.TempDir(),
		},
	} {
		t.Run(c.name, func(t *testing.T) {
			args := []string{"-test.main", "tsdb", c.subCmd, storage.Dir()}
			cmd := exec.Command(promtoolPath, args...)
			require.NoError(t, cmd.Run())
		})
	}
}
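Worth noting: promtoolPath is os.Args[0], so this test re-executes the test binary itself and relies on TestMain to dispatch into the real CLI when it sees the "-test.main" marker. A common shape for such a dispatcher is sketched below; this is an illustration of the pattern only, not the actual body of TestMain in main_test.go.

	// Sketch of the re-exec dispatch pattern that makes
	// exec.Command(promtoolPath, "-test.main", ...) behave like promtool.
	func TestMainSketch(m *testing.M) {
		for i, arg := range os.Args {
			if arg == "-test.main" {
				// Drop the marker flag and hand the remaining args to main().
				os.Args = append(os.Args[:i], os.Args[i+1:]...)
				main()
				return
			}
		}
		os.Exit(m.Run())
	}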
10	cmd/promtool/testdata/unittest.yml

@@ -89,11 +89,11 @@ tests:
  # Ensure lookback delta is respected, when a value is missing.
  - expr: timestamp(test_missing)
    eval_time: 5m
    eval_time: 4m59s
    exp_samples:
      - value: 0
  - expr: timestamp(test_missing)
    eval_time: 5m1s
    eval_time: 5m
    exp_samples: []

  # Minimal test case to check edge case of a single sample.

@@ -113,7 +113,7 @@ tests:
  - expr: count_over_time(fixed_data[1h])
    eval_time: 1h
    exp_samples:
      - value: 61
      - value: 60
  - expr: timestamp(fixed_data)
    eval_time: 1h
    exp_samples:

@@ -183,7 +183,7 @@ tests:
  - expr: job:test:count_over_time1m
    eval_time: 1m
    exp_samples:
      - value: 61
      - value: 60
        labels: 'job:test:count_over_time1m{job="test"}'
  - expr: timestamp(job:test:count_over_time1m)
    eval_time: 1m10s

@@ -194,7 +194,7 @@ tests:
  - expr: job:test:count_over_time1m
    eval_time: 2m
    exp_samples:
      - value: 61
      - value: 60
        labels: 'job:test:count_over_time1m{job="test"}'
  - expr: timestamp(job:test:count_over_time1m)
    eval_time: 2m59s999ms
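These eval_time and value shifts appear to track Prometheus making lookback and range windows open on the left: a sample sitting exactly on the left boundary is no longer selected, so a point written at t=0 is still visible at 4m59s but not at 5m, and a 1h range over per-minute samples now covers 60 points instead of 61. A minimal sketch of that visibility rule, assuming millisecond timestamps and the default 5m lookback delta:

	// A sample at ts is visible at evaluation time t iff t-lookback < ts <= t.
	// With lookbackMs = 5*60*1000: visible(0, 299_000) is true (4m59s),
	// visible(0, 300_000) is false (exactly 5m later).
	func visible(ts, t, lookbackMs int64) bool {
		return t-lookbackMs < ts && ts <= t
	}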
@@ -367,25 +367,25 @@ func printBlocks(blocks []tsdb.BlockReader, writeHeader, humanReadable bool) {
		fmt.Fprintf(tw,
			"%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
			meta.ULID,
			getFormatedTime(meta.MinTime, humanReadable),
			getFormatedTime(meta.MaxTime, humanReadable),
			getFormattedTime(meta.MinTime, humanReadable),
			getFormattedTime(meta.MaxTime, humanReadable),
			time.Duration(meta.MaxTime-meta.MinTime)*time.Millisecond,
			meta.Stats.NumSamples,
			meta.Stats.NumChunks,
			meta.Stats.NumSeries,
			getFormatedBytes(b.Size(), humanReadable),
			getFormattedBytes(b.Size(), humanReadable),
		)
	}
}

func getFormatedTime(timestamp int64, humanReadable bool) string {
func getFormattedTime(timestamp int64, humanReadable bool) string {
	if humanReadable {
		return time.Unix(timestamp/1000, 0).UTC().String()
	}
	return strconv.FormatInt(timestamp, 10)
}

func getFormatedBytes(bytes int64, humanReadable bool) string {
func getFormattedBytes(bytes int64, humanReadable bool) string {
	if humanReadable {
		return units.Base2Bytes(bytes).String()
	}
@@ -55,7 +55,7 @@ func TestGenerateBucket(t *testing.T) {
}

// getDumpedSamples dumps samples and returns them.
func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []string, formatter SeriesSetFormatter) string {
func getDumpedSamples(t *testing.T, databasePath, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) string {
	t.Helper()

	oldStdout := os.Stdout

@@ -64,8 +64,8 @@ func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []strin
	err := dumpSamples(
		context.Background(),
		path,
		t.TempDir(),
		databasePath,
		sandboxDirRoot,
		mint,
		maxt,
		match,

@@ -96,13 +96,15 @@ func TestTSDBDump(t *testing.T) {
		heavy_metric{foo="bar"} 5 4 3 2 1
		heavy_metric{foo="foo"} 5 4 3 2 1
	`)
	t.Cleanup(func() { storage.Close() })

	tests := []struct {
		name         string
		mint         int64
		maxt         int64
		match        []string
		expectedDump string
		name           string
		mint           int64
		maxt           int64
		sandboxDirRoot string
		match          []string
		expectedDump   string
	}{
		{
			name: "default match",

@@ -111,6 +113,14 @@ func TestTSDBDump(t *testing.T) {
			match:        []string{"{__name__=~'(?s:.*)'}"},
			expectedDump: "testdata/dump-test-1.prom",
		},
		{
			name:           "default match with sandbox dir root set",
			mint:           math.MinInt64,
			maxt:           math.MaxInt64,
			sandboxDirRoot: t.TempDir(),
			match:          []string{"{__name__=~'(?s:.*)'}"},
			expectedDump:   "testdata/dump-test-1.prom",
		},
		{
			name: "same matcher twice",
			mint: math.MinInt64,

@@ -149,7 +159,7 @@ func TestTSDBDump(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.mint, tt.maxt, tt.match, formatSeriesSet)
			dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.sandboxDirRoot, tt.mint, tt.maxt, tt.match, formatSeriesSet)
			expectedMetrics, err := os.ReadFile(tt.expectedDump)
			require.NoError(t, err)
			expectedMetrics = normalizeNewLine(expectedMetrics)

@@ -171,12 +181,29 @@ func TestTSDBDumpOpenMetrics(t *testing.T) {
		my_counter{foo="bar", baz="abc"} 1 2 3 4 5
		my_gauge{bar="foo", abc="baz"} 9 8 0 4 7
	`)
	t.Cleanup(func() { storage.Close() })

	expectedMetrics, err := os.ReadFile("testdata/dump-openmetrics-test.prom")
	require.NoError(t, err)
	expectedMetrics = normalizeNewLine(expectedMetrics)
	dumpedMetrics := getDumpedSamples(t, storage.Dir(), math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)
	require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))
	tests := []struct {
		name           string
		sandboxDirRoot string
	}{
		{
			name: "default match",
		},
		{
			name:           "default match with sandbox dir root set",
			sandboxDirRoot: t.TempDir(),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			expectedMetrics, err := os.ReadFile("testdata/dump-openmetrics-test.prom")
			require.NoError(t, err)
			expectedMetrics = normalizeNewLine(expectedMetrics)
			dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.sandboxDirRoot, math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)
			require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))
		})
	}
}

func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) {

@@ -195,7 +222,7 @@ func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) {
	})

	// Dump the blocks into OM format
	dumpedMetrics := getDumpedSamples(t, dbDir, math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)
	dumpedMetrics := getDumpedSamples(t, dbDir, "", math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)

	// Should get back the initial metrics.
	require.Equal(t, string(initialMetrics), dumpedMetrics)
@@ -429,6 +429,8 @@ type GlobalConfig struct {
	RuleQueryOffset model.Duration `yaml:"rule_query_offset,omitempty"`
	// File to which PromQL queries are logged.
	QueryLogFile string `yaml:"query_log_file,omitempty"`
	// File to which scrape failures are logged.
	ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"`
	// The labels to add to any timeseries that this Prometheus instance scrapes.
	ExternalLabels labels.Labels `yaml:"external_labels,omitempty"`
	// An uncompressed response body larger than this many bytes will cause the

@@ -529,6 +531,7 @@ func validateAcceptScrapeProtocols(sps []ScrapeProtocol) error {
// SetDirectory joins any relative file paths with dir.
func (c *GlobalConfig) SetDirectory(dir string) {
	c.QueryLogFile = config.JoinDir(dir, c.QueryLogFile)
	c.ScrapeFailureLogFile = config.JoinDir(dir, c.ScrapeFailureLogFile)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.

@@ -591,6 +594,7 @@ func (c *GlobalConfig) isZero() bool {
		c.EvaluationInterval == 0 &&
		c.RuleQueryOffset == 0 &&
		c.QueryLogFile == "" &&
		c.ScrapeFailureLogFile == "" &&
		c.ScrapeProtocols == nil
}

@@ -632,6 +636,8 @@ type ScrapeConfig struct {
	ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
	// Whether to scrape a classic histogram that is also exposed as a native histogram.
	ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"`
	// File to which scrape failures are logged.
	ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"`
	// The HTTP resource path on which to fetch metrics from targets.
	MetricsPath string `yaml:"metrics_path,omitempty"`
	// The URL scheme with which to fetch metrics from targets.

@@ -684,6 +690,7 @@ type ScrapeConfig struct {
func (c *ScrapeConfig) SetDirectory(dir string) {
	c.ServiceDiscoveryConfigs.SetDirectory(dir)
	c.HTTPClientConfig.SetDirectory(dir)
	c.ScrapeFailureLogFile = config.JoinDir(dir, c.ScrapeFailureLogFile)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.

@@ -765,6 +772,9 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
	if c.KeepDroppedTargets == 0 {
		c.KeepDroppedTargets = globalConfig.KeepDroppedTargets
	}
	if c.ScrapeFailureLogFile == "" {
		c.ScrapeFailureLogFile = globalConfig.ScrapeFailureLogFile
	}

	if c.ScrapeProtocols == nil {
		c.ScrapeProtocols = globalConfig.ScrapeProtocols

@@ -774,10 +784,10 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
	}

	switch globalConfig.MetricNameValidationScheme {
	case "", LegacyValidationConfig:
	case UTF8ValidationConfig:
	case LegacyValidationConfig:
	case "", UTF8ValidationConfig:
		if model.NameValidationScheme != model.UTF8Validation {
			return fmt.Errorf("utf8 name validation requested but feature not enabled via --enable-feature=utf8-names")
			panic("utf8 name validation requested but model.NameValidationScheme is not set to UTF8")
		}
	default:
		return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme)
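The switch rewrite above inverts the old default: an empty metric_name_validation_scheme now falls into the UTF-8 branch rather than the legacy one, and requesting UTF-8 while the process-wide scheme is still legacy is treated as a programming error (panic) rather than a user error. A rough sketch of the resolution order the testdata changes further down also exercise; the helper name is illustrative only, assuming a local scrape-config value overrides the global one and "" means UTF-8:

	// Illustrative only: resolve the effective validation scheme.
	func effectiveValidationScheme(global, local string) string {
		scheme := global
		if local != "" {
			scheme = local // the per-scrape-config setting wins
		}
		if scheme == "" {
			return "utf8" // new default when nothing is configured
		}
		return scheme // "legacy" or "utf8"
	}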
@@ -62,6 +62,11 @@ import (
	"github.com/prometheus/prometheus/util/testutil"
)

func init() {
	// This can be removed when the default validation scheme in common is updated.
	model.NameValidationScheme = model.UTF8Validation
}

func mustParseURL(u string) *config.URL {
	parsed, err := url.Parse(u)
	if err != nil {

@@ -78,14 +83,16 @@ const (
	globLabelNameLengthLimit  = 200
	globLabelValueLengthLimit = 200
	globalGoGC                = 42
	globScrapeFailureLogFile  = "testdata/fail.log"
)

var expectedConf = &Config{
	GlobalConfig: GlobalConfig{
		ScrapeInterval:     model.Duration(15 * time.Second),
		ScrapeTimeout:      DefaultGlobalConfig.ScrapeTimeout,
		EvaluationInterval: model.Duration(30 * time.Second),
		QueryLogFile:       "",
		ScrapeInterval:       model.Duration(15 * time.Second),
		ScrapeTimeout:        DefaultGlobalConfig.ScrapeTimeout,
		EvaluationInterval:   model.Duration(30 * time.Second),
		QueryLogFile:         "testdata/query.log",
		ScrapeFailureLogFile: globScrapeFailureLogFile,

		ExternalLabels: labels.FromStrings("foo", "bar", "monitor", "codelab"),

@@ -211,6 +218,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  "testdata/fail_prom.log",

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -225,6 +233,15 @@ var expectedConf = &Config{
				TLSConfig: config.TLSConfig{
					MinVersion: config.TLSVersion(tls.VersionTLS10),
				},
				HTTPHeaders: &config.Headers{
					Headers: map[string]config.Header{
						"foo": {
							Values:  []string{"foobar"},
							Secrets: []config.Secret{"bar", "foo"},
							Files:   []string{filepath.FromSlash("testdata/valid_password_file")},
						},
					},
				},
			},

			ServiceDiscoveryConfigs: discovery.Configs{

@@ -314,6 +331,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  210,
			LabelValueLengthLimit: 210,
			ScrapeProtocols:       []ScrapeProtocol{PrometheusText0_0_4},
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			HTTPClientConfig: config.HTTPClientConfig{
				BasicAuth: &config.BasicAuth{

@@ -411,6 +429,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -466,6 +485,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: "/metrics",
			Scheme:      "http",

@@ -499,6 +519,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -538,6 +559,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -577,6 +599,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -606,6 +629,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -643,6 +667,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -677,6 +702,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -718,6 +744,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -749,6 +776,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -783,6 +811,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -810,6 +839,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -840,6 +870,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: "/federate",
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -870,6 +901,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -900,6 +932,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -927,6 +960,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -962,6 +996,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -996,6 +1031,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1027,6 +1063,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1057,6 +1094,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1091,6 +1129,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1128,6 +1167,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1184,6 +1224,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1211,6 +1252,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			HTTPClientConfig: config.DefaultHTTPClientConfig,
			MetricsPath:      DefaultScrapeConfig.MetricsPath,

@@ -1249,6 +1291,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			HTTPClientConfig: config.DefaultHTTPClientConfig,
			MetricsPath:      DefaultScrapeConfig.MetricsPath,

@@ -1293,6 +1336,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1328,6 +1372,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			HTTPClientConfig: config.DefaultHTTPClientConfig,
			MetricsPath:      DefaultScrapeConfig.MetricsPath,

@@ -1357,6 +1402,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1389,6 +1435,7 @@ var expectedConf = &Config{
			LabelNameLengthLimit:  globLabelNameLengthLimit,
			LabelValueLengthLimit: globLabelValueLengthLimit,
			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
			ScrapeFailureLogFile:  globScrapeFailureLogFile,

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1532,7 +1579,7 @@ func TestElideSecrets(t *testing.T) {
	yamlConfig := string(config)

	matches := secretRe.FindAllStringIndex(yamlConfig, -1)
	require.Len(t, matches, 22, "wrong number of secret matches found")
	require.Len(t, matches, 24, "wrong number of secret matches found")
	require.NotContains(t, yamlConfig, "mysecret",
		"yaml marshal reveals authentication credentials.")
}

@@ -2042,6 +2089,10 @@ var expectedErrors = []struct {
}

func TestBadConfigs(t *testing.T) {
	model.NameValidationScheme = model.LegacyValidation
	defer func() {
		model.NameValidationScheme = model.UTF8Validation
	}()
	for _, ee := range expectedErrors {
		_, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger())
		require.Error(t, err, "%s", ee.filename)

@@ -2051,6 +2102,10 @@ func TestBadConfigs(t *testing.T) {
}

func TestBadStaticConfigsJSON(t *testing.T) {
	model.NameValidationScheme = model.LegacyValidation
	defer func() {
		model.NameValidationScheme = model.UTF8Validation
	}()
	content, err := os.ReadFile("testdata/static_config.bad.json")
	require.NoError(t, err)
	var tg targetgroup.Group

@@ -2059,6 +2114,10 @@ func TestBadStaticConfigsJSON(t *testing.T) {
}

func TestBadStaticConfigsYML(t *testing.T) {
	model.NameValidationScheme = model.LegacyValidation
	defer func() {
		model.NameValidationScheme = model.UTF8Validation
	}()
	content, err := os.ReadFile("testdata/static_config.bad.yml")
	require.NoError(t, err)
	var tg targetgroup.Group

@@ -2323,17 +2382,17 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
		{
			name:         "global setting implies local settings",
			inputFile:    "scrape_config_global_validation_mode",
			expectScheme: "utf8",
			expectScheme: "legacy",
		},
		{
			name:         "local setting",
			inputFile:    "scrape_config_local_validation_mode",
			expectScheme: "utf8",
			expectScheme: "legacy",
		},
		{
			name:         "local setting overrides global setting",
			inputFile:    "scrape_config_local_global_validation_mode",
			expectScheme: "legacy",
			expectScheme: "utf8",
		},
	}
92	config/reload.go (new file)

@@ -0,0 +1,92 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"os"
	"path/filepath"

	"gopkg.in/yaml.v2"
)

type ExternalFilesConfig struct {
	RuleFiles         []string `yaml:"rule_files"`
	ScrapeConfigFiles []string `yaml:"scrape_config_files"`
}

// GenerateChecksum generates a checksum of the YAML file and the files it references.
func GenerateChecksum(yamlFilePath string) (string, error) {
	hash := sha256.New()

	yamlContent, err := os.ReadFile(yamlFilePath)
	if err != nil {
		return "", fmt.Errorf("error reading YAML file: %w", err)
	}
	_, err = hash.Write(yamlContent)
	if err != nil {
		return "", fmt.Errorf("error writing YAML file to hash: %w", err)
	}

	var config ExternalFilesConfig
	if err := yaml.Unmarshal(yamlContent, &config); err != nil {
		return "", fmt.Errorf("error unmarshalling YAML: %w", err)
	}

	dir := filepath.Dir(yamlFilePath)

	for i, file := range config.RuleFiles {
		config.RuleFiles[i] = filepath.Join(dir, file)
	}
	for i, file := range config.ScrapeConfigFiles {
		config.ScrapeConfigFiles[i] = filepath.Join(dir, file)
	}

	files := map[string][]string{
		"r": config.RuleFiles,         // "r" for rule files
		"s": config.ScrapeConfigFiles, // "s" for scrape config files
	}

	for _, prefix := range []string{"r", "s"} {
		for _, pattern := range files[prefix] {
			matchingFiles, err := filepath.Glob(pattern)
			if err != nil {
				return "", fmt.Errorf("error finding files with pattern %q: %w", pattern, err)
			}

			for _, file := range matchingFiles {
				// Write prefix to the hash ("r" or "s") followed by \0, then
				// the file path.
				_, err = hash.Write([]byte(prefix + "\x00" + file + "\x00"))
				if err != nil {
					return "", fmt.Errorf("error writing %q path to hash: %w", file, err)
				}

				// Read and hash the content of the file.
				content, err := os.ReadFile(file)
				if err != nil {
					return "", fmt.Errorf("error reading file %s: %w", file, err)
				}
				_, err = hash.Write(append(content, []byte("\x00")...))
				if err != nil {
					return "", fmt.Errorf("error writing %q content to hash: %w", file, err)
				}
			}
		}
	}

	return hex.EncodeToString(hash.Sum(nil)), nil
}
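As context for how this checksum is meant to be consumed (a sketch, not code from this commit): a caller can compare checksums across reload attempts to tell whether the effective configuration, including globbed rule and scrape-config files, actually changed. configFile and the reload callback below are assumptions for illustration.

	// Hypothetical consumer of config.GenerateChecksum: skip the reload
	// when neither the main file nor anything it references has changed.
	func maybeReload(configFile, lastChecksum string, reload func() error) (string, error) {
		checksum, err := config.GenerateChecksum(configFile)
		if err != nil {
			return lastChecksum, err
		}
		if checksum == lastChecksum {
			return lastChecksum, nil // nothing changed, nothing to do
		}
		if err := reload(); err != nil {
			return lastChecksum, err
		}
		return checksum, nil
	}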
222	config/reload_test.go (new file)

@@ -0,0 +1,222 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestGenerateChecksum(t *testing.T) {
	tmpDir := t.TempDir()

	// Define paths for the temporary files.
	yamlFilePath := filepath.Join(tmpDir, "test.yml")
	ruleFilePath := filepath.Join(tmpDir, "rule_file.yml")
	scrapeConfigFilePath := filepath.Join(tmpDir, "scrape_config.yml")

	// Define initial and modified content for the files.
	originalRuleContent := "groups:\n- name: example\n  rules:\n  - alert: ExampleAlert"
	modifiedRuleContent := "groups:\n- name: example\n  rules:\n  - alert: ModifiedAlert"

	originalScrapeConfigContent := "scrape_configs:\n- job_name: example"
	modifiedScrapeConfigContent := "scrape_configs:\n- job_name: modified_example"

	// Define YAML content referencing the rule and scrape config files.
	yamlContent := `
rule_files:
- rule_file.yml
scrape_config_files:
- scrape_config.yml
`

	// Write initial content to files.
	require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644))
	require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644))
	require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644))

	// Generate the original checksum.
	originalChecksum := calculateChecksum(t, yamlFilePath)

	t.Run("Rule File Change", func(t *testing.T) {
		// Modify the rule file.
		require.NoError(t, os.WriteFile(ruleFilePath, []byte(modifiedRuleContent), 0o644))

		// Checksum should change.
		modifiedChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, modifiedChecksum)

		// Revert the rule file.
		require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Scrape Config Change", func(t *testing.T) {
		// Modify the scrape config file.
		require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(modifiedScrapeConfigContent), 0o644))

		// Checksum should change.
		modifiedChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, modifiedChecksum)

		// Revert the scrape config file.
		require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Rule File Deletion", func(t *testing.T) {
		// Delete the rule file.
		require.NoError(t, os.Remove(ruleFilePath))

		// Checksum should change.
		deletedChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, deletedChecksum)

		// Restore the rule file.
		require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Scrape Config Deletion", func(t *testing.T) {
		// Delete the scrape config file.
		require.NoError(t, os.Remove(scrapeConfigFilePath))

		// Checksum should change.
		deletedChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, deletedChecksum)

		// Restore the scrape config file.
		require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Main File Change", func(t *testing.T) {
		// Modify the main YAML file.
		modifiedYamlContent := `
global:
  scrape_interval: 3s
rule_files:
- rule_file.yml
scrape_config_files:
- scrape_config.yml
`
		require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644))

		// Checksum should change.
		modifiedChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, modifiedChecksum)

		// Revert the main YAML file.
		require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Rule File Removed from YAML Config", func(t *testing.T) {
		// Modify the YAML content to remove the rule file.
		modifiedYamlContent := `
scrape_config_files:
- scrape_config.yml
`
		require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644))

		// Checksum should change.
		modifiedChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, modifiedChecksum)

		// Revert the YAML content.
		require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Scrape Config Removed from YAML Config", func(t *testing.T) {
		// Modify the YAML content to remove the scrape config file.
		modifiedYamlContent := `
rule_files:
- rule_file.yml
`
		require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644))

		// Checksum should change.
		modifiedChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, modifiedChecksum)

		// Revert the YAML content.
		require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Empty Rule File", func(t *testing.T) {
		// Write an empty rule file.
		require.NoError(t, os.WriteFile(ruleFilePath, []byte(""), 0o644))

		// Checksum should change.
		emptyChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, emptyChecksum)

		// Restore the rule file.
		require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Empty Scrape Config File", func(t *testing.T) {
		// Write an empty scrape config file.
		require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(""), 0o644))

		// Checksum should change.
		emptyChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, emptyChecksum)

		// Restore the scrape config file.
		require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})
}

// calculateChecksum generates a checksum for the given YAML file path.
func calculateChecksum(t *testing.T, yamlFilePath string) string {
	checksum, err := GenerateChecksum(yamlFilePath)
	require.NoError(t, err)
	require.NotEmpty(t, checksum)
	return checksum
}
9	config/testdata/conf.good.yml

@@ -8,6 +8,8 @@ global:
  label_limit: 30
  label_name_length_limit: 200
  label_value_length_limit: 200
  query_log_file: query.log
  scrape_failure_log_file: fail.log
  # scrape_timeout is set to the global default (10s).

  external_labels:

@@ -72,6 +74,7 @@ scrape_configs:
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    scrape_failure_log_file: fail_prom.log
    file_sd_configs:
      - files:
          - foo/*.slow.json

@@ -87,6 +90,12 @@ scrape_configs:
          my: label
          your: label

    http_headers:
      foo:
        values: ["foobar"]
        secrets: ["bar", "foo"]
        files: ["valid_password_file"]

    relabel_configs:
      - source_labels: [job, __meta_dns_name]
        regex: (.*)some-[regex]
2	config/testdata/jobname_dup.bad.yml

@@ -1,4 +1,6 @@
# Two scrape configs with the same job names are not allowed.
global:
  metric_name_validation_scheme: legacy
scrape_configs:
  - job_name: prometheus
  - job_name: service-x
2	config/testdata/lowercase.bad.yml

@@ -1,3 +1,5 @@
global:
  metric_name_validation_scheme: legacy
scrape_configs:
  - job_name: prometheus
    relabel_configs:

@@ -1,4 +1,4 @@
global:
  metric_name_validation_scheme: utf8
  metric_name_validation_scheme: legacy
scrape_configs:
  - job_name: prometheus

@@ -1,5 +1,5 @@
global:
  metric_name_validation_scheme: utf8
  metric_name_validation_scheme: legacy
scrape_configs:
  - job_name: prometheus
    metric_name_validation_scheme: legacy
    metric_name_validation_scheme: utf8

@@ -1,3 +1,3 @@
scrape_configs:
  - job_name: prometheus
    metric_name_validation_scheme: utf8
    metric_name_validation_scheme: legacy
@ -1,82 +0,0 @@
|
|||
{{/* vim: set ft=html: */}}
|
||||
|
||||
{{/* Navbar, should be passed . */}}
|
||||
{{ define "navbar" }}
|
||||
<nav class="navbar fixed-top navbar-expand-sm navbar-dark bg-dark">
|
||||
<div class="container-fluid">
|
||||
<!-- Brand and toggle get grouped for better mobile display -->
|
||||
<div class="navbar-header">
|
||||
<button type="button" class="navbar-toggler" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false" aria-controls="navbar-nav" aria-label="toggle navigation">
|
||||
<span class="navbar-toggler-icon"></span>
|
||||
|
||||
|
||||
|
||||
|
||||
</button>
|
||||
<a class="navbar-brand" href="{{ pathPrefix }}/">Prometheus</a>
|
||||
</div>
|
||||
|
||||
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
|
||||
<ul class="nav navbar-nav">
|
||||
<li class="nav-item"><a class="nav-link" href="{{ pathPrefix }}/alerts">Alerts</a></li>
|
||||
<li class="nav-item"><a class="nav-link" href="https://www.pagerduty.com/">PagerDuty</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
{{ end }}
|
||||
|
||||
{{/* LHS menu, should be passed . */}}
|
||||
{{ define "menu" }}
|
||||
<div class="prom_lhs_menu row">
|
||||
<nav class="col-md-2 md-block bg-dark sidebar prom_lhs_menu_nav">
|
||||
<div class="sidebar-sticky">
|
||||
<ul class="nav flex-column">
|
||||
|
||||
{{ template "_menuItem" (args . "index.html.example" "Overview") }}
|
||||
|
||||
{{ if query "up{job='node'}" }}
|
||||
{{ template "_menuItem" (args . "node.html" "Node") }}
|
||||
{{ if match "^node" .Path }}
|
||||
{{ if .Params.instance }}
|
||||
<ul>
|
||||
<li {{ if eq .Path "node-overview.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
|
||||
<a class="nav-link" href="node-overview.html?instance={{ .Params.instance }}">{{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</a>
|
||||
</li>
|
||||
<ul>
|
||||
<li {{ if eq .Path "node-cpu.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
|
||||
<a class="nav-link" href="node-cpu.html?instance={{ .Params.instance }}">CPU</a>
|
||||
</li>
|
||||
<li {{ if eq .Path "node-disk.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
|
||||
<a class="nav-link" href="node-disk.html?instance={{ .Params.instance }}">Disk</a>
|
||||
</li>
|
||||
</ul>
|
||||
</ul>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
|
||||
{{ if query "up{job='prometheus'}" }}
|
||||
{{ template "_menuItem" (args . "prometheus.html" "Prometheus") }}
|
||||
{{ if match "^prometheus" .Path }}
|
||||
{{ if .Params.instance }}
|
||||
<ul>
|
||||
<li {{ if eq .Path "prometheus-overview.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
|
||||
<a class="nav-link" href="prometheus-overview.html?instance={{ .Params.instance }}">{{.Params.instance }}</a>
|
||||
</li>
|
||||
</ul>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
|
||||
</ul>
|
||||
</div>
|
||||
</nav>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
{{/* Helper, pass (args . path name) */}}
|
||||
{{ define "_menuItem" }}
|
||||
<li {{ if eq .arg0.Path .arg1 }} class="prom_lhs_menu_selected nav-item" {{ end }}><a class="nav-link" href="{{ .arg1 }}">{{ .arg2 }}</a></li>
|
||||
{{ end }}
|
||||
|
|
@ -1,138 +0,0 @@
|
|||
{{/* vim: set ft=html: */}}
|
||||
{{/* Load Prometheus console library JS/CSS. Should go in <head> */}}
|
||||
{{ define "prom_console_head" }}
|
||||
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.css">
|
||||
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css">
|
||||
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/css/prom_console.css">
|
||||
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.min.css">
|
||||
<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.v3.js"></script>
|
||||
<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.layout.min.js"></script>
|
||||
<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.js"></script>
|
||||
<script src="{{ pathPrefix }}/classic/static/vendor/js/jquery-3.5.1.min.js"></script>
|
||||
<script src="{{ pathPrefix }}/classic/static/vendor/js/popper.min.js"></script>
|
||||
<script src="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js"></script>
|
||||
|
||||
<script>
|
||||
var PATH_PREFIX = "{{ pathPrefix }}";
|
||||
</script>
|
||||
<script src="{{ pathPrefix }}/classic/static/js/prom_console.js"></script>
|
||||
{{ end }}
|
||||
|
||||
{{/* Top of all pages. */}}
|
||||
{{ define "head" -}}
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
{{ template "prom_console_head" }}
|
||||
</head>
|
||||
<body>
|
||||
{{ template "navbar" . }}
|
||||
|
||||
{{ template "menu" . }}
|
||||
{{ end }}
|
||||
|
||||
{{ define "__prom_query_drilldown_noop" }}{{ . }}{{ end }}
|
||||
{{ define "humanize" }}{{ humanize . }}{{ end }}
|
||||
{{ define "humanizeNoSmallPrefix" }}{{ if and (lt . 1.0) (gt . -1.0) }}{{ printf "%.3g" . }}{{ else }}{{ humanize . }}{{ end }}{{ end }}
|
||||
{{ define "humanize1024" }}{{ humanize1024 . }}{{ end }}
|
||||
{{ define "humanizeDuration" }}{{ humanizeDuration . }}{{ end }}
|
||||
{{ define "humanizePercentage" }}{{ humanizePercentage . }}{{ end }}
|
||||
{{ define "humanizeTimestamp" }}{{ humanizeTimestamp . }}{{ end }}
|
||||
{{ define "printf.1f" }}{{ printf "%.1f" . }}{{ end }}
|
||||
{{ define "printf.3g" }}{{ printf "%.3g" . }}{{ end }}
|
||||
|
||||
{{/* prom_query_drilldown (args expr suffix? renderTemplate?)
|
||||
Displays the result of the expression, with a link to /graph for it.
|
||||
|
||||
renderTemplate is the name of the template to use to render the value.
|
||||
*/}}
|
||||
{{ define "prom_query_drilldown" }}
|
||||
{{ $expr := .arg0 }}{{ $suffix := (or .arg1 "") }}{{ $renderTemplate := (or .arg2 "__prom_query_drilldown_noop") }}
|
||||
<a class="prom_query_drilldown" href="{{ pathPrefix }}{{ graphLink $expr }}">{{ with query $expr }}{{tmpl $renderTemplate ( . | first | value )}}{{ $suffix }}{{ else }}-{{ end }}</a>
|
||||
{{ end }}
|
||||
|
||||
{{ define "prom_path" }}/consoles/{{ .Path }}?{{ range $param, $value := .Params }}{{ $param }}={{ $value }}&{{ end }}{{ end }}"
|
||||
|
||||
{{ define "prom_right_table_head" }}
|
||||
<div class="prom_console_rhs">
|
||||
<table class="table table-bordered table-hover table-sm">
|
||||
{{ end }}
|
||||
{{ define "prom_right_table_tail" }}
|
||||
</table>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
{{/* RHS table head, pass job name. Should be used after prom_right_table_head. */}}
|
||||
{{ define "prom_right_table_job_head" }}
|
||||
<tr>
|
||||
<th>{{ . }}</th>
|
||||
<th>{{ template "prom_query_drilldown" (args (printf "sum(up{job='%s'})" .)) }} / {{ template "prom_query_drilldown" (args (printf "count(up{job='%s'})" .)) }}</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>CPU</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "avg by(job)(irate(process_cpu_seconds_total{job='%s'}[5m]))" .) "s/s" "humanizeNoSmallPrefix") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Memory</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "avg by(job)(process_resident_memory_bytes{job='%s'})" .) "B" "humanize1024") }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
|
||||
|
||||
{{ define "prom_content_head" }}
|
||||
<div class="prom_console_content">
|
||||
<div class="container-fluid">
|
||||
{{ template "prom_graph_timecontrol" . }}
|
||||
{{ end }}
|
||||
{{ define "prom_content_tail" }}
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
{{ define "prom_graph_timecontrol" }}
|
||||
<div class="prom_graph_timecontrol">
|
||||
<div class="prom_graph_timecontrol_inner">
|
||||
<div class="prom_graph_timecontrol_group ">
|
||||
<button class="btn btn-light pull-left" type="button" id="prom_graph_duration_shrink" title="Shrink the time range.">
|
||||
<i class="glyphicon glyphicon-minus"></i>
|
||||
</button><!-- Comments between elements to remove spaces
|
||||
--><input class="input pull-left align-middle" size="3" title="Time range of graph" type="text" id="prom_graph_duration"><!--
|
||||
--><button class="btn btn-light pull-left" type="button" id="prom_graph_duration_grow" title="Grow the time range.">
|
||||
<i class="glyphicon glyphicon-plus"></i>
|
||||
</button>
|
||||
</div>
|
||||
<div class="prom_graph_timecontrol_group ">
|
||||
<button class="btn btn-light pull-left" type="button" id="prom_graph_time_back" title="Rewind the end time.">
|
||||
<i class="glyphicon glyphicon-backward"></i>
|
||||
</button><!--
|
||||
--><input class="input pull-left align-middle" title="End time of graph" placeholder="Until" type="text" id="prom_graph_time_end" size="16" value=""><!--
|
||||
--><button class="btn btn-light pull-left" type="button" id="prom_graph_time_forward" title="Advance the end time.">
|
||||
<i class="glyphicon glyphicon-forward"></i>
|
||||
</button>
|
||||
</div>
|
||||
<div class="prom_graph_timecontrol_group ">
|
||||
<div class="btn-group dropup prom_graph_timecontrol_refresh pull-left">
|
||||
<button type="button" class="btn btn-light pull-left" id="prom_graph_refresh_button" title="Refresh.">
|
||||
<i class="glyphicon glyphicon-repeat"></i>
|
||||
<span class="icon-repeat"></span>
|
||||
(<span id="prom_graph_refresh_button_value">Off</span>)
|
||||
</button>
|
||||
<button type="button" class="btn btn-light pull-left dropdown-toggle" data-toggle="dropdown" title="Set autorefresh."aria-haspopup="true" aria-expanded="false">
|
||||
<span class="caret"></span>
|
||||
</button>
|
||||
<ul class="dropdown-menu" id="prom_graph_refresh_intervals" role="menu">
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<script>
|
||||
new PromConsole.TimeControl();
|
||||
</script>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
{{/* Bottom of all pages. */}}
|
||||
{{ define "tail" }}
|
||||
</body>
|
||||
</html>
|
||||
{{ end }}
|
|
@ -1,28 +0,0 @@
|
|||
{{ template "head" . }}
|
||||
|
||||
{{ template "prom_right_table_head" }}
|
||||
{{ template "prom_right_table_tail" }}
|
||||
|
||||
{{ template "prom_content_head" . }}
|
||||
<h1>Overview</h1>
|
||||
<p>These are example consoles for Prometheus.</p>
|
||||
|
||||
<p>These consoles expect exporters to have the following job labels:</p>
|
||||
<table class="table table-sm table-striped table-bordered" style="width: 0%">
|
||||
<tr>
|
||||
<th>Exporter</th>
|
||||
<th>Job label</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Node Exporter</td>
|
||||
<td><code>node</code></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Prometheus</td>
|
||||
<td><code>prometheus</code></td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
{{ template "prom_content_tail" . }}
|
||||
|
||||
{{ template "tail" }}
|
|

@@ -1,60 +0,0 @@
{{ template "head" . }}

{{ template "prom_right_table_head" }}
<tr>
<th colspan="2">CPU(s): {{ template "prom_query_drilldown" (args (printf "scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance)) }}</th>
</tr>
{{ range printf "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='%s'}[5m])) * 100 / scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance .Params.instance | query | sortByLabel "mode" }}
<tr>
<td>{{ .Labels.mode | title }} CPU</td>
<td>{{ .Value | printf "%.1f" }}%</td>
</tr>
{{ end }}
<tr><th colspan="2">Misc</th></tr>
<tr>
<td>Processes Running</td>
<td>{{ template "prom_query_drilldown" (args (printf "node_procs_running{job='node',instance='%s'}" .Params.instance) "" "humanize") }}</td>
</tr>
<tr>
<td>Processes Blocked</td>
<td>{{ template "prom_query_drilldown" (args (printf "node_procs_blocked{job='node',instance='%s'}" .Params.instance) "" "humanize") }}</td>
</tr>
<tr>
<td>Forks</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_forks_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
</tr>
<tr>
<td>Context Switches</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_context_switches_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
</tr>
<tr>
<td>Interrupts</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_intr_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
</tr>
<tr>
<td>1m Loadavg</td>
<td>{{ template "prom_query_drilldown" (args (printf "node_load1{job='node',instance='%s'}" .Params.instance)) }}</td>
</tr>
<tr>
</tr>
{{ template "prom_right_table_tail" }}

{{ template "prom_content_head" . }}
<h1>Node CPU - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>

<h3>CPU Usage</h3>
<div id="cpuGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#cpuGraph"),
expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle',mode!='iowait',mode!='steal'}[5m]))",
renderer: 'area',
max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else }}undefined{{ end }},
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yTitle: 'Cores'
})
</script>
{{ template "prom_content_tail" . }}

{{ template "tail" }}
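
{{/* Illustrative sketch, assuming the standard console library: "prom_query_drilldown"
takes (args <expr> [<unit suffix>] [<formatter name>]) and renders the query's current
value as a link into the expression browser. For example,
  {{ template "prom_query_drilldown" (args "node_load1{job='node'}" "" "humanize") }}
would show the humanized 1m load average as a drilldown link; the exact query here is
only an illustration. */}}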

@@ -1,78 +0,0 @@
{{ template "head" . }}

{{ template "prom_right_table_head" }}
<tr>
<th colspan="2">Disks</th>
</tr>
{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }}
<th colspan="2">{{ .Labels.device }}</th>
<tr>
<td>Utilization</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }}</td>
</tr>
<tr>
<td>Throughput</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
</tr>
<tr>
<td>Avg Read Time</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_reads_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }}</td>
</tr>
<tr>
<td>Avg Write Time</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_write_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_writes_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }}</td>
</tr>
{{ end }}
<tr>
<th colspan="2">Filesystem Fullness</th>
</tr>
{{ define "roughlyNearZero" }}
{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }}
{{ end }}
{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }}
<tr>
<td>{{ .Labels.mountpoint }}</td>
<td>{{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }}</td>
</tr>
{{ end }}
<tr>
</tr>
{{ template "prom_right_table_tail" }}

{{ template "prom_content_head" . }}
<h1>Node Disk - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>

<h3>Disk I/O Utilization</h3>
<div id="diskioGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#diskioGraph"),
expr: [
"irate(node_disk_io_time_seconds_total{job='node',instance='{{ .Params.instance }}',device!~'^(md\\\\d+$|dm-)'}[5m]) * 100",
],
min: 0,
name: '[[ device ]]',
yUnits: "%",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yTitle: 'Disk I/O Utilization'
})
</script>
<h3>Filesystem Usage</h3>
<div id="fsGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#fsGraph"),
expr: "100 - node_filesystem_avail_bytes{job='node',instance='{{ .Params.instance }}'} / node_filesystem_size_bytes{job='node'} * 100",
min: 0,
max: 100,
name: '[[ mountpoint ]]',
yUnits: "%",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yTitle: 'Filesystem Fullness'
})
</script>
{{ template "prom_content_tail" . }}

{{ template "tail" }}
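
{{/* Worked example (illustrative): in "roughlyNearZero" above, ".1" lexes as the number
literal 0.1, so "gt .1 ." is true when 0.1 is greater than the value. A fullness of
0.04% therefore renders as "~0", while 23.456 renders as "23.5". This avoids printing
a misleading "0.0" for filesystems that are almost empty. */}}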

@@ -1,121 +0,0 @@
{{ template "head" . }}

{{ template "prom_right_table_head" }}
<tr><th colspan="2">Overview</th></tr>
<tr>
<td>User CPU</td>
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='user'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
</tr>
<tr>
<td>System CPU</td>
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='system'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
</tr>
<tr>
<td>Memory Total</td>
<td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemTotal_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
</tr>
<tr>
<td>Memory Free</td>
<td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
</tr>
<tr>
<th colspan="2">Network</th>
</tr>
{{ range printf "node_network_receive_bytes_total{job='node',instance='%s',device!='lo'}" .Params.instance | query | sortByLabel "device" }}
<tr>
<td>{{ .Labels.device }} Received</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_network_receive_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
</tr>
<tr>
<td>{{ .Labels.device }} Transmitted</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_network_transmit_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
</tr>
{{ end }}
<tr>
<th colspan="2">Disks</th>
</tr>
{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s',device!~'^(md\\\\d+$|dm-)'}" .Params.instance | query | sortByLabel "device" }}
<tr>
<td>{{ .Labels.device }} Utilization</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }}</td>
</tr>
{{ end }}
{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }}
<tr>
<td>{{ .Labels.device }} Throughput</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
</tr>
{{ end }}
<tr>
<th colspan="2">Filesystem Fullness</th>
</tr>
{{ define "roughlyNearZero" }}
{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }}
{{ end }}
{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }}
<tr>
<td>{{ .Labels.mountpoint }}</td>
<td>{{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }}</td>
</tr>
{{ end }}

{{ template "prom_right_table_tail" }}

{{ template "prom_content_head" . }}
<h1>Node Overview - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>

<h3>CPU Usage</h3>
<div id="cpuGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#cpuGraph"),
expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle',mode!='iowait',mode!='steal'}[5m]))",
renderer: 'area',
max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else }}undefined{{ end }},
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yTitle: 'Cores'
})
</script>

<h3>Disk I/O Utilization</h3>
<div id="diskioGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#diskioGraph"),
expr: [
"irate(node_disk_io_time_seconds_total{job='node',instance='{{ .Params.instance }}',device!~'^(md\\\\d+$|dm-)'}[5m]) * 100",
],
min: 0,
name: '[[ device ]]',
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "%",
yTitle: 'Disk I/O Utilization'
})
</script>

<h3>Memory</h3>
<div id="memoryGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#memoryGraph"),
renderer: 'area',
expr: [
"node_memory_Cached_bytes{job='node',instance='{{ .Params.instance }}'}",
"node_memory_Buffers_bytes{job='node',instance='{{ .Params.instance }}'}",
"node_memory_MemTotal_bytes{job='node',instance='{{ .Params.instance }}'} - node_memory_MemFree_bytes{job='node',instance='{{ .Params.instance }}'} - node_memory_Buffers_bytes{job='node',instance='{{ .Params.instance }}'} - node_memory_Cached_bytes{job='node',instance='{{ .Params.instance }}'}",
"node_memory_MemFree_bytes{job='node',instance='{{ .Params.instance }}'}",
],
name: ["Cached", "Buffers", "Used", "Free"],
min: 0,
yUnits: "B",
yAxisFormatter: PromConsole.NumberFormatter.humanize1024,
yHoverFormatter: PromConsole.NumberFormatter.humanize1024,
yTitle: 'Memory'
})
</script>

{{ template "prom_content_tail" . }}

{{ template "tail" }}
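
{{/* Illustrative note: the third memory series above is derived as
Used = MemTotal - MemFree - Buffers - Cached, so the four stacked areas always sum to
MemTotal for the instance. (The "Free" series originally read node_memory_MemFree,
which lacks the _bytes suffix used by every other series; it is normalized here on the
assumption that the _bytes metric was intended.) */}}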

@@ -1,35 +0,0 @@
{{ template "head" . }}

{{ template "prom_right_table_head" }}
<tr>
<th>Node</th>
<th>{{ template "prom_query_drilldown" (args "sum(up{job='node'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='node'})") }}</th>
</tr>
{{ template "prom_right_table_tail" }}

{{ template "prom_content_head" . }}
<h1>Node</h1>

<table class="table table-condensed table-striped table-bordered" style="width: 0%">
<tr>
<th>Node</th>
<th>Up</th>
<th>CPU<br/>Used</th>
<th>Memory<br/>Available</th>
</tr>
{{ range query "up{job='node'}" | sortByLabel "instance" }}
<tr>
<td><a href="node-overview.html?instance={{ .Labels.instance }}">{{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Labels.instance }}</a></td>
<td{{ if eq (. | value) 1.0 }}>Yes{{ else }} class="alert-danger">No{{ end }}</td>
<td>{{ template "prom_query_drilldown" (args (printf "100 * (1 - avg by(instance) (sum without(mode) (irate(node_cpu_seconds_total{job='node',mode=~'idle|iowait|steal',instance='%s'}[5m]))))" .Labels.instance) "%" "printf.1f") }}</td>
<td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'} + node_memory_Cached_bytes{job='node',instance='%s'} + node_memory_Buffers_bytes{job='node',instance='%s'}" .Labels.instance .Labels.instance .Labels.instance) "B" "humanize1024") }}</td>
</tr>
{{ else }}
<tr><td colspan=4>No nodes found.</td></tr>
{{ end }}
</table>


{{ template "prom_content_tail" . }}

{{ template "tail" }}

@@ -1,96 +0,0 @@
{{ template "head" . }}

{{ template "prom_right_table_head" }}
<tr>
<th colspan="2">Overview</th>
</tr>
<tr>
<td>CPU</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(process_cpu_seconds_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "s/s" "humanizeNoSmallPrefix") }}</td>
</tr>
<tr>
<td>Memory</td>
<td>{{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
</tr>
<tr>
<td>Version</td>
<td>{{ with query (printf "prometheus_build_info{job='prometheus',instance='%s'}" .Params.instance) }}{{ . | first | label "version" }}{{ end }}</td>
</tr>

<tr>
<th colspan="2">Storage</th>
</tr>
<tr>
<td>Ingested Samples</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "/s" "humanizeNoSmallPrefix") }}</td>
</tr>
<tr>
<td>Head Series</td>
<td>{{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_head_series{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }}</td>
</tr>
<tr>
<td>Blocks Loaded</td>
<td>{{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_blocks_loaded{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }}</td>
</tr>
<tr>
<th colspan="2">Rules</th>
</tr>
<tr>
<td>Evaluation Duration</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_evaluator_duration_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_evaluator_duration_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }}</td>
</tr>
<tr>
<td>Notification Latency</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_notifications_latency_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_notifications_latency_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }}</td>
</tr>
<tr>
<td>Notification Queue</td>
<td>{{ template "prom_query_drilldown" (args (printf "prometheus_notifications_queue_length{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }}</td>
</tr>
<tr>
<th colspan="2">HTTP Server</th>
</tr>
{{ range printf "prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s'}" .Params.instance | query | sortByLabel "handler" }}
<tr>
<td>{{ .Labels.handler }}</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s',handler='%s'}[5m])" .Labels.instance .Labels.handler) "/s" "humanizeNoSmallPrefix") }}</td>
</tr>
{{ end }}

{{ template "prom_right_table_tail" }}

{{ template "prom_content_head" . }}
<div class="prom_content_div">
<h1>Prometheus Overview - {{ .Params.instance }}</h1>

<h3>Ingested Samples</h3>
<div id="samplesGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#samplesGraph"),
expr: "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='{{ .Params.instance }}'}[5m])",
name: 'Ingested Samples',
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yTitle: "Samples",
yUnits: "/s",
})
</script>

<h3>HTTP Server</h3>
<div id="serverGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#serverGraph"),
expr: "irate(prometheus_http_request_duration_seconds_count{job='prometheus',instance='{{ .Params.instance }}'}[5m])",
name: '[[handler]]',
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yTitle: "Requests",
yUnits: "/s",
})
</script>
</div>
{{ template "prom_content_tail" . }}

{{ template "tail" }}
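
{{/* Illustrative note: the ratio irate(..._sum[5m]) / irate(..._count[5m]) used in the
Rules section above is the standard PromQL idiom for the average duration per event
over the rate window, here the mean rule-evaluation and notification latency. */}}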

@@ -1,34 +0,0 @@
{{ template "head" . }}

{{ template "prom_right_table_head" }}
<tr>
<th>Prometheus</th>
<th>{{ template "prom_query_drilldown" (args "sum(up{job='prometheus'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='prometheus'})") }}</th>
</tr>
{{ template "prom_right_table_tail" }}

{{ template "prom_content_head" . }}
<h1>Prometheus</h1>

<table class="table table-sm table-striped table-bordered" style="width: 0%">
<tr>
<th>Prometheus</th>
<th>Up</th>
<th>Ingested Samples</th>
<th>Memory</th>
</tr>
{{ range query "up{job='prometheus'}" | sortByLabel "instance" }}
<tr>
<td><a href="prometheus-overview.html?instance={{ .Labels.instance }}">{{ .Labels.instance }}</a></td>
<td{{ if eq (. | value) 1.0 }}>Yes{{ else }} class="alert-danger">No{{ end }}</td>
<td class="text-right">{{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Labels.instance) "/s" "humanizeNoSmallPrefix") }}</td>
<td class="text-right">{{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Labels.instance) "B" "humanize1024") }}</td>
</tr>
{{ else }}
<tr><td colspan=4>No Prometheus instances found.</td></tr>
{{ end }}
</table>

{{ template "prom_content_tail" . }}

{{ template "tail" }}
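
{{/* Illustrative note: the Up cell emits its attribute conditionally, so
"<td{{ if eq (. | value) 1.0 }}>Yes" produces a plain cell for healthy rows, while the
else branch closes the tag with class="alert-danger" before printing "No". */}}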

@@ -26,7 +26,6 @@ import (
"github.com/prometheus/common/model"
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/api/discovery/v1"
"k8s.io/api/discovery/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"


@@ -103,9 +102,9 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
return
}

// TODO(brancz): use cache.Indexer to index endpoints by
// disv1beta1.LabelServiceName so this operation doesn't have to
// iterate over all endpoint objects.
// TODO(brancz): use cache.Indexer to index endpointslices by
// LabelServiceName so this operation doesn't have to iterate over all
// endpoint objects.
for _, obj := range e.endpointSliceStore.List() {
esa, err := e.getEndpointSliceAdaptor(obj)
if err != nil {

@@ -241,8 +240,6 @@ func (e *EndpointSlice) getEndpointSliceAdaptor(o interface{}) (endpointSliceAda
switch endpointSlice := o.(type) {
case *v1.EndpointSlice:
return newEndpointSliceAdaptorFromV1(endpointSlice), nil
case *v1beta1.EndpointSlice:
return newEndpointSliceAdaptorFromV1beta1(endpointSlice), nil
default:
return nil, fmt.Errorf("received unexpected object: %v", o)
}
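
// Illustrative sketch (assumption, not part of this change): the TODO above could be
// addressed by registering an index on the informer, roughly:
//
//	_ = eps.AddIndexers(cache.Indexers{
//		"service": func(obj interface{}) ([]string, error) {
//			es, ok := obj.(*v1.EndpointSlice)
//			if !ok {
//				return nil, nil
//			}
//			return []string{es.Labels[v1.LabelServiceName]}, nil
//		},
//	})
//
// after which a ByIndex("service", name) lookup on the informer's indexer would replace
// the full List() scan. The index name "service" is hypothetical.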

@@ -16,7 +16,6 @@ package kubernetes
import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/discovery/v1"
"k8s.io/api/discovery/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)


@@ -109,59 +108,6 @@ func (e *endpointSliceAdaptorV1) labelServiceName() string {
return v1.LabelServiceName
}

// Adaptor for k8s.io/api/discovery/v1beta1.
type endpointSliceAdaptorV1Beta1 struct {
endpointSlice *v1beta1.EndpointSlice
}

func newEndpointSliceAdaptorFromV1beta1(endpointSlice *v1beta1.EndpointSlice) endpointSliceAdaptor {
return &endpointSliceAdaptorV1Beta1{endpointSlice: endpointSlice}
}

func (e *endpointSliceAdaptorV1Beta1) get() interface{} {
return e.endpointSlice
}

func (e *endpointSliceAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta {
return e.endpointSlice.ObjectMeta
}

func (e *endpointSliceAdaptorV1Beta1) name() string {
return e.endpointSlice.Name
}

func (e *endpointSliceAdaptorV1Beta1) namespace() string {
return e.endpointSlice.Namespace
}

func (e *endpointSliceAdaptorV1Beta1) addressType() string {
return string(e.endpointSlice.AddressType)
}

func (e *endpointSliceAdaptorV1Beta1) endpoints() []endpointSliceEndpointAdaptor {
eps := make([]endpointSliceEndpointAdaptor, 0, len(e.endpointSlice.Endpoints))
for i := 0; i < len(e.endpointSlice.Endpoints); i++ {
eps = append(eps, newEndpointSliceEndpointAdaptorFromV1beta1(e.endpointSlice.Endpoints[i]))
}
return eps
}

func (e *endpointSliceAdaptorV1Beta1) ports() []endpointSlicePortAdaptor {
ports := make([]endpointSlicePortAdaptor, 0, len(e.endpointSlice.Ports))
for i := 0; i < len(e.endpointSlice.Ports); i++ {
ports = append(ports, newEndpointSlicePortAdaptorFromV1beta1(e.endpointSlice.Ports[i]))
}
return ports
}

func (e *endpointSliceAdaptorV1Beta1) labels() map[string]string {
return e.endpointSlice.Labels
}

func (e *endpointSliceAdaptorV1Beta1) labelServiceName() string {
return v1beta1.LabelServiceName
}

type endpointSliceEndpointAdaptorV1 struct {
endpoint v1.Endpoint
}

@@ -218,62 +164,6 @@ func (e *endpointSliceEndpointConditionsAdaptorV1) terminating() *bool {
return e.endpointConditions.Terminating
}

type endpointSliceEndpointAdaptorV1beta1 struct {
endpoint v1beta1.Endpoint
}

func newEndpointSliceEndpointAdaptorFromV1beta1(endpoint v1beta1.Endpoint) endpointSliceEndpointAdaptor {
return &endpointSliceEndpointAdaptorV1beta1{endpoint: endpoint}
}

func (e *endpointSliceEndpointAdaptorV1beta1) addresses() []string {
return e.endpoint.Addresses
}

func (e *endpointSliceEndpointAdaptorV1beta1) hostname() *string {
return e.endpoint.Hostname
}

func (e *endpointSliceEndpointAdaptorV1beta1) nodename() *string {
return e.endpoint.NodeName
}

func (e *endpointSliceEndpointAdaptorV1beta1) zone() *string {
return nil
}

func (e *endpointSliceEndpointAdaptorV1beta1) conditions() endpointSliceEndpointConditionsAdaptor {
return newEndpointSliceEndpointConditionsAdaptorFromV1beta1(e.endpoint.Conditions)
}

func (e *endpointSliceEndpointAdaptorV1beta1) targetRef() *corev1.ObjectReference {
return e.endpoint.TargetRef
}

func (e *endpointSliceEndpointAdaptorV1beta1) topology() map[string]string {
return e.endpoint.Topology
}

type endpointSliceEndpointConditionsAdaptorV1beta1 struct {
endpointConditions v1beta1.EndpointConditions
}

func newEndpointSliceEndpointConditionsAdaptorFromV1beta1(endpointConditions v1beta1.EndpointConditions) endpointSliceEndpointConditionsAdaptor {
return &endpointSliceEndpointConditionsAdaptorV1beta1{endpointConditions: endpointConditions}
}

func (e *endpointSliceEndpointConditionsAdaptorV1beta1) ready() *bool {
return e.endpointConditions.Ready
}

func (e *endpointSliceEndpointConditionsAdaptorV1beta1) serving() *bool {
return e.endpointConditions.Serving
}

func (e *endpointSliceEndpointConditionsAdaptorV1beta1) terminating() *bool {
return e.endpointConditions.Terminating
}

type endpointSlicePortAdaptorV1 struct {
endpointPort v1.EndpointPort
}

@@ -298,28 +188,3 @@ func (e *endpointSlicePortAdaptorV1) protocol() *string {
func (e *endpointSlicePortAdaptorV1) appProtocol() *string {
return e.endpointPort.AppProtocol
}

type endpointSlicePortAdaptorV1beta1 struct {
endpointPort v1beta1.EndpointPort
}

func newEndpointSlicePortAdaptorFromV1beta1(port v1beta1.EndpointPort) endpointSlicePortAdaptor {
return &endpointSlicePortAdaptorV1beta1{endpointPort: port}
}

func (e *endpointSlicePortAdaptorV1beta1) name() *string {
return e.endpointPort.Name
}

func (e *endpointSlicePortAdaptorV1beta1) port() *int32 {
return e.endpointPort.Port
}

func (e *endpointSlicePortAdaptorV1beta1) protocol() *string {
val := string(*e.endpointPort.Protocol)
return &val
}

func (e *endpointSlicePortAdaptorV1beta1) appProtocol() *string {
return e.endpointPort.AppProtocol
}
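
// Illustrative sketch (shape inferred from the methods above, hedged): the interface
// both API versions used to satisfy looks roughly like:
//
//	type endpointSliceAdaptor interface {
//		get() interface{}
//		getObjectMeta() metav1.ObjectMeta
//		name() string
//		namespace() string
//		addressType() string
//		endpoints() []endpointSliceEndpointAdaptor
//		ports() []endpointSlicePortAdaptor
//		labels() map[string]string
//		labelServiceName() string
//	}
//
// With the v1beta1 adaptor deleted, newEndpointSliceAdaptorFromV1 is its only
// remaining constructor.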

@@ -18,7 +18,6 @@ import (

"github.com/stretchr/testify/require"
v1 "k8s.io/api/discovery/v1"
"k8s.io/api/discovery/v1beta1"
)

func Test_EndpointSliceAdaptor_v1(t *testing.T) {

@@ -48,31 +47,3 @@ func Test_EndpointSliceAdaptor_v1(t *testing.T) {
require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol())
}
}

func Test_EndpointSliceAdaptor_v1beta1(t *testing.T) {
endpointSlice := makeEndpointSliceV1beta1()
adaptor := newEndpointSliceAdaptorFromV1beta1(endpointSlice)

require.Equal(t, endpointSlice.ObjectMeta.Name, adaptor.name())
require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace())
require.Equal(t, endpointSlice.AddressType, v1beta1.AddressType(adaptor.addressType()))
require.Equal(t, endpointSlice.Labels, adaptor.labels())
require.Equal(t, "testendpoints", endpointSlice.Labels[v1beta1.LabelServiceName])

for i, endpointAdaptor := range adaptor.endpoints() {
require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses())
require.Equal(t, endpointSlice.Endpoints[i].Hostname, endpointAdaptor.hostname())
require.Equal(t, endpointSlice.Endpoints[i].Conditions.Ready, endpointAdaptor.conditions().ready())
require.Equal(t, endpointSlice.Endpoints[i].Conditions.Serving, endpointAdaptor.conditions().serving())
require.Equal(t, endpointSlice.Endpoints[i].Conditions.Terminating, endpointAdaptor.conditions().terminating())
require.Equal(t, endpointSlice.Endpoints[i].TargetRef, endpointAdaptor.targetRef())
require.Equal(t, endpointSlice.Endpoints[i].Topology, endpointAdaptor.topology())
}

for i, portAdaptor := range adaptor.ports() {
require.Equal(t, endpointSlice.Ports[i].Name, portAdaptor.name())
require.Equal(t, endpointSlice.Ports[i].Port, portAdaptor.port())
require.EqualValues(t, endpointSlice.Ports[i].Protocol, portAdaptor.protocol())
require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol())
}
}

@@ -21,7 +21,6 @@ import (
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/discovery/v1"
"k8s.io/api/discovery/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"

@@ -114,62 +113,8 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
}
}

func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice {
return &v1beta1.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
Labels: map[string]string{
v1beta1.LabelServiceName: "testendpoints",
},
Annotations: map[string]string{
"test.annotation": "test",
},
},
AddressType: v1beta1.AddressTypeIPv4,
Ports: []v1beta1.EndpointPort{
{
Name: strptr("testport"),
Port: int32ptr(9000),
Protocol: protocolptr(corev1.ProtocolTCP),
},
},
Endpoints: []v1beta1.Endpoint{
{
Addresses: []string{"1.2.3.4"},
Hostname: strptr("testendpoint1"),
}, {
Addresses: []string{"2.3.4.5"},
Conditions: v1beta1.EndpointConditions{
Ready: boolptr(true),
Serving: boolptr(true),
Terminating: boolptr(false),
},
}, {
Addresses: []string{"3.4.5.6"},
Conditions: v1beta1.EndpointConditions{
Ready: boolptr(false),
Serving: boolptr(true),
Terminating: boolptr(true),
},
}, {
Addresses: []string{"4.5.6.7"},
Conditions: v1beta1.EndpointConditions{
Ready: boolptr(true),
Serving: boolptr(true),
Terminating: boolptr(false),
},
TargetRef: &corev1.ObjectReference{
Kind: "Node",
Name: "barbaz",
},
},
},
}
}

func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.25.0")
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}})

k8sDiscoveryTest{
discovery: n,

@@ -249,71 +194,6 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
}.Run(t)
}

func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "1.20.0")

k8sDiscoveryTest{
discovery: n,
beforeRun: func() {
obj := makeEndpointSliceV1beta1()
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
expectedRes: map[string]*targetgroup.Group{
"endpointslice/default/testendpoints": {
Targets: []model.LabelSet{
{
"__address__": "1.2.3.4:9000",
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "2.3.4.5:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "3.4.5.6:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "4.5.6.7:9000",
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_namespace": "default",
"__meta_kubernetes_endpointslice_name": "testendpoints",
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints",
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
"__meta_kubernetes_endpointslice_annotation_test_annotation": "test",
"__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true",
},
Source: "endpointslice/default/testendpoints",
},
},
}.Run(t)
}

func TestEndpointSliceDiscoveryAdd(t *testing.T) {
obj := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{

@@ -353,25 +233,25 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
PodIP: "1.2.3.4",
},
}
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.20.0", obj)
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, obj)

k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := &v1beta1.EndpointSlice{
obj := &v1.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
},
AddressType: v1beta1.AddressTypeIPv4,
Ports: []v1beta1.EndpointPort{
AddressType: v1.AddressTypeIPv4,
Ports: []v1.EndpointPort{
{
Name: strptr("testport"),
Port: int32ptr(9000),
Protocol: protocolptr(corev1.ProtocolTCP),
},
},
Endpoints: []v1beta1.Endpoint{
Endpoints: []v1.Endpoint{
{
Addresses: []string{"4.3.2.1"},
TargetRef: &corev1.ObjectReference{

@@ -379,13 +259,13 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
Name: "testpod",
Namespace: "default",
},
Conditions: v1beta1.EndpointConditions{
Conditions: v1.EndpointConditions{
Ready: boolptr(false),
},
},
},
}
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
c.DiscoveryV1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
expectedRes: map[string]*targetgroup.Group{

@@ -440,118 +320,34 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
}

func TestEndpointSliceDiscoveryDelete(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := makeEndpointSliceV1()
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{})
c.DiscoveryV1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{})
},
expectedMaxItems: 2,
expectedRes: map[string]*targetgroup.Group{
"endpointslice/default/testendpoints": {
Source: "endpointslice/default/testendpoints",
Targets: []model.LabelSet{
{
"__address__": "1.2.3.4:9000",
"__meta_kubernetes_endpointslice_address_target_kind": "",
"__meta_kubernetes_endpointslice_address_target_name": "",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "2.3.4.5:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "3.4.5.6:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "4.5.6.7:9000",
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
},
Labels: map[model.LabelName]model.LabelValue{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_name": "testendpoints",
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints",
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
"__meta_kubernetes_endpointslice_annotation_test_annotation": "test",
"__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true",
"__meta_kubernetes_namespace": "default",
},
},
},
}.Run(t)
}

func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := &v1beta1.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
},
AddressType: v1beta1.AddressTypeIPv4,
Ports: []v1beta1.EndpointPort{
{
Name: strptr("testport"),
Port: int32ptr(9000),
Protocol: protocolptr(corev1.ProtocolTCP),
},
},
Endpoints: []v1beta1.Endpoint{
{
Addresses: []string{"1.2.3.4"},
Hostname: strptr("testendpoint1"),
}, {
Addresses: []string{"2.3.4.5"},
Conditions: v1beta1.EndpointConditions{
Ready: boolptr(true),
},
},
},
}
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
obj := makeEndpointSliceV1()
obj.ObjectMeta.Labels = nil
obj.ObjectMeta.Annotations = nil
obj.Endpoints = obj.Endpoints[0:2]
c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
},
expectedMaxItems: 2,
expectedRes: map[string]*targetgroup.Group{

@@ -586,39 +382,11 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "3.4.5.6:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "4.5.6.7:9000",
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_name": "testendpoints",
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints",
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
"__meta_kubernetes_endpointslice_annotation_test_annotation": "test",
"__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true",
"__meta_kubernetes_namespace": "default",
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_name": "testendpoints",
"__meta_kubernetes_namespace": "default",
},
},
},

@@ -626,85 +394,18 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
}

func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := &v1beta1.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
},
AddressType: v1beta1.AddressTypeIPv4,
Ports: []v1beta1.EndpointPort{
{
Name: strptr("testport"),
Port: int32ptr(9000),
Protocol: protocolptr(corev1.ProtocolTCP),
},
},
Endpoints: []v1beta1.Endpoint{},
}
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
obj := makeEndpointSliceV1()
obj.Endpoints = []v1.Endpoint{}
c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
},
expectedMaxItems: 2,
expectedRes: map[string]*targetgroup.Group{
"endpointslice/default/testendpoints": {
Targets: []model.LabelSet{
{
"__address__": "1.2.3.4:9000",
"__meta_kubernetes_endpointslice_address_target_kind": "",
"__meta_kubernetes_endpointslice_address_target_name": "",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "2.3.4.5:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "3.4.5.6:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "4.5.6.7:9000",
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_name": "testendpoints",

@@ -721,7 +422,7 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
}

func TestEndpointSliceDiscoveryWithService(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

k8sDiscoveryTest{
discovery: n,

@@ -813,7 +514,7 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
}

func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

k8sDiscoveryTest{
discovery: n,

@@ -24,7 +24,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
v1 "k8s.io/api/networking/v1"
"k8s.io/api/networking/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"


@@ -127,8 +126,6 @@ func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) b
switch ingress := o.(type) {
case *v1.Ingress:
ia = newIngressAdaptorFromV1(ingress)
case *v1beta1.Ingress:
ia = newIngressAdaptorFromV1beta1(ingress)
default:
level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err",
fmt.Errorf("received unexpected object: %v", o))

@@ -15,7 +15,6 @@ package kubernetes

import (
v1 "k8s.io/api/networking/v1"
"k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)


@@ -89,56 +88,3 @@ func (i *ingressRuleAdaptorV1) paths() []string {
}

func (i *ingressRuleAdaptorV1) host() string { return i.rule.Host }

// Adaptor for networking.k8s.io/v1beta1.
type ingressAdaptorV1Beta1 struct {
ingress *v1beta1.Ingress
}

func newIngressAdaptorFromV1beta1(ingress *v1beta1.Ingress) ingressAdaptor {
return &ingressAdaptorV1Beta1{ingress: ingress}
}
func (i *ingressAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta }
func (i *ingressAdaptorV1Beta1) name() string { return i.ingress.Name }
func (i *ingressAdaptorV1Beta1) namespace() string { return i.ingress.Namespace }
func (i *ingressAdaptorV1Beta1) labels() map[string]string { return i.ingress.Labels }
func (i *ingressAdaptorV1Beta1) annotations() map[string]string { return i.ingress.Annotations }
func (i *ingressAdaptorV1Beta1) ingressClassName() *string { return i.ingress.Spec.IngressClassName }

func (i *ingressAdaptorV1Beta1) tlsHosts() []string {
var hosts []string
for _, tls := range i.ingress.Spec.TLS {
hosts = append(hosts, tls.Hosts...)
}
return hosts
}

func (i *ingressAdaptorV1Beta1) rules() []ingressRuleAdaptor {
var rules []ingressRuleAdaptor
for _, rule := range i.ingress.Spec.Rules {
rules = append(rules, newIngressRuleAdaptorFromV1Beta1(rule))
}
return rules
}

type ingressRuleAdaptorV1Beta1 struct {
rule v1beta1.IngressRule
}

func newIngressRuleAdaptorFromV1Beta1(rule v1beta1.IngressRule) ingressRuleAdaptor {
return &ingressRuleAdaptorV1Beta1{rule: rule}
}

func (i *ingressRuleAdaptorV1Beta1) paths() []string {
rv := i.rule.IngressRuleValue
if rv.HTTP == nil {
return nil
}
paths := make([]string, len(rv.HTTP.Paths))
for n, p := range rv.HTTP.Paths {
paths[n] = p.Path
}
return paths
}

func (i *ingressRuleAdaptorV1Beta1) host() string { return i.rule.Host }
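
// Illustrative note (hedged): the v1 ingress adaptor keeps the same accessor set shown
// above (name, namespace, labels, annotations, ingressClassName, tlsHosts, rules), so
// callers such as Ingress.process are untouched by the v1beta1 removal; only the type
// switch shrinks by one case.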
|
@ -20,7 +20,6 @@ import (
|
|||
|
||||
"github.com/prometheus/common/model"
|
||||
v1 "k8s.io/api/networking/v1"
|
||||
"k8s.io/api/networking/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
|
@ -89,60 +88,6 @@ func makeIngress(tls TLSMode) *v1.Ingress {
|
|||
return ret
|
||||
}
|
||||
|
||||
func makeIngressV1beta1(tls TLSMode) *v1beta1.Ingress {
|
||||
ret := &v1beta1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testingress",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{"test/label": "testvalue"},
|
||||
Annotations: map[string]string{"test/annotation": "testannotationvalue"},
|
||||
},
|
||||
Spec: v1beta1.IngressSpec{
|
||||
IngressClassName: classString("testclass"),
|
||||
TLS: nil,
|
||||
Rules: []v1beta1.IngressRule{
|
||||
{
|
||||
Host: "example.com",
|
||||
IngressRuleValue: v1beta1.IngressRuleValue{
|
||||
HTTP: &v1beta1.HTTPIngressRuleValue{
|
||||
Paths: []v1beta1.HTTPIngressPath{
|
||||
{Path: "/"},
|
||||
{Path: "/foo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
// No backend config, ignored
|
||||
Host: "nobackend.example.com",
|
||||
IngressRuleValue: v1beta1.IngressRuleValue{
|
||||
HTTP: &v1beta1.HTTPIngressRuleValue{},
|
||||
},
|
||||
},
|
||||
{
|
||||
Host: "test.example.com",
|
||||
IngressRuleValue: v1beta1.IngressRuleValue{
|
||||
HTTP: &v1beta1.HTTPIngressRuleValue{
|
||||
Paths: []v1beta1.HTTPIngressPath{{}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
switch tls {
|
||||
case TLSYes:
|
||||
ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com", "test.example.com"}}}
|
||||
case TLSMixed:
|
||||
ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com"}}}
|
||||
case TLSWildcard:
|
||||
ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"*.example.com"}}}
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func classString(v string) *string {
|
||||
return &v
|
||||
}
|
||||
|
@ -212,20 +157,6 @@ func TestIngressDiscoveryAdd(t *testing.T) {
|
|||
}.Run(t)
|
||||
}
|
||||
|
||||
func TestIngressDiscoveryAddV1beta1(t *testing.T) {
|
||||
n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0")
|
||||
|
||||
k8sDiscoveryTest{
|
||||
discovery: n,
|
||||
afterStart: func() {
|
||||
obj := makeIngressV1beta1(TLSNo)
|
||||
c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
|
||||
},
|
||||
expectedMaxItems: 1,
|
||||
expectedRes: expectedTargetGroups("default", TLSNo),
|
||||
}.Run(t)
|
||||
}
|
||||
|
||||
func TestIngressDiscoveryAddTLS(t *testing.T) {
|
||||
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}})
|
||||
|
||||
|
@@ -240,20 +171,6 @@ func TestIngressDiscoveryAddTLS(t *testing.T) {
	}.Run(t)
}

func TestIngressDiscoveryAddTLSV1beta1(t *testing.T) {
	n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0")

	k8sDiscoveryTest{
		discovery: n,
		afterStart: func() {
			obj := makeIngressV1beta1(TLSYes)
			c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
		},
		expectedMaxItems: 1,
		expectedRes:      expectedTargetGroups("default", TLSYes),
	}.Run(t)
}

func TestIngressDiscoveryAddMixed(t *testing.T) {
	n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}})

@@ -268,20 +185,6 @@ func TestIngressDiscoveryAddMixed(t *testing.T) {
	}.Run(t)
}

func TestIngressDiscoveryAddMixedV1beta1(t *testing.T) {
	n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0")

	k8sDiscoveryTest{
		discovery: n,
		afterStart: func() {
			obj := makeIngressV1beta1(TLSMixed)
			c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
		},
		expectedMaxItems: 1,
		expectedRes:      expectedTargetGroups("default", TLSMixed),
	}.Run(t)
}

func TestIngressDiscoveryNamespaces(t *testing.T) {
	n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})

@@ -303,27 +206,6 @@ func TestIngressDiscoveryNamespaces(t *testing.T) {
	}.Run(t)
}

func TestIngressDiscoveryNamespacesV1beta1(t *testing.T) {
	n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}, "v1.18.0")

	expected := expectedTargetGroups("ns1", TLSNo)
	for k, v := range expectedTargetGroups("ns2", TLSNo) {
		expected[k] = v
	}
	k8sDiscoveryTest{
		discovery: n,
		afterStart: func() {
			for _, ns := range []string{"ns1", "ns2"} {
				obj := makeIngressV1beta1(TLSNo)
				obj.Namespace = ns
				c.NetworkingV1beta1().Ingresses(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
			}
		},
		expectedMaxItems: 2,
		expectedRes:      expected,
	}.Run(t)
}

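The hand-rolled map merge in the test above predates `maps.Copy` from the Go 1.21 standard library; the loop can now be a one-liner. A quick sketch:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	expected := map[string]int{"ns1/testingress": 1}
	// Equivalent to: for k, v := range src { expected[k] = v }
	maps.Copy(expected, map[string]int{"ns2/testingress": 2})
	fmt.Println(expected)
}
```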
func TestIngressDiscoveryOwnNamespace(t *testing.T) {
	n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{IncludeOwnNamespace: true})

@@ -25,8 +25,6 @@ import (
	"github.com/prometheus/prometheus/util/strutil"

	disv1beta1 "k8s.io/api/discovery/v1beta1"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"

@@ -36,12 +34,10 @@ import (
	apiv1 "k8s.io/api/core/v1"
	disv1 "k8s.io/api/discovery/v1"
	networkv1 "k8s.io/api/networking/v1"
	"k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	utilversion "k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
@@ -401,55 +397,22 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {

	switch d.role {
	case RoleEndpointSlice:
		// Check "networking.k8s.io/v1" availability with retries.
		// If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility
		var v1Supported bool
		if retryOnError(ctx, 10*time.Second,
			func() (err error) {
				v1Supported, err = checkDiscoveryV1Supported(d.client)
				if err != nil {
					level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err)
				}
				return err
			},
		) {
			d.Unlock()
			return
		}

		for _, namespace := range namespaces {
			var informer cache.SharedIndexInformer
			if v1Supported {
				e := d.client.DiscoveryV1().EndpointSlices(namespace)
				elw := &cache.ListWatch{
					ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
						options.FieldSelector = d.selectors.endpointslice.field
						options.LabelSelector = d.selectors.endpointslice.label
						return e.List(ctx, options)
					},
					WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
						options.FieldSelector = d.selectors.endpointslice.field
						options.LabelSelector = d.selectors.endpointslice.label
						return e.Watch(ctx, options)
					},
				}
				informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{})
			} else {
				e := d.client.DiscoveryV1beta1().EndpointSlices(namespace)
				elw := &cache.ListWatch{
					ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
						options.FieldSelector = d.selectors.endpointslice.field
						options.LabelSelector = d.selectors.endpointslice.label
						return e.List(ctx, options)
					},
					WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
						options.FieldSelector = d.selectors.endpointslice.field
						options.LabelSelector = d.selectors.endpointslice.label
						return e.Watch(ctx, options)
					},
				}
				informer = d.newEndpointSlicesByNodeInformer(elw, &disv1beta1.EndpointSlice{})
			e := d.client.DiscoveryV1().EndpointSlices(namespace)
			elw := &cache.ListWatch{
				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.endpointslice.field
					options.LabelSelector = d.selectors.endpointslice.label
					return e.List(ctx, options)
				},
				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.endpointslice.field
					options.LabelSelector = d.selectors.endpointslice.label
					return e.Watch(ctx, options)
				},
			}
			informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{})

			s := d.client.CoreV1().Services(namespace)
			slw := &cache.ListWatch{
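The `retryOnError` helper used above is a small pattern worth isolating: its signature (visible in a later hunk header) takes a context, an interval, and a closure, and reports whether it gave up because the context was canceled. A self-contained sketch of one plausible implementation, assuming only that signature:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryOnError keeps calling f until it succeeds or ctx is done.
// It returns true if it stopped because the context was canceled.
func retryOnError(ctx context.Context, interval time.Duration, f func() error) (canceled bool) {
	err := f()
	for err != nil {
		select {
		case <-ctx.Done():
			return true
		case <-time.After(interval):
			err = f()
		}
	}
	return false
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()
	attempts := 0
	canceled := retryOnError(ctx, 50*time.Millisecond, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready")
		}
		return nil
	})
	fmt.Println("canceled:", canceled, "attempts:", attempts)
}
```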
@@ -609,55 +572,22 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
			go svc.informer.Run(ctx.Done())
		}
	case RoleIngress:
		// Check "networking.k8s.io/v1" availability with retries.
		// If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility
		var v1Supported bool
		if retryOnError(ctx, 10*time.Second,
			func() (err error) {
				v1Supported, err = checkNetworkingV1Supported(d.client)
				if err != nil {
					level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err)
				}
				return err
			},
		) {
			d.Unlock()
			return
		}

		for _, namespace := range namespaces {
			var informer cache.SharedInformer
			if v1Supported {
				i := d.client.NetworkingV1().Ingresses(namespace)
				ilw := &cache.ListWatch{
					ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
						options.FieldSelector = d.selectors.ingress.field
						options.LabelSelector = d.selectors.ingress.label
						return i.List(ctx, options)
					},
					WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
						options.FieldSelector = d.selectors.ingress.field
						options.LabelSelector = d.selectors.ingress.label
						return i.Watch(ctx, options)
					},
				}
				informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled)
			} else {
				i := d.client.NetworkingV1beta1().Ingresses(namespace)
				ilw := &cache.ListWatch{
					ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
						options.FieldSelector = d.selectors.ingress.field
						options.LabelSelector = d.selectors.ingress.label
						return i.List(ctx, options)
					},
					WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
						options.FieldSelector = d.selectors.ingress.field
						options.LabelSelector = d.selectors.ingress.label
						return i.Watch(ctx, options)
					},
				}
				informer = d.mustNewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled)
			i := d.client.NetworkingV1().Ingresses(namespace)
			ilw := &cache.ListWatch{
				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
					options.FieldSelector = d.selectors.ingress.field
					options.LabelSelector = d.selectors.ingress.label
					return i.List(ctx, options)
				},
				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
					options.FieldSelector = d.selectors.ingress.field
					options.LabelSelector = d.selectors.ingress.label
					return i.Watch(ctx, options)
				},
			}
			informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled)
			ingress := NewIngress(
				log.With(d.logger, "role", "ingress"),
				informer,
@@ -720,20 +650,6 @@ func retryOnError(ctx context.Context, interval time.Duration, f func() error) (canceled bool) {
	}
}

func checkNetworkingV1Supported(client kubernetes.Interface) (bool, error) {
	k8sVer, err := client.Discovery().ServerVersion()
	if err != nil {
		return false, err
	}
	semVer, err := utilversion.ParseSemantic(k8sVer.String())
	if err != nil {
		return false, err
	}
	// networking.k8s.io/v1 is available since Kubernetes v1.19
	// https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md
	return semVer.Major() >= 1 && semVer.Minor() >= 19, nil
}
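One subtlety in the version gate above: `semVer.Major() >= 1 && semVer.Minor() >= 19` would wrongly reject a hypothetical v2.0 server (minor 0 is less than 19). The same `k8s.io/apimachinery` version package offers an `AtLeast` comparison that avoids the pitfall; a hedged standalone sketch, not how the repo does it:

```go
package main

import (
	"fmt"

	utilversion "k8s.io/apimachinery/pkg/util/version"
)

var minNetworkingV1 = utilversion.MustParseSemantic("v1.19.0")

// networkingV1Supported compares full semantic versions, so a future
// v2.x server is (correctly) treated as supporting networking.k8s.io/v1.
func networkingV1Supported(gitVersion string) (bool, error) {
	semVer, err := utilversion.ParseSemantic(gitVersion)
	if err != nil {
		return false, err
	}
	return semVer.AtLeast(minNetworkingV1), nil
}

func main() {
	for _, v := range []string{"v1.18.1", "v1.19.0", "v2.0.0"} {
		ok, _ := networkingV1Supported(v)
		fmt.Println(v, ok) // v2.0.0 passes here but fails the Major/Minor check.
	}
}
```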

func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer {
	nlw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
@@ -834,19 +750,6 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
				}
			}
		}
	case *disv1beta1.EndpointSlice:
		for _, target := range e.Endpoints {
			if target.TargetRef != nil {
				switch target.TargetRef.Kind {
				case "Pod":
					if target.NodeName != nil {
						nodes = append(nodes, *target.NodeName)
					}
				case "Node":
					nodes = append(nodes, target.TargetRef.Name)
				}
			}
		}
	default:
		return nil, fmt.Errorf("object is not an endpointslice")
	}
@@ -882,21 +785,6 @@ func (d *Discovery) mustNewSharedIndexInformer(lw cache.ListerWatcher, exampleOb
	return informer
}

func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) {
	k8sVer, err := client.Discovery().ServerVersion()
	if err != nil {
		return false, err
	}
	semVer, err := utilversion.ParseSemantic(k8sVer.String())
	if err != nil {
		return false, err
	}
	// The discovery.k8s.io/v1beta1 API version of EndpointSlice will no longer be served in v1.25.
	// discovery.k8s.io/v1 is available since Kubernetes v1.21
	// https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-25
	return semVer.Major() >= 1 && semVer.Minor() >= 21, nil
}

func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, role Role) {
	labelSet[model.LabelName(metaLabelPrefix+string(role)+"_name")] = lv(objectMeta.Name)

@@ -46,7 +46,7 @@ func TestMain(m *testing.M) {

// makeDiscovery creates a kubernetes.Discovery instance for testing.
func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime.Object) (*Discovery, kubernetes.Interface) {
	return makeDiscoveryWithVersion(role, nsDiscovery, "v1.22.0", objects...)
	return makeDiscoveryWithVersion(role, nsDiscovery, "v1.25.0", objects...)
}

// makeDiscoveryWithVersion creates a kubernetes.Discovery instance with the specified kubernetes version for testing.
@@ -285,40 +285,6 @@ func TestRetryOnError(t *testing.T) {
	}
}

func TestCheckNetworkingV1Supported(t *testing.T) {
	tests := []struct {
		version       string
		wantSupported bool
		wantErr       bool
	}{
		{version: "v1.18.0", wantSupported: false, wantErr: false},
		{version: "v1.18.1", wantSupported: false, wantErr: false},
		// networking v1 is supported since Kubernetes v1.19
		{version: "v1.19.0", wantSupported: true, wantErr: false},
		{version: "v1.20.0-beta.2", wantSupported: true, wantErr: false},
		// error patterns
		{version: "", wantSupported: false, wantErr: true},
		{version: "<>", wantSupported: false, wantErr: true},
	}

	for _, tc := range tests {
		tc := tc
		t.Run(tc.version, func(t *testing.T) {
			clientset := fake.NewSimpleClientset()
			fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery)
			fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: tc.version}
			supported, err := checkNetworkingV1Supported(clientset)

			if tc.wantErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
			require.Equal(t, tc.wantSupported, supported)
		})
	}
}

func TestFailuresCountMetric(t *testing.T) {
	tests := []struct {
		role Role
@@ -1,332 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package legacymanager

import (
	"context"
	"fmt"
	"reflect"
	"sync"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

type poolKey struct {
	setName  string
	provider string
}

// provider holds a Discoverer instance, its configuration and its subscribers.
type provider struct {
	name   string
	d      discovery.Discoverer
	subs   []string
	config interface{}
}

// NewManager is the Discovery Manager constructor.
func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]discovery.DiscovererMetrics, options ...func(*Manager)) *Manager {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	mgr := &Manager{
		logger:         logger,
		syncCh:         make(chan map[string][]*targetgroup.Group),
		targets:        make(map[poolKey]map[string]*targetgroup.Group),
		discoverCancel: []context.CancelFunc{},
		ctx:            ctx,
		updatert:       5 * time.Second,
		triggerSend:    make(chan struct{}, 1),
		registerer:     registerer,
		sdMetrics:      sdMetrics,
	}
	for _, option := range options {
		option(mgr)
	}

	// Register the metrics.
	// We have to do this after setting all options, so that the name of the Manager is set.
	if metrics, err := discovery.NewManagerMetrics(registerer, mgr.name); err == nil {
		mgr.metrics = metrics
	} else {
		level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
		return nil
	}

	return mgr
}

// Name sets the name of the manager.
func Name(n string) func(*Manager) {
	return func(m *Manager) {
		m.mtx.Lock()
		defer m.mtx.Unlock()
		m.name = n
	}
}

// Manager maintains a set of discovery providers and sends each update to a map channel.
// Targets are grouped by the target set name.
type Manager struct {
	logger         log.Logger
	name           string
	mtx            sync.RWMutex
	ctx            context.Context
	discoverCancel []context.CancelFunc

	// Some Discoverers (e.g. k8s) send only the updates for a given target group,
	// so we use map[tg.Source]*targetgroup.Group to know which group to update.
	targets map[poolKey]map[string]*targetgroup.Group
	// providers keeps track of SD providers.
	providers []*provider
	// The sync channel sends the updates as a map where the key is the job value from the scrape config.
	syncCh chan map[string][]*targetgroup.Group

	// How long to wait before sending updates to the channel. The variable
	// should only be modified in unit tests.
	updatert time.Duration

	// The triggerSend channel signals to the manager that new updates have been received from providers.
	triggerSend chan struct{}

	// A registerer for all service discovery metrics.
	registerer prometheus.Registerer

	metrics   *discovery.Metrics
	sdMetrics map[string]discovery.DiscovererMetrics
}

// Run starts the background processing.
func (m *Manager) Run() error {
	go m.sender()
	<-m.ctx.Done()
	m.cancelDiscoverers()
	return m.ctx.Err()
}

// SyncCh returns a read only channel used by all the clients to receive target updates.
func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group {
	return m.syncCh
}

// ApplyConfig removes all running discovery providers and starts new ones using the provided config.
func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	for pk := range m.targets {
		if _, ok := cfg[pk.setName]; !ok {
			m.metrics.DiscoveredTargets.DeleteLabelValues(m.name, pk.setName)
		}
	}
	m.cancelDiscoverers()
	m.targets = make(map[poolKey]map[string]*targetgroup.Group)
	m.providers = nil
	m.discoverCancel = nil

	failedCount := 0
	for name, scfg := range cfg {
		failedCount += m.registerProviders(scfg, name)
		m.metrics.DiscoveredTargets.WithLabelValues(name).Set(0)
	}
	m.metrics.FailedConfigs.Set(float64(failedCount))

	for _, prov := range m.providers {
		m.startProvider(m.ctx, prov)
	}

	return nil
}

// StartCustomProvider is used for sdtool. Only use this if you know what you're doing.
func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker discovery.Discoverer) {
	p := &provider{
		name: name,
		d:    worker,
		subs: []string{name},
	}
	m.providers = append(m.providers, p)
	m.startProvider(ctx, p)
}

func (m *Manager) startProvider(ctx context.Context, p *provider) {
	level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs))
	ctx, cancel := context.WithCancel(ctx)
	updates := make(chan []*targetgroup.Group)

	m.discoverCancel = append(m.discoverCancel, cancel)

	go p.d.Run(ctx, updates)
	go m.updater(ctx, p, updates)
}

func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) {
	for {
		select {
		case <-ctx.Done():
			return
		case tgs, ok := <-updates:
			m.metrics.ReceivedUpdates.Inc()
			if !ok {
				level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name)
				return
			}

			for _, s := range p.subs {
				m.updateGroup(poolKey{setName: s, provider: p.name}, tgs)
			}

			select {
			case m.triggerSend <- struct{}{}:
			default:
			}
		}
	}
}

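The non-blocking send at the end of `updater` is the load-shedding trick that makes the manager's throttling work: `triggerSend` has capacity 1, so any number of provider updates collapse into a single pending signal for `sender` to pick up on its next tick. A stripped-down sketch of the pattern:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Capacity 1: a "dirty flag" as a channel. Extra signals are dropped,
	// not queued, so bursts of updates coalesce into one pending send.
	trigger := make(chan struct{}, 1)

	notify := func() {
		select {
		case trigger <- struct{}{}:
		default: // Already pending; drop.
		}
	}

	// A burst of updates; only one signal survives.
	for i := 0; i < 5; i++ {
		notify()
	}

	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	<-ticker.C
	select {
	case <-trigger:
		fmt.Println("flushed once for 5 updates")
	default:
		fmt.Println("nothing pending")
	}
}
```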
func (m *Manager) sender() {
	ticker := time.NewTicker(m.updatert)
	defer ticker.Stop()

	for {
		select {
		case <-m.ctx.Done():
			return
		case <-ticker.C: // Some discoverers send updates too often, so we throttle these with the ticker.
			select {
			case <-m.triggerSend:
				m.metrics.SentUpdates.Inc()
				select {
				case m.syncCh <- m.allGroups():
				default:
					m.metrics.DelayedUpdates.Inc()
					level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle")
					select {
					case m.triggerSend <- struct{}{}:
					default:
					}
				}
			default:
			}
		}
	}
}

func (m *Manager) cancelDiscoverers() {
	for _, c := range m.discoverCancel {
		c()
	}
}

func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	if _, ok := m.targets[poolKey]; !ok {
		m.targets[poolKey] = make(map[string]*targetgroup.Group)
	}
	for _, tg := range tgs {
		if tg != nil { // Some Discoverers send nil target groups, so we need to check for them to avoid panics.
			m.targets[poolKey][tg.Source] = tg
		}
	}
}

func (m *Manager) allGroups() map[string][]*targetgroup.Group {
	m.mtx.RLock()
	defer m.mtx.RUnlock()

	tSets := map[string][]*targetgroup.Group{}
	n := map[string]int{}
	for pkey, tsets := range m.targets {
		for _, tg := range tsets {
			// Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager'
			// to signal that it needs to stop all scrape loops for this target set.
			tSets[pkey.setName] = append(tSets[pkey.setName], tg)
			n[pkey.setName] += len(tg.Targets)
		}
	}
	for setName, v := range n {
		m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v))
	}
	return tSets
}

// registerProviders returns the number of failed SD configs.
func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int {
	var (
		failed int
		added  bool
	)
	add := func(cfg discovery.Config) {
		for _, p := range m.providers {
			if reflect.DeepEqual(cfg, p.config) {
				p.subs = append(p.subs, setName)
				added = true
				return
			}
		}
		typ := cfg.Name()
		d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{
			Logger:  log.With(m.logger, "discovery", typ, "config", setName),
			Metrics: m.sdMetrics[typ],
		})
		if err != nil {
			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName)
			failed++
			return
		}
		m.providers = append(m.providers, &provider{
			name:   fmt.Sprintf("%s/%d", typ, len(m.providers)),
			d:      d,
			config: cfg,
			subs:   []string{setName},
		})
		added = true
	}
	for _, cfg := range cfgs {
		add(cfg)
	}
	if !added {
		// Add an empty target group to force the refresh of the corresponding
		// scrape pool and to notify the receiver that this target set has no
		// current targets.
		// It can happen because the combined set of SD configurations is empty
		// or because we fail to instantiate all the SD configurations.
		add(discovery.StaticConfig{{}})
	}
	return failed
}

// StaticProvider holds a list of target groups that never change.
type StaticProvider struct {
	TargetGroups []*targetgroup.Group
}

// Run implements the Worker interface.
func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	// We still have to consider that the consumer exits right away in which case
	// the context will be canceled.
	select {
	case ch <- sd.TargetGroups:
	case <-ctx.Done():
	}
	close(ch)
}
File diff suppressed because it is too large
@@ -1,261 +0,0 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package legacymanager

import (
	"errors"
	"fmt"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"sync"

	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

const (
	configFieldPrefix      = "AUTO_DISCOVERY_"
	staticConfigsKey       = "static_configs"
	staticConfigsFieldName = configFieldPrefix + staticConfigsKey
)

var (
	configNames      = make(map[string]discovery.Config)
	configFieldNames = make(map[reflect.Type]string)
	configFields     []reflect.StructField

	configTypesMu sync.Mutex
	configTypes   = make(map[reflect.Type]reflect.Type)

	emptyStructType = reflect.TypeOf(struct{}{})
	configsType     = reflect.TypeOf(discovery.Configs{})
)

// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling.
func RegisterConfig(config discovery.Config) {
	registerConfig(config.Name()+"_sd_configs", reflect.TypeOf(config), config)
}

func init() {
	// N.B.: static_configs is the only Config type implemented by default.
	// All other types are registered at init by their implementing packages.
	elemTyp := reflect.TypeOf(&targetgroup.Group{})
	registerConfig(staticConfigsKey, elemTyp, discovery.StaticConfig{})
}

func registerConfig(yamlKey string, elemType reflect.Type, config discovery.Config) {
	name := config.Name()
	if _, ok := configNames[name]; ok {
		panic(fmt.Sprintf("discovery: Config named %q is already registered", name))
	}
	configNames[name] = config

	fieldName := configFieldPrefix + yamlKey // Field must be exported.
	configFieldNames[elemType] = fieldName

	// Insert fields in sorted order.
	i := sort.Search(len(configFields), func(k int) bool {
		return fieldName < configFields[k].Name
	})
	configFields = append(configFields, reflect.StructField{}) // Add empty field at end.
	copy(configFields[i+1:], configFields[i:])                 // Shift fields to the right.
	configFields[i] = reflect.StructField{                     // Write new field in place.
		Name: fieldName,
		Type: reflect.SliceOf(elemType),
		Tag:  reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`),
	}
}

func getConfigType(out reflect.Type) reflect.Type {
	configTypesMu.Lock()
	defer configTypesMu.Unlock()
	if typ, ok := configTypes[out]; ok {
		return typ
	}
	// Initial exported fields map one-to-one.
	var fields []reflect.StructField
	for i, n := 0, out.NumField(); i < n; i++ {
		switch field := out.Field(i); {
		case field.PkgPath == "" && field.Type != configsType:
			fields = append(fields, field)
		default:
			fields = append(fields, reflect.StructField{
				Name:    "_" + field.Name, // Field must be unexported.
				PkgPath: out.PkgPath(),
				Type:    emptyStructType,
			})
		}
	}
	// Append extra config fields on the end.
	fields = append(fields, configFields...)
	typ := reflect.StructOf(fields)
	configTypes[out] = typ
	return typ
}

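The dynamic struct trick in `getConfigType` deserves a closer look: `reflect.StructOf` builds a brand-new struct type at runtime whose fields carry YAML tags, which is how every registered `*_sd_configs` key can be unmarshaled without the outer struct knowing about it. A toy demonstration of the same primitive, with field names made up for illustration:

```go
package main

import (
	"fmt"
	"reflect"

	"gopkg.in/yaml.v2"
)

func main() {
	// Build a struct type at runtime, as getConfigType does, with a
	// YAML-tagged field per "registered" key.
	typ := reflect.StructOf([]reflect.StructField{
		{
			Name: "AUTO_DISCOVERY_static_configs", // Exported, like configFieldPrefix + key.
			Type: reflect.TypeOf([]string{}),
			Tag:  reflect.StructTag(`yaml:"static_configs,omitempty"`),
		},
	})

	// Unmarshal into a pointer to the dynamic type.
	ptr := reflect.New(typ)
	data := []byte("static_configs: [a, b]\n")
	if err := yaml.Unmarshal(data, ptr.Interface()); err != nil {
		panic(err)
	}
	fmt.Println(ptr.Elem().Field(0).Interface()) // [a b]
}
```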
// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs
// that have a Configs field that should be inlined.
func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error {
	outVal := reflect.ValueOf(out)
	if outVal.Kind() != reflect.Ptr {
		return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
	}
	outVal = outVal.Elem()
	if outVal.Kind() != reflect.Struct {
		return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
	}
	outTyp := outVal.Type()

	cfgTyp := getConfigType(outTyp)
	cfgPtr := reflect.New(cfgTyp)
	cfgVal := cfgPtr.Elem()

	// Copy shared fields (defaults) to dynamic value.
	var configs *discovery.Configs
	for i, n := 0, outVal.NumField(); i < n; i++ {
		if outTyp.Field(i).Type == configsType {
			configs = outVal.Field(i).Addr().Interface().(*discovery.Configs)
			continue
		}
		if cfgTyp.Field(i).PkgPath != "" {
			continue // Field is unexported: ignore.
		}
		cfgVal.Field(i).Set(outVal.Field(i))
	}
	if configs == nil {
		return fmt.Errorf("discovery: Configs field not found in type: %T", out)
	}

	// Unmarshal into dynamic value.
	if err := unmarshal(cfgPtr.Interface()); err != nil {
		return replaceYAMLTypeError(err, cfgTyp, outTyp)
	}

	// Copy shared fields from dynamic value.
	for i, n := 0, outVal.NumField(); i < n; i++ {
		if cfgTyp.Field(i).PkgPath != "" {
			continue // Field is unexported: ignore.
		}
		outVal.Field(i).Set(cfgVal.Field(i))
	}

	var err error
	*configs, err = readConfigs(cfgVal, outVal.NumField())
	return err
}

func readConfigs(structVal reflect.Value, startField int) (discovery.Configs, error) {
	var (
		configs discovery.Configs
		targets []*targetgroup.Group
	)
	for i, n := startField, structVal.NumField(); i < n; i++ {
		field := structVal.Field(i)
		if field.Kind() != reflect.Slice {
			panic("discovery: internal error: field is not a slice")
		}
		for k := 0; k < field.Len(); k++ {
			val := field.Index(k)
			if val.IsZero() || (val.Kind() == reflect.Ptr && val.Elem().IsZero()) {
				key := configFieldNames[field.Type().Elem()]
				key = strings.TrimPrefix(key, configFieldPrefix)
				return nil, fmt.Errorf("empty or null section in %s", key)
			}
			switch c := val.Interface().(type) {
			case *targetgroup.Group:
				// Add index to the static config target groups for unique identification
				// within scrape pool.
				c.Source = strconv.Itoa(len(targets))
				// Coalesce multiple static configs into a single static config.
				targets = append(targets, c)
			case discovery.Config:
				configs = append(configs, c)
			default:
				panic("discovery: internal error: slice element is not a Config")
			}
		}
	}
	if len(targets) > 0 {
		configs = append(configs, discovery.StaticConfig(targets))
	}
	return configs, nil
}

// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs
// that have a Configs field that should be inlined.
func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) {
	inVal := reflect.ValueOf(in)
	for inVal.Kind() == reflect.Ptr {
		inVal = inVal.Elem()
	}
	inTyp := inVal.Type()

	cfgTyp := getConfigType(inTyp)
	cfgPtr := reflect.New(cfgTyp)
	cfgVal := cfgPtr.Elem()

	// Copy shared fields to dynamic value.
	var configs *discovery.Configs
	for i, n := 0, inTyp.NumField(); i < n; i++ {
		if inTyp.Field(i).Type == configsType {
			configs = inVal.Field(i).Addr().Interface().(*discovery.Configs)
		}
		if cfgTyp.Field(i).PkgPath != "" {
			continue // Field is unexported: ignore.
		}
		cfgVal.Field(i).Set(inVal.Field(i))
	}
	if configs == nil {
		return nil, fmt.Errorf("discovery: Configs field not found in type: %T", in)
	}

	if err := writeConfigs(cfgVal, *configs); err != nil {
		return nil, err
	}

	return cfgPtr.Interface(), nil
}

func writeConfigs(structVal reflect.Value, configs discovery.Configs) error {
	targets := structVal.FieldByName(staticConfigsFieldName).Addr().Interface().(*[]*targetgroup.Group)
	for _, c := range configs {
		if sc, ok := c.(discovery.StaticConfig); ok {
			*targets = append(*targets, sc...)
			continue
		}
		fieldName, ok := configFieldNames[reflect.TypeOf(c)]
		if !ok {
			return fmt.Errorf("discovery: cannot marshal unregistered Config type: %T", c)
		}
		field := structVal.FieldByName(fieldName)
		field.Set(reflect.Append(field, reflect.ValueOf(c)))
	}
	return nil
}

func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
	var e *yaml.TypeError
	if errors.As(err, &e) {
		oldStr := oldTyp.String()
		newStr := newTyp.String()
		for i, s := range e.Errors {
			e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr)
		}
	}
	return err
}

@@ -98,7 +98,7 @@ func (m *SDMock) SetupHandlers() {
			if len(query) == 2 {
				h := sha1.New()
				h.Write([]byte(query[1]))
				// Avoing long filenames for Windows.
				// Avoiding long filenames for Windows.
				f += "__" + base64.URLEncoding.EncodeToString(h.Sum(nil))[:10]
			}
		}

@@ -224,7 +224,7 @@
        "Args": [
          "--config.file=/etc/prometheus/prometheus.yml",
          "--storage.tsdb.path=/prometheus",
          "--storage.tsdb.retention=24h"
          "--storage.tsdb.retention.time=24h"
        ],
        "Privileges": {
          "CredentialSpec": null,
2 discovery/moby/testdata/swarmprom/tasks.json vendored
@@ -973,7 +973,7 @@
        "Args": [
          "--config.file=/etc/prometheus/prometheus.yml",
          "--storage.tsdb.path=/prometheus",
          "--storage.tsdb.retention=24h"
          "--storage.tsdb.retention.time=24h"
        ],
        "Privileges": {
          "CredentialSpec": null,
@@ -41,10 +41,10 @@ const (
	uyuniMetaLabelPrefix     = model.MetaLabelPrefix + "uyuni_"
	uyuniLabelMinionHostname = uyuniMetaLabelPrefix + "minion_hostname"
	uyuniLabelPrimaryFQDN    = uyuniMetaLabelPrefix + "primary_fqdn"
	uyuniLablelSystemID      = uyuniMetaLabelPrefix + "system_id"
	uyuniLablelGroups        = uyuniMetaLabelPrefix + "groups"
	uyuniLablelEndpointName  = uyuniMetaLabelPrefix + "endpoint_name"
	uyuniLablelExporter      = uyuniMetaLabelPrefix + "exporter"
	uyuniLabelSystemID       = uyuniMetaLabelPrefix + "system_id"
	uyuniLabelGroups         = uyuniMetaLabelPrefix + "groups"
	uyuniLabelEndpointName   = uyuniMetaLabelPrefix + "endpoint_name"
	uyuniLabelExporter       = uyuniMetaLabelPrefix + "exporter"
	uyuniLabelProxyModule    = uyuniMetaLabelPrefix + "proxy_module"
	uyuniLabelMetricsPath    = uyuniMetaLabelPrefix + "metrics_path"
	uyuniLabelScheme         = uyuniMetaLabelPrefix + "scheme"

@@ -270,10 +270,10 @@ func (d *Discovery) getEndpointLabels(
		model.AddressLabel:       model.LabelValue(addr),
		uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname),
		uyuniLabelPrimaryFQDN:    model.LabelValue(networkInfo.PrimaryFQDN),
		uyuniLablelSystemID:      model.LabelValue(strconv.Itoa(endpoint.SystemID)),
		uyuniLablelGroups:        model.LabelValue(strings.Join(managedGroupNames, d.separator)),
		uyuniLablelEndpointName:  model.LabelValue(endpoint.EndpointName),
		uyuniLablelExporter:      model.LabelValue(endpoint.ExporterName),
		uyuniLabelSystemID:       model.LabelValue(strconv.Itoa(endpoint.SystemID)),
		uyuniLabelGroups:         model.LabelValue(strings.Join(managedGroupNames, d.separator)),
		uyuniLabelEndpointName:   model.LabelValue(endpoint.EndpointName),
		uyuniLabelExporter:       model.LabelValue(endpoint.ExporterName),
		uyuniLabelProxyModule:    model.LabelValue(endpoint.Module),
		uyuniLabelMetricsPath:    model.LabelValue(endpoint.Path),
		uyuniLabelScheme:         model.LabelValue(scheme),
@@ -15,6 +15,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">-h</code>, <code class="text-nowrap">--help</code> | Show context-sensitive help (also try --help-long and --help-man). | |
| <code class="text-nowrap">--version</code> | Show application version. | |
| <code class="text-nowrap">--config.file</code> | Prometheus configuration file path. | `prometheus.yml` |
| <code class="text-nowrap">--config.auto-reload-interval</code> | Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes. | `30s` |
| <code class="text-nowrap">--web.listen-address</code> <code class="text-nowrap">...</code> | Address to listen on for UI, API, and telemetry. Can be repeated. | `0.0.0.0:9090` |
| <code class="text-nowrap">--auto-gomemlimit.ratio</code> | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory. | `0.9` |
| <code class="text-nowrap">--web.config.file</code> | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | |

@@ -32,8 +33,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--web.page-title</code> | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` |
| <code class="text-nowrap">--web.cors.origin</code> | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com' | `.*` |
| <code class="text-nowrap">--storage.tsdb.path</code> | Base path for metrics storage. Use with server mode only. | `data/` |
| <code class="text-nowrap">--storage.tsdb.retention</code> | [DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use "storage.tsdb.retention.time" instead. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.retention.time</code> | How long to retain samples in storage. When this flag is set it overrides "storage.tsdb.retention". If neither this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.retention.time</code> | How long to retain samples in storage. If neither this flag nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.retention.size</code> | Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.no-lockfile</code> | Do not create lockfile in data directory. Use with server mode only. | `false` |
| <code class="text-nowrap">--storage.tsdb.head-chunks-write-queue-size</code> | Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental. Use with server mode only. | `0` |

@@ -56,8 +56,8 @@ The Prometheus monitoring server
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| <code class="text-nowrap">--scrape.name-escaping-scheme</code> | Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots". | `values` |
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...</code> | Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, no-default-scrape-port, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...</code> | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--agent</code> | Run Prometheus in 'Agent mode'. | |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |
@@ -575,7 +575,7 @@ Dump samples from a TSDB.

| Flag | Description | Default |
| --- | --- | --- |
| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` |
| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. | |
| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
| <code class="text-nowrap">--match</code> <code class="text-nowrap">...</code> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |

@@ -602,7 +602,7 @@ Dump samples from a TSDB.

| Flag | Description | Default |
| --- | --- | --- |
| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` |
| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. | |
| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
| <code class="text-nowrap">--match</code> <code class="text-nowrap">...</code> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
@@ -84,6 +84,10 @@ global:
  # Reloading the configuration will reopen the file.
  [ query_log_file: <string> ]

  # File to which scrape failures are logged.
  # Reloading the configuration will reopen the file.
  [ scrape_failure_log_file: <string> ]

  # An uncompressed response body larger than this many bytes will cause the
  # scrape to fail. 0 means no limit. Example: 100MB.
  # This is an experimental feature, this behaviour could
@@ -122,9 +126,9 @@ global:
  [ keep_dropped_targets: <int> | default = 0 ]

  # Specifies the validation scheme for metric and label names. Either blank or
  # "legacy" for letters, numbers, colons, and underscores; or "utf8" for full
  # UTF-8 support.
  [ metric_name_validation_scheme <string> | default "legacy" ]
  # "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons,
  # and underscores.
  [ metric_name_validation_scheme <string> | default "utf8" ]

runtime:
  # Configure the Go garbage collector GOGC parameter
@@ -319,6 +323,10 @@ http_headers:
    # Files to read header values from.
    [ files: [<string>, ...] ] ]

# File to which scrape failures are logged.
# Reloading the configuration will reopen the file.
[ scrape_failure_log_file: <string> ]

# List of Azure service discovery configurations.
azure_sd_configs:
  [ - <azure_sd_config> ... ]
@@ -477,10 +485,10 @@ metric_relabel_configs:
# that will be kept in memory. 0 means no limit.
[ keep_dropped_targets: <int> | default = 0 ]

# Specifies the validation scheme for metric and label names. Either blank or
# "legacy" for letters, numbers, colons, and underscores; or "utf8" for full
# UTF-8 support.
[ metric_name_validation_scheme <string> | default "legacy" ]
# Specifies the validation scheme for metric and label names. Either blank or
# "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, and
# underscores.
[ metric_name_validation_scheme <string> | default "utf8" ]

# Limit on total number of positive and negative buckets allowed in a single
# native histogram. The resolution of a histogram with more buckets will be
@@ -608,6 +616,18 @@ tls_config:
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
  [ <string>: [<secret>, ...] ] ]

# Custom HTTP headers to be sent along with each request.
# Headers that are set by Prometheus itself can't be overwritten.
http_headers:
  # Header name.
  [ <string>:
    # Header values.
    [ values: [<string>, ...] ]
    # Headers values. Hidden in configuration page.
    [ secrets: [<secret>, ...] ]
    # Files to read header values from.
    [ files: [<string>, ...] ] ]
```

### `<azure_sd_config>`
@@ -699,6 +719,18 @@ oauth2:
[ proxy_connect_header:
  [ <string>: [<secret>, ...] ] ]

# Custom HTTP headers to be sent along with each request.
# Headers that are set by Prometheus itself can't be overwritten.
http_headers:
  # Header name.
  [ <string>:
    # Header values.
    [ values: [<string>, ...] ]
    # Headers values. Hidden in configuration page.
    [ secrets: [<secret>, ...] ]
    # Files to read header values from.
    [ files: [<string>, ...] ] ]

# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]

@@ -812,6 +844,18 @@ oauth2:
[ proxy_connect_header:
  [ <string>: [<secret>, ...] ] ]

# Custom HTTP headers to be sent along with each request.
# Headers that are set by Prometheus itself can't be overwritten.
http_headers:
  # Header name.
  [ <string>:
    # Header values.
    [ values: [<string>, ...] ]
    # Headers values. Hidden in configuration page.
    [ secrets: [<secret>, ...] ]
    # Files to read header values from.
    [ files: [<string>, ...] ] ]

# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]

@@ -899,6 +943,18 @@ oauth2:
[ proxy_connect_header:
  [ <string>: [<secret>, ...] ] ]

# Custom HTTP headers to be sent along with each request.
# Headers that are set by Prometheus itself can't be overwritten.
http_headers:
  # Header name.
  [ <string>:
    # Header values.
    [ values: [<string>, ...] ]
    # Headers values. Hidden in configuration page.
    [ secrets: [<secret>, ...] ]
    # Files to read header values from.
    [ files: [<string>, ...] ] ]

# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]

@@ -957,6 +1013,18 @@ host: <string>
[ proxy_connect_header:
  [ <string>: [<secret>, ...] ] ]

# Custom HTTP headers to be sent along with each request.
# Headers that are set by Prometheus itself can't be overwritten.
http_headers:
  # Header name.
  [ <string>:
    # Header values.
    [ values: [<string>, ...] ]
    # Headers values. Hidden in configuration page.
    [ secrets: [<secret>, ...] ]
    # Files to read header values from.
    [ files: [<string>, ...] ] ]

# TLS configuration.
tls_config:
  [ <tls_config> ]

@@ -1137,6 +1205,18 @@ host: <string>
[ proxy_connect_header:
  [ <string>: [<secret>, ...] ] ]

# Custom HTTP headers to be sent along with each request.
# Headers that are set by Prometheus itself can't be overwritten.
http_headers:
  # Header name.
  [ <string>:
    # Header values.
    [ values: [<string>, ...] ]
    # Headers values. Hidden in configuration page.
    [ secrets: [<secret>, ...] ]
    # Files to read header values from.
    [ files: [<string>, ...] ] ]

# TLS configuration.
tls_config:
  [ <tls_config> ]
@@ -1327,7 +1407,7 @@ authorization:
# `credentials_file`.
[ credentials: <secret> ]
# Sets the credentials to the credentials read from the configured file.
# It is mutuall exclusive with `credentials`.
# It is mutually exclusive with `credentials`.
[ credentials_file: <filename> ]

# Optional OAuth 2.0 configuration, currently not supported by AWS.

@@ -1346,6 +1426,18 @@ oauth2:
[ proxy_connect_header:
  [ <string>: [<secret>, ...] ] ]

# Custom HTTP headers to be sent along with each request.
# Headers that are set by Prometheus itself can't be overwritten.
http_headers:
  # Header name.
  [ <string>:
    # Header values.
    [ values: [<string>, ...] ]
    # Headers values. Hidden in configuration page.
    [ secrets: [<secret>, ...] ]
    # Files to read header values from.
    [ files: [<string>, ...] ] ]

# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]

@@ -1623,6 +1715,18 @@ oauth2:
[ proxy_connect_header:
  [ <string>: [<secret>, ...] ] ]

# Custom HTTP headers to be sent along with each request.
# Headers that are set by Prometheus itself can't be overwritten.
http_headers:
  # Header name.
  [ <string>:
    # Header values.
    [ values: [<string>, ...] ]
    # Headers values. Hidden in configuration page.
    [ secrets: [<secret>, ...] ]
    # Files to read header values from.
    [ files: [<string>, ...] ] ]

# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]

@@ -1849,6 +1953,18 @@ oauth2:
[ proxy_connect_header:
  [ <string>: [<secret>, ...] ] ]

# Custom HTTP headers to be sent along with each request.
# Headers that are set by Prometheus itself can't be overwritten.
http_headers:
  # Header name.
  [ <string>:
    # Header values.
    [ values: [<string>, ...] ]
    # Headers values. Hidden in configuration page.
    [ secrets: [<secret>, ...] ]
    # Files to read header values from.
    [ files: [<string>, ...] ] ]

# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]

@@ -1943,6 +2059,18 @@ oauth2:
[ proxy_connect_header:
  [ <string>: [<secret>, ...] ] ]

# Custom HTTP headers to be sent along with each request.
# Headers that are set by Prometheus itself can't be overwritten.
http_headers:
  # Header name.
  [ <string>:
    # Header values.
    [ values: [<string>, ...] ]
    # Headers values. Hidden in configuration page.
    [ secrets: [<secret>, ...] ]
    # Files to read header values from.
    [ files: [<string>, ...] ] ]

# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]

@@ -2026,6 +2154,18 @@ oauth2:
[ proxy_connect_header:
  [ <string>: [<secret>, ...] ] ]

# Custom HTTP headers to be sent along with each request.
# Headers that are set by Prometheus itself can't be overwritten.
http_headers:
  # Header name.
  [ <string>:
    # Header values.
    [ values: [<string>, ...] ]
    # Headers values. Hidden in configuration page.
    [ secrets: [<secret>, ...] ]
    # Files to read header values from.
    [ files: [<string>, ...] ] ]

# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]
@@ -2158,6 +2298,8 @@ The `endpointslice` role discovers targets from existing endpointslices. For each
address referenced in the endpointslice object one target is discovered. If the endpoint is backed by a pod, all
additional container ports of the pod, not bound to an endpoint port, are discovered as targets as well.

The role requires the `discovery.k8s.io/v1` API version (available since Kubernetes v1.21).

Available meta labels:

* `__meta_kubernetes_namespace`: The namespace of the endpoints object.

@@ -2178,7 +2320,7 @@ Available meta labels:
* `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: Flag that shows if the referenced object has a kubernetes.io/hostname annotation.
* `__meta_kubernetes_endpointslice_endpoint_hostname`: Hostname of the referenced endpoint.
* `__meta_kubernetes_endpointslice_endpoint_node_name`: Name of the Node hosting the referenced endpoint.
* `__meta_kubernetes_endpointslice_endpoint_zone`: Zone the referenced endpoint exists in (only available when using the `discovery.k8s.io/v1` API group).
* `__meta_kubernetes_endpointslice_endpoint_zone`: Zone the referenced endpoint exists in.
* `__meta_kubernetes_endpointslice_port`: Port of the referenced endpoint.
* `__meta_kubernetes_endpointslice_port_name`: Named port of the referenced endpoint.
* `__meta_kubernetes_endpointslice_port_protocol`: Protocol of the referenced endpoint.

@@ -2191,6 +2333,8 @@ The `ingress` role discovers a target for each path of each ingress.
This is generally useful for blackbox monitoring of an ingress.
The address will be set to the host specified in the ingress spec.

The role requires the `networking.k8s.io/v1` API version (available since Kubernetes v1.19).

Available meta labels:

* `__meta_kubernetes_namespace`: The namespace of the ingress object.
@ -2260,6 +2404,18 @@ oauth2:
|
|||
[ proxy_connect_header:
|
||||
[ <string>: [<secret>, ...] ] ]
|
||||
|
||||
# Custom HTTP headers to be sent along with each request.
|
||||
# Headers that are set by Prometheus itself can't be overwritten.
|
||||
http_headers:
|
||||
# Header name.
|
||||
[ <string>:
|
||||
# Header values.
|
||||
[ values: [<string>, ...] ]
|
||||
# Headers values. Hidden in configuration page.
|
||||
[ secrets: [<secret>, ...] ]
|
||||
# Files to read header values from.
|
||||
[ files: [<string>, ...] ] ]
|
||||
|
||||
# Configure whether HTTP requests follow HTTP 3xx redirects.
|
||||
[ follow_redirects: <boolean> | default = true ]
|
||||
|
||||
|
@ -2351,6 +2507,18 @@ server: <string>
|
|||
[ proxy_connect_header:
|
||||
[ <string>: [<secret>, ...] ] ]
|
||||
|
||||
# Custom HTTP headers to be sent along with each request.
|
||||
# Headers that are set by Prometheus itself can't be overwritten.
|
||||
http_headers:
|
||||
# Header name.
|
||||
[ <string>:
|
||||
# Header values.
|
||||
[ values: [<string>, ...] ]
|
||||
# Headers values. Hidden in configuration page.
|
||||
[ secrets: [<secret>, ...] ]
|
||||
# Files to read header values from.
|
||||
[ files: [<string>, ...] ] ]
|
||||
|
||||
# TLS configuration.
|
||||
tls_config:
|
||||
[ <tls_config> ]
|
||||
|
@ -2459,7 +2627,7 @@ authorization:
|
|||
# `credentials_file`.
|
||||
[ credentials: <secret> ]
|
||||
# Sets the credentials to the credentials read from the configured file.
|
||||
# It is mutuall exclusive with `credentials`.
|
||||
# It is mutually exclusive with `credentials`.
|
||||
[ credentials_file: <filename> ]
|
||||
|
||||
# Optional OAuth 2.0 configuration, currently not supported by AWS.
|
||||
|
@ -2478,6 +2646,18 @@ oauth2:
|
|||
[ proxy_connect_header:
|
||||
[ <string>: [<secret>, ...] ] ]
|
||||
|
||||
# Custom HTTP headers to be sent along with each request.
|
||||
# Headers that are set by Prometheus itself can't be overwritten.
|
||||
http_headers:
|
||||
# Header name.
|
||||
[ <string>:
|
||||
# Header values.
|
||||
[ values: [<string>, ...] ]
|
||||
# Headers values. Hidden in configuration page.
|
||||
[ secrets: [<secret>, ...] ]
|
||||
# Files to read header values from.
|
||||
[ files: [<string>, ...] ] ]
|
||||
|
||||
# Configure whether HTTP requests follow HTTP 3xx redirects.
|
||||
[ follow_redirects: <boolean> | default = true ]
|
||||
|
||||
|
@ -2567,6 +2747,18 @@ oauth2:
|
|||
[ proxy_connect_header:
|
||||
[ <string>: [<secret>, ...] ] ]
|
||||
|
||||
# Custom HTTP headers to be sent along with each request.
|
||||
# Headers that are set by Prometheus itself can't be overwritten.
|
||||
http_headers:
|
||||
# Header name.
|
||||
[ <string>:
|
||||
# Header values.
|
||||
[ values: [<string>, ...] ]
|
||||
# Headers values. Hidden in configuration page.
|
||||
[ secrets: [<secret>, ...] ]
|
||||
# Files to read header values from.
|
||||
[ files: [<string>, ...] ] ]
|
||||
|
||||
# Configure whether HTTP requests follow HTTP 3xx redirects.
|
||||
[ follow_redirects: <boolean> | default = true ]
|
||||
|
||||
|
@ -2674,6 +2866,18 @@ tls_config:
|
|||
# Specifies headers to send to proxies during CONNECT requests.
|
||||
[ proxy_connect_header:
|
||||
[ <string>: [<secret>, ...] ] ]
|
||||
|
||||
# Custom HTTP headers to be sent along with each request.
|
||||
# Headers that are set by Prometheus itself can't be overwritten.
|
||||
http_headers:
|
||||
# Header name.
|
||||
[ <string>:
|
||||
# Header values.
|
||||
[ values: [<string>, ...] ]
|
||||
# Headers values. Hidden in configuration page.
|
||||
[ secrets: [<secret>, ...] ]
|
||||
# Files to read header values from.
|
||||
[ files: [<string>, ...] ] ]
|
||||
```
|
||||
|
||||
By default every app listed in Marathon will be scraped by Prometheus. If not all
|
||||
|
@ -2773,6 +2977,18 @@ oauth2:
|
|||
[ proxy_connect_header:
|
||||
[ <string>: [<secret>, ...] ] ]
|
||||
|
||||
# Custom HTTP headers to be sent along with each request.
|
||||
# Headers that are set by Prometheus itself can't be overwritten.
|
||||
http_headers:
|
||||
# Header name.
|
||||
[ <string>:
|
||||
# Header values.
|
||||
[ values: [<string>, ...] ]
|
||||
# Headers values. Hidden in configuration page.
|
||||
[ secrets: [<secret>, ...] ]
|
||||
# Files to read header values from.
|
||||
[ files: [<string>, ...] ] ]
|
||||
|
||||
# Configure whether HTTP requests follow HTTP 3xx redirects.
|
||||
[ follow_redirects: <boolean> | default = true ]
|
||||
|
||||
|
@ -2959,6 +3175,18 @@ tls_config:
|
|||
[ proxy_connect_header:
|
||||
[ <string>: [<secret>, ...] ] ]
|
||||
|
||||
# Custom HTTP headers to be sent along with each request.
|
||||
# Headers that are set by Prometheus itself can't be overwritten.
|
||||
http_headers:
|
||||
# Header name.
|
||||
[ <string>:
|
||||
# Header values.
|
||||
[ values: [<string>, ...] ]
|
||||
# Headers values. Hidden in configuration page.
|
||||
[ secrets: [<secret>, ...] ]
|
||||
# Files to read header values from.
|
||||
[ files: [<string>, ...] ] ]
|
||||
|
||||
# Configure whether HTTP requests follow HTTP 3xx redirects.
|
||||
[ follow_redirects: <boolean> | default = true ]
|
||||
|
||||
|
@ -3085,6 +3313,18 @@ tags_filter:
|
|||
[ proxy_connect_header:
|
||||
[ <string>: [<secret>, ...] ] ]
|
||||
|
||||
# Custom HTTP headers to be sent along with each request.
|
||||
# Headers that are set by Prometheus itself can't be overwritten.
|
||||
http_headers:
|
||||
# Header name.
|
||||
[ <string>:
|
||||
# Header values.
|
||||
[ values: [<string>, ...] ]
|
||||
# Headers values. Hidden in configuration page.
|
||||
[ secrets: [<secret>, ...] ]
|
||||
# Files to read header values from.
|
||||
[ files: [<string>, ...] ] ]
|
||||
|
||||
# TLS configuration.
|
||||
tls_config:
|
||||
[ <tls_config> ]
|
||||
|
@ -3161,6 +3401,18 @@ oauth2:
|
|||
[ proxy_connect_header:
|
||||
[ <string>: [<secret>, ...] ] ]
|
||||
|
||||
# Custom HTTP headers to be sent along with each request.
|
||||
# Headers that are set by Prometheus itself can't be overwritten.
|
||||
http_headers:
|
||||
# Header name.
|
||||
[ <string>:
|
||||
# Header values.
|
||||
[ values: [<string>, ...] ]
|
||||
# Headers values. Hidden in configuration page.
|
||||
[ secrets: [<secret>, ...] ]
|
||||
# Files to read header values from.
|
||||
[ files: [<string>, ...] ] ]
|
||||
|
||||
# Configure whether HTTP requests follow HTTP 3xx redirects.
|
||||
[ follow_redirects: <boolean> | default = true ]
|
||||
|
||||
|
@ -3243,6 +3495,18 @@ oauth2:
|
|||
[ proxy_connect_header:
|
||||
[ <string>: [<secret>, ...] ] ]
|
||||
|
||||
# Custom HTTP headers to be sent along with each request.
|
||||
# Headers that are set by Prometheus itself can't be overwritten.
|
||||
http_headers:
|
||||
# Header name.
|
||||
[ <string>:
|
||||
# Header values.
|
||||
[ values: [<string>, ...] ]
|
||||
# Headers values. Hidden in configuration page.
|
||||
[ secrets: [<secret>, ...] ]
|
||||
# Files to read header values from.
|
||||
[ files: [<string>, ...] ] ]
|
||||
|
||||
# Configure whether HTTP requests follow HTTP 3xx redirects.
|
||||
[ follow_redirects: <boolean> | default = true ]
|
||||
|
||||
|
@ -3468,6 +3732,18 @@ tls_config:
|
|||
[ proxy_connect_header:
|
||||
[ <string>: [<secret>, ...] ] ]
|
||||
|
||||
# Custom HTTP headers to be sent along with each request.
|
||||
# Headers that are set by Prometheus itself can't be overwritten.
|
||||
http_headers:
|
||||
# Header name.
|
||||
[ <string>:
|
||||
# Header values.
|
||||
[ values: [<string>, ...] ]
|
||||
# Headers values. Hidden in configuration page.
|
||||
[ secrets: [<secret>, ...] ]
|
||||
# Files to read header values from.
|
||||
[ files: [<string>, ...] ] ]
|
||||
|
||||
# Configure whether HTTP requests follow HTTP 3xx redirects.
|
||||
[ follow_redirects: <boolean> | default = true ]
|
||||
|
||||
|
@ -3712,7 +3988,7 @@ azuread:
|
|||
# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or azuread.
|
||||
# To use the default credentials from the Google Cloud SDK, use `google_iam: {}`.
|
||||
google_iam:
|
||||
# Service account key with monitoring write permessions.
|
||||
# Service account key with monitoring write permissions.
|
||||
credentials_file: <file_name>
|
||||
|
||||
# Configures the remote write request's TLS settings.
|
||||
|
@ -3731,6 +4007,18 @@ tls_config:
|
|||
[ proxy_connect_header:
|
||||
[ <string>: [<secret>, ...] ] ]
|
||||
|
||||
# Custom HTTP headers to be sent along with each request.
|
||||
# Headers that are set by Prometheus itself can't be overwritten.
|
||||
http_headers:
|
||||
# Header name.
|
||||
[ <string>:
|
||||
# Header values.
|
||||
[ values: [<string>, ...] ]
|
||||
# Headers values. Hidden in configuration page.
|
||||
[ secrets: [<secret>, ...] ]
|
||||
# Files to read header values from.
|
||||
[ files: [<string>, ...] ] ]
|
||||
|
||||
# Configure whether HTTP requests follow HTTP 3xx redirects.
|
||||
[ follow_redirects: <boolean> | default = true ]
|
||||
|
||||
|
@ -3852,6 +4140,18 @@ tls_config:
|
|||
[ proxy_connect_header:
|
||||
[ <string>: [<secret>, ...] ] ]
|
||||
|
||||
# Custom HTTP headers to be sent along with each request.
|
||||
# Headers that are set by Prometheus itself can't be overwritten.
|
||||
http_headers:
|
||||
# Header name.
|
||||
[ <string>:
|
||||
# Header values.
|
||||
[ values: [<string>, ...] ]
|
||||
# Headers values. Hidden in configuration page.
|
||||
[ secrets: [<secret>, ...] ]
|
||||
# Files to read header values from.
|
||||
[ files: [<string>, ...] ] ]
|
||||
|
||||
# Configure whether HTTP requests follow HTTP 3xx redirects.
|
||||
[ follow_redirects: <boolean> | default = true ]
|
||||
|
||||
|
|
|
@ -20,14 +20,6 @@ values according to the values of the current environment variables. References
|
|||
to undefined variables are replaced by the empty string.
|
||||
The `$` character can be escaped by using `$$`.
|
||||
|
||||
## Remote Write Receiver
|
||||
|
||||
`--enable-feature=remote-write-receiver`
|
||||
|
||||
The remote write receiver allows Prometheus to accept remote write requests from other Prometheus servers. More details can be found [here](storage.md#overview).
|
||||
|
||||
Activating the remote write receiver via a feature flag is deprecated. Use `--web.enable-remote-write-receiver` instead. This feature flag will be ignored in future versions of Prometheus.
|
||||
|
||||
## Exemplars storage
|
||||
|
||||
`--enable-feature=exemplar-storage`
|
||||
|
@ -55,20 +47,6 @@ When enabled, for each instance scrape, Prometheus stores a sample in the follow
|
|||
to find out how close they are to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. Note that `scrape_sample_limit` can be zero if there is no limit configured, which means that the query above can return `+Inf` for targets with no limit (as we divide by zero). If you want to query only for targets that do have a sample limit use this query: `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`.
|
||||
- `scrape_body_size_bytes`. The uncompressed size of the most recent scrape response, if successful. Scrapes failing because `body_size_limit` is exceeded report `-1`, other scrape failures report `0`.
|
||||
|
||||
## New service discovery manager
|
||||
|
||||
`--enable-feature=new-service-discovery-manager`
|
||||
|
||||
When enabled, Prometheus uses a new service discovery manager that does not
|
||||
restart unchanged discoveries upon reloading. This makes reloads faster and reduces
|
||||
pressure on service discoveries' sources.
|
||||
|
||||
Users are encouraged to test the new service discovery manager and report any
|
||||
issues upstream.
|
||||
|
||||
In future releases, this new service discovery manager will become the default and
|
||||
this feature flag will be ignored.
|
||||
|
||||
## Prometheus agent
|
||||
|
||||
`--enable-feature=agent`
|
||||
|
@ -193,7 +171,7 @@ This should **only** be applied to metrics that currently produce such labels.
|
|||
`--enable-feature=otlp-write-receiver`
|
||||
|
||||
The OTLP receiver allows Prometheus to accept [OpenTelemetry](https://opentelemetry.io/) metrics writes.
|
||||
Prometheus is best used as a Pull based system, and staleness, `up` metric, and other Pull enabled features
|
||||
Prometheus is best used as a Pull based system, and staleness, `up` metric, and other Pull enabled features
|
||||
won't work when you push OTLP metrics.
|
||||
|
||||
## Experimental PromQL functions
|
||||
|
@ -226,6 +204,12 @@ This has the potential to improve rule group evaluation latency and resource uti
|
|||
|
||||
The number of concurrent rule evaluations can be configured with `--rules.max-concurrent-rule-evals`, which is set to `4` by default.
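As a sketch, tuning this limit from the command line might look as follows (assuming `concurrent-rule-eval` is the feature flag this section refers to; it is not shown in this excerpt):

```
prometheus --enable-feature=concurrent-rule-eval --rules.max-concurrent-rule-evals=8
```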

## Serve old Prometheus UI

Fall back to serving the old (Prometheus 2.x) web UI instead of the new UI. The new UI that was released as part of Prometheus 3.0 is a complete rewrite and aims to be cleaner, less cluttered, and more modern under the hood. However, it is not fully feature complete and battle-tested yet, so some users may still prefer using the old UI.

`--enable-feature=old-ui`

## Metadata WAL Records

`--enable-feature=metadata-wal-records`

@ -258,10 +242,15 @@ When enabled, Prometheus will change the way in which the `__name__` label is re

This allows optionally preserving the `__name__` label via the `label_replace` and `label_join` functions, and helps prevent the "vector cannot contain metrics with the same labelset" error, which can happen when applying a regex-matcher to the `__name__` label.

## UTF-8 Name Support
## Auto Reload Config

`--enable-feature=utf8-names`
`--enable-feature=auto-reload-config`

When enabled, changes the metric and label name validation scheme inside Prometheus to allow the full UTF-8 character set.
By itself, this flag does not enable the request of UTF-8 names via content negotiation.
Users will also have to set `metric_name_validation_scheme` in scrape configs to enable the feature either on the global config or on a per-scrape config basis.
When enabled, Prometheus will automatically reload its configuration file at a
specified interval. The interval is defined by the
`--config.auto-reload-interval` flag, which defaults to `30s`.

Configuration reloads are triggered by detecting changes in the checksum of the
main configuration file or any referenced files, such as rule and scrape
configurations. To ensure consistency and avoid issues during reloads, it's
recommended to update these files atomically.
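A sketch of a launch command enabling this behavior, using the flags named above (the interval value is illustrative):

```
prometheus --config.file=prometheus.yml \
  --enable-feature=auto-reload-config \
  --config.auto-reload-interval=1m
```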

@ -239,6 +239,75 @@ $ curl 'http://localhost:9090/api/v1/format_query?query=foo/bar'
}
```

## Parsing a PromQL expression into an abstract syntax tree (AST)

This endpoint is **experimental** and might change in the future. It is currently only meant to be used by Prometheus' own web UI, and the endpoint name and exact format returned may change from one Prometheus version to another. It may also be removed again in case it is no longer needed by the UI.

The following endpoint parses a PromQL expression and returns it as a JSON-formatted AST (abstract syntax tree) representation:

```
GET /api/v1/parse_query
POST /api/v1/parse_query
```

URL query parameters:

- `query=<string>`: Prometheus expression query string.

You can URL-encode these parameters directly in the request body by using the `POST` method and
`Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large
query that may breach server-side URL character limits.

The `data` section of the query result contains the AST of the parsed query expression.

The following example parses the expression `foo/bar`:

```json
$ curl 'http://localhost:9090/api/v1/parse_query?query=foo/bar'
{
   "data" : {
      "bool" : false,
      "lhs" : {
         "matchers" : [
            {
               "name" : "__name__",
               "type" : "=",
               "value" : "foo"
            }
         ],
         "name" : "foo",
         "offset" : 0,
         "startOrEnd" : null,
         "timestamp" : null,
         "type" : "vectorSelector"
      },
      "matching" : {
         "card" : "one-to-one",
         "include" : [],
         "labels" : [],
         "on" : false
      },
      "op" : "/",
      "rhs" : {
         "matchers" : [
            {
               "name" : "__name__",
               "type" : "=",
               "value" : "bar"
            }
         ],
         "name" : "bar",
         "offset" : 0,
         "startOrEnd" : null,
         "timestamp" : null,
         "type" : "vectorSelector"
      },
      "type" : "binaryExpr"
   },
   "status" : "success"
}
```

## Querying metadata

Prometheus offers a set of API endpoints to query metadata about series and their labels.

@ -693,7 +762,7 @@ URL query parameters:
- `rule_name[]=<string>`: only return rules with the given rule name. If the parameter is repeated, rules with any of the provided names are returned. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done.
- `rule_group[]=<string>`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done.
- `file[]=<string>`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done.
- `exclude_alerts=<bool>`: only return rules, do not return active alerts.
- `match[]=<label_selector>`: only return rules that have configured labels that satisfy the label selectors. If the parameter is repeated, rules that match any of the sets of label selectors are returned. Note that matching is on the labels in the definition of each rule, not on the values after template expansion (for alerting rules). Optional.

```json

@ -203,12 +203,12 @@ Range vector literals work like instant vector literals, except that they
select a range of samples back from the current instant. Syntactically, a [time
duration](#time-durations) is appended in square brackets (`[]`) at the end of
a vector selector to specify how far back in time values should be fetched for
each resulting range vector element. The range is a closed interval,
i.e. samples with timestamps coinciding with either boundary of the range are
still included in the selection.
each resulting range vector element. The range is a left-open and right-closed interval,
i.e. samples with timestamps coinciding with the left boundary of the range are excluded from the selection,
while samples coinciding with the right boundary of the range are included in the selection.

In this example, we select all the values we have recorded within the last 5
minutes for all time series that have the metric name `http_requests_total` and
In this example, we select all the values recorded less than 5m ago for all time series
that have the metric name `http_requests_total` and
a `job` label set to `prometheus`:

    http_requests_total{job="prometheus"}[5m]

@ -358,7 +358,7 @@ independently of the actual present time series data. This is mainly to support
cases like aggregation (`sum`, `avg`, and so on), where multiple aggregated
time series do not precisely align in time. Because of their independence,
Prometheus needs to assign a value at those timestamps for each relevant time
series. It does so by taking the newest sample before this timestamp within the lookback period.
series. It does so by taking the newest sample that is less than the lookback period ago.
The lookback period is 5 minutes by default.
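The period can be changed at server startup; a sketch, assuming the `--query.lookback-delta` flag (not shown in this excerpt):

```
prometheus --query.lookback-delta=10m
```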

If a target scrape or rule evaluation no longer returns a sample for a time

@ -87,10 +87,9 @@ or 31 days, whichever is smaller.
Prometheus has several flags that configure local storage. The most important are:

- `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`.
- `--storage.tsdb.retention.time`: How long to retain samples in storage. When this flag is
  set, it overrides `storage.tsdb.retention`. If neither this flag nor `storage.tsdb.retention`
  nor `storage.tsdb.retention.size` is set, the retention time defaults to `15d`.
  Supported units: y, w, d, h, m, s, ms.
- `--storage.tsdb.retention.time`: How long to retain samples in storage. If neither
  this flag nor `storage.tsdb.retention.size` is set, the retention time defaults to
  `15d`. Supported units: y, w, d, h, m, s, ms.
- `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain.
  The oldest data will be removed first. Defaults to `0` or disabled. Units supported:
  B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only

@ -98,7 +97,6 @@ Prometheus has several flags that configure local storage. The most important ar
  chunks are counted in the total size. So the minimum requirement for the disk is the
  peak space taken by the `wal` (the WAL and Checkpoint) and `chunks_head`
  (m-mapped Head chunks) directory combined (peaks every 2 hours).
- `--storage.tsdb.retention`: Deprecated in favor of `storage.tsdb.retention.time`.
- `--storage.tsdb.wal-compression`: Enables compression of the write-ahead log (WAL).
  Depending on your data, you can expect the WAL size to be halved with little extra
  cpu load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0.

@ -146,7 +144,7 @@ a buffer, ensuring that older entries will be removed before the allocated stora
for Prometheus becomes full.

At present, we recommend setting the retention size to, at most, 80-85% of your
allocated Prometheus disk space. This increases the likelihood that older entires
allocated Prometheus disk space. This increases the likelihood that older entries
will be removed prior to hitting any disk limitations.
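Putting these flags together, a sketch of a launch command (the path and values are illustrative):

```
prometheus --storage.tsdb.path=/var/lib/prometheus \
  --storage.tsdb.retention.time=30d \
  --storage.tsdb.retention.size=400GB
```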

## Remote storage integrations

@ -157,31 +155,27 @@ a set of interfaces that allow integrating with remote storage systems.

### Overview

Prometheus integrates with remote storage systems in three ways:
Prometheus integrates with remote storage systems in four ways:

- Prometheus can write samples that it ingests to a remote URL in a standardized format.
- Prometheus can receive samples from other Prometheus servers in a standardized format.
- Prometheus can read (back) sample data from a remote URL in a standardized format.
- Prometheus can write samples that it ingests to a remote URL in a [Remote Write format](https://prometheus.io/docs/specs/remote_write_spec_2_0/).
- Prometheus can receive samples from other clients in a [Remote Write format](https://prometheus.io/docs/specs/remote_write_spec_2_0/).
- Prometheus can read (back) sample data from a remote URL in a [Remote Read format](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto#L31).
- Prometheus can return sample data requested by clients in a [Remote Read format](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto#L31).

![Remote read and write architecture](images/remote_integrations.png)

The read and write protocols both use a snappy-compressed protocol buffer encoding over
HTTP. The protocols are not considered as stable APIs yet and may change to use gRPC
over HTTP/2 in the future, when all hops between Prometheus and the remote storage can
safely be assumed to support HTTP/2.
The remote read and write protocols both use a snappy-compressed protocol buffer encoding over
HTTP. The read protocol is not yet considered as stable API.

For details on configuring remote storage integrations in Prometheus, see the
The write protocol has a [stable specification for 1.0 version](https://prometheus.io/docs/specs/remote_write_spec/)
and [experimental specification for 2.0 version](https://prometheus.io/docs/specs/remote_write_spec_2_0/),
both supported by Prometheus server.

For details on configuring remote storage integrations in Prometheus as a client, see the
[remote write](configuration/configuration.md#remote_write) and
[remote read](configuration/configuration.md#remote_read) sections of the Prometheus
configuration documentation.
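As a sketch, the client side of the write path is a single block in `prometheus.yml` (the URL is hypothetical):

```yaml
remote_write:
  - url: "https://remote-storage.example.org/api/v1/write"
```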

The built-in remote write receiver can be enabled by setting the
`--web.enable-remote-write-receiver` command line flag. When enabled,
the remote write receiver endpoint is `/api/v1/write`.

For details on the request and response messages, see the
[remote storage protocol buffer definitions](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto).

Note that on the read path, Prometheus only fetches raw series data for a set of
label selectors and time ranges from the remote end. All PromQL evaluation on the
raw data still happens in Prometheus itself. This means that remote read queries

@ -189,6 +183,11 @@ have some scalability limit, since all necessary data needs to be loaded into th
querying Prometheus server first and then processed there. However, supporting
fully distributed evaluation of PromQL was deemed infeasible for the time being.

Prometheus also serves both protocols. The built-in remote write receiver can be enabled
by setting the `--web.enable-remote-write-receiver` command line flag. When enabled,
the remote write receiver endpoint is `/api/v1/write`. The remote read endpoint is
available on [`/api/v1/read`](https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/).
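A sketch of enabling the built-in receiver on a server, with the flag named above:

```
prometheus --config.file=prometheus.yml --web.enable-remote-write-receiver
```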

### Existing integrations

To learn more about existing integrations with remote storage systems, see the

@ -1,4 +1,4 @@
# An example scrape configuration for running Prometheus with Ovhcloud.
# An example scrape configuration for running Prometheus with OVHcloud.
scrape_configs:
  - job_name: 'ovhcloud'
    ovhcloud_sd_configs:

@ -1,14 +1,14 @@
module github.com/prometheus/prometheus/documentation/examples/remote_storage

go 1.21.0
go 1.22.0

require (
  github.com/alecthomas/kingpin/v2 v2.4.0
  github.com/go-kit/log v0.2.1
  github.com/gogo/protobuf v1.3.2
  github.com/golang/snappy v0.0.4
  github.com/influxdata/influxdb v1.11.5
  github.com/prometheus/client_golang v1.20.0
  github.com/influxdata/influxdb v1.11.6
  github.com/prometheus/client_golang v1.20.2
  github.com/prometheus/common v0.57.0
  github.com/prometheus/prometheus v0.53.1
  github.com/stretchr/testify v1.9.0

@ -166,8 +166,8 @@ github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MI
github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/influxdata/influxdb v1.11.5 h1:+em5VOl6lhAZubXj5o6SobCwvrRs3XDlBx/MUI4schI=
github.com/influxdata/influxdb v1.11.5/go.mod h1:k8sWREQl1/9t46VrkrH5adUM4UNGIt206ipO3plbkw8=
github.com/influxdata/influxdb v1.11.6 h1:zS5MRY+RQ5/XFTer5R8xQRnY17JYSbacvO6OaP164wU=
github.com/influxdata/influxdb v1.11.6/go.mod h1:F10NoQb9qa04lME3pTPWQrYt4JZ/ke1Eei+1ttgHHrg=
github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=

@ -253,8 +253,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI=
github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=

98
go.mod

@ -1,11 +1,11 @@
module github.com/prometheus/prometheus

go 1.21.0
go 1.22.0

toolchain go1.22.5
toolchain go1.23.0

require (
  github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0
  github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0
  github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
  github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
  github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0

@ -17,10 +17,10 @@ require (
  github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
  github.com/cespare/xxhash/v2 v2.3.0
  github.com/dennwc/varint v1.0.0
  github.com/digitalocean/godo v1.119.0
  github.com/docker/docker v27.1.1+incompatible
  github.com/digitalocean/godo v1.122.0
  github.com/docker/docker v27.2.0+incompatible
  github.com/edsrzf/mmap-go v1.1.0
  github.com/envoyproxy/go-control-plane v0.12.0
  github.com/envoyproxy/go-control-plane v0.13.0
  github.com/envoyproxy/protoc-gen-validate v1.1.0
  github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
  github.com/fsnotify/fsnotify v1.7.0

@ -43,8 +43,8 @@ require (
  github.com/json-iterator/go v1.1.12
  github.com/klauspost/compress v1.17.9
  github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
  github.com/linode/linodego v1.38.0
  github.com/miekg/dns v1.1.61
  github.com/linode/linodego v1.40.0
  github.com/miekg/dns v1.1.62
  github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
  github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
  github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1

@ -52,51 +52,51 @@ require (
  github.com/oklog/ulid v1.3.1
  github.com/ovh/go-ovh v1.6.0
  github.com/prometheus/alertmanager v0.27.0
  github.com/prometheus/client_golang v1.20.2
  github.com/prometheus/client_golang v1.20.3
  github.com/prometheus/client_model v0.6.1
  github.com/prometheus/common v0.56.0
  github.com/prometheus/common v0.59.1
  github.com/prometheus/common/assets v0.2.0
  github.com/prometheus/common/sigv4 v0.1.0
  github.com/prometheus/exporter-toolkit v0.11.0
  github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29
  github.com/prometheus/exporter-toolkit v0.12.0
  github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30
  github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
  github.com/stretchr/testify v1.9.0
  github.com/vultr/govultr/v2 v2.17.2
  go.opentelemetry.io/collector/pdata v1.12.0
  go.opentelemetry.io/collector/semconv v0.105.0
  go.opentelemetry.io/collector/pdata v1.14.1
  go.opentelemetry.io/collector/semconv v0.108.1
  go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
  go.opentelemetry.io/otel v1.28.0
  go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0
  go.opentelemetry.io/otel/sdk v1.28.0
  go.opentelemetry.io/otel/trace v1.28.0
  go.opentelemetry.io/otel v1.29.0
  go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0
  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0
  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0
  go.opentelemetry.io/otel/sdk v1.29.0
  go.opentelemetry.io/otel/trace v1.29.0
  go.uber.org/atomic v1.11.0
  go.uber.org/automaxprocs v1.5.3
  go.uber.org/goleak v1.3.0
  go.uber.org/multierr v1.11.0
  golang.org/x/oauth2 v0.22.0
  golang.org/x/sync v0.7.0
  golang.org/x/sys v0.22.0
  golang.org/x/text v0.16.0
  golang.org/x/time v0.5.0
  golang.org/x/tools v0.23.0
  google.golang.org/api v0.190.0
  google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f
  google.golang.org/grpc v1.65.0
  golang.org/x/oauth2 v0.23.0
  golang.org/x/sync v0.8.0
  golang.org/x/sys v0.25.0
  golang.org/x/text v0.18.0
  golang.org/x/time v0.6.0
  golang.org/x/tools v0.24.0
  google.golang.org/api v0.195.0
  google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed
  google.golang.org/grpc v1.66.0
  google.golang.org/protobuf v1.34.2
  gopkg.in/yaml.v2 v2.4.0
  gopkg.in/yaml.v3 v3.0.1
  k8s.io/api v0.29.3
  k8s.io/apimachinery v0.29.3
  k8s.io/client-go v0.29.3
  k8s.io/api v0.31.0
  k8s.io/apimachinery v0.31.0
  k8s.io/client-go v0.31.0
  k8s.io/klog v1.0.0
  k8s.io/klog/v2 v2.130.1
)

require (
  cloud.google.com/go/auth v0.7.3 // indirect
  cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect
  cloud.google.com/go/auth v0.9.3 // indirect
  cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
  cloud.google.com/go/compute/metadata v0.5.0 // indirect
  github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
  github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect

@ -115,9 +115,9 @@ require (
  github.com/docker/go-connections v0.4.0 // indirect
  github.com/docker/go-units v0.5.0 // indirect
  github.com/emicklei/go-restful/v3 v3.11.0 // indirect
  github.com/evanphx/json-patch v5.6.0+incompatible // indirect
  github.com/fatih/color v1.16.0 // indirect
  github.com/felixge/httpsnoop v1.0.4 // indirect
  github.com/fxamacker/cbor/v2 v2.7.0 // indirect
  github.com/ghodss/yaml v1.0.0 // indirect
  github.com/go-kit/kit v0.12.0 // indirect
  github.com/go-logr/logr v1.4.2 // indirect

@ -140,10 +140,10 @@ require (
  github.com/google/go-querystring v1.1.0 // indirect
  github.com/google/gofuzz v1.2.0 // indirect
  github.com/google/s2a-go v0.1.8 // indirect
  github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
  github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
  github.com/googleapis/gax-go/v2 v2.13.0 // indirect
  github.com/gorilla/websocket v1.5.0 // indirect
  github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
  github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
  github.com/hashicorp/cronexpr v1.1.2 // indirect
  github.com/hashicorp/errwrap v1.1.0 // indirect
  github.com/hashicorp/go-cleanhttp v0.5.2 // indirect

@ -163,6 +163,8 @@ require (
  github.com/mailru/easyjson v0.7.7 // indirect
  github.com/mattn/go-colorable v0.1.13 // indirect
  github.com/mattn/go-isatty v0.0.20 // indirect
  github.com/mdlayher/socket v0.4.1 // indirect
  github.com/mdlayher/vsock v1.2.1 // indirect
  github.com/mitchellh/go-homedir v1.1.0 // indirect
  github.com/mitchellh/mapstructure v1.5.0 // indirect
  github.com/moby/docker-image-spec v1.3.1 // indirect

@ -176,36 +178,38 @@ require (
  github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
  github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
  github.com/pkg/errors v0.9.1 // indirect
  github.com/planetscale/vtprotobuf v0.6.0 // indirect
  github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
  github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
  github.com/prometheus/procfs v0.15.1 // indirect
  github.com/sirupsen/logrus v1.9.3 // indirect
  github.com/spf13/pflag v1.0.5 // indirect
  github.com/stretchr/objx v0.5.2 // indirect
  github.com/x448/float16 v0.8.4 // indirect
  github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
  go.mongodb.org/mongo-driver v1.14.0 // indirect
  go.opencensus.io v0.24.0 // indirect
  go.opentelemetry.io/otel/metric v1.28.0 // indirect
  go.opentelemetry.io/otel/metric v1.29.0 // indirect
  go.opentelemetry.io/proto/otlp v1.3.1 // indirect
  golang.org/x/crypto v0.25.0 // indirect
  golang.org/x/crypto v0.26.0 // indirect
  golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
  golang.org/x/mod v0.19.0 // indirect
  golang.org/x/net v0.27.0 // indirect
  golang.org/x/term v0.22.0 // indirect
  google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect
  golang.org/x/mod v0.20.0 // indirect
  golang.org/x/net v0.28.0 // indirect
  golang.org/x/term v0.23.0 // indirect
  google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
  gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
  gopkg.in/inf.v0 v0.9.1 // indirect
  gopkg.in/ini.v1 v1.67.0 // indirect
  gotest.tools/v3 v3.0.3 // indirect
  k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
  k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
  k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
  sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
  sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
  sigs.k8s.io/yaml v1.3.0 // indirect
  sigs.k8s.io/yaml v1.4.0 // indirect
)

replace (
  k8s.io/klog => github.com/simonpasquier/klog-gokit v0.3.0
  k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.3.0
  k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.5.0
)

// Exclude linodego v1.0.0 as it is no longer published on github.

199
go.sum

@ -12,10 +12,10 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY=
cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA=
cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=
cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=
cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U=
cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk=
cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=

@ -36,8 +36,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=

@ -143,14 +143,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/digitalocean/godo v1.119.0 h1:dmFNQwSIAcH3z+FVovHLkazKDC2uA8oOlGvg5+H4vRw=
github.com/digitalocean/godo v1.119.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY=
github.com/digitalocean/godo v1.122.0 h1:ziytLQi8QKtDp2K1A+YrYl2dWLHLh2uaMzWvcz9HkKg=
github.com/digitalocean/godo v1.122.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4=
github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=

@ -168,13 +168,11 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI=
github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les=
github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=

@ -191,6 +189,8 @@ github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=

@ -236,8 +236,8 @@ github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz
github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=

@ -328,8 +328,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=

@ -350,8 +350,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg=

@ -472,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linode/linodego v1.38.0 h1:wP3oW9OhGc6vhze8NPf2knbwH4TzSbrjzuCd9okjbTY=
github.com/linode/linodego v1.38.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ=
github.com/linode/linodego v1.40.0 h1:7ESY0PwK94hoggoCtIroT1Xk6b1flrFBNZ6KwqbTqlI=
github.com/linode/linodego v1.40.0/go.mod h1:NsUw4l8QrLdIofRg1NYFBbW5ZERnmbZykVBszPZLORM=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=

@ -497,11 +497,15 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g=
github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM=
github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ=
github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs=
github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ=
github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=

@ -553,11 +557,11 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=

@ -592,8 +596,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/planetscale/vtprotobuf v0.6.0 h1:nBeETjudeJ5ZgBHUz1fVHvbqUKnYOXNhsIEabROxmNA=
github.com/planetscale/vtprotobuf v0.6.0/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=

@ -610,8 +614,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4=
github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=

@ -627,14 +631,14 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.56.0 h1:UffReloqkBtvtQEYDg2s+uDPGRrJyC6vZWPGXf6OhPY=
github.com/prometheus/common v0.56.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0=
github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g=
github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q=
github.com/prometheus/exporter-toolkit v0.12.0 h1:DkE5RcEZR3lQA2QD5JLVQIf41dFKNsVMXFhgqcif7fo=
github.com/prometheus/exporter-toolkit v0.12.0/go.mod h1:fQH0KtTn0yrrS0S82kqppRjDDiwMfIQUwT+RBRRhwUc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=

@ -652,8 +656,8 @@ github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 h1:BkTk4gynLjguayxrYxZoMZjBnAOh7ntQvUkOFmkMqPU=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY=
|
||||
|
@ -663,8 +667,8 @@ github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1
|
|||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/simonpasquier/klog-gokit v0.3.0 h1:TkFK21cbwDRS+CiystjqbAiq5ubJcVTk9hLUck5Ntcs=
|
||||
github.com/simonpasquier/klog-gokit v0.3.0/go.mod h1:+SUlDQNrhVtGt2FieaqNftzzk8P72zpWlACateWxA9k=
|
||||
github.com/simonpasquier/klog-gokit/v3 v3.3.0 h1:HMzH999kO5gEgJTaWWO+xjncW5oycspcsBnjn9b853Q=
|
||||
github.com/simonpasquier/klog-gokit/v3 v3.3.0/go.mod h1:uSbnWC3T7kt1dQyY9sjv0Ao1SehMAJdVnUNSKhjaDsg=
|
||||
github.com/simonpasquier/klog-gokit/v3 v3.5.0 h1:ewnk+ickph0hkQFgdI4pffKIbruAxxWcg0Fe/vQmLOM=
|
||||
github.com/simonpasquier/klog-gokit/v3 v3.5.0/go.mod h1:S9flvRzzpaYLYtXI2w8jf9R/IU/Cy14NrbvDUevNP1E=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
|
@ -705,6 +709,8 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
|
|||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs=
|
||||
github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
|
||||
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
|
@ -726,26 +732,26 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA=
|
||||
go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI=
|
||||
go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g=
|
||||
go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw=
|
||||
go.opentelemetry.io/collector/pdata v1.14.1 h1:wXZjtQA7Vy5HFqco+yA95ENyMQU5heBB1IxMHQf6mUk=
|
||||
go.opentelemetry.io/collector/pdata v1.14.1/go.mod h1:z1dTjwwtcoXxZx2/nkHysjxMeaxe9pEmYTEr4SMNIx8=
|
||||
go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4=
|
||||
go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
|
||||
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
|
||||
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
|
||||
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
|
||||
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
|
||||
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
|
||||
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
|
||||
go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
|
||||
go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 h1:JAv0Jwtl01UFiyWZEMiJZBiTlv5A50zNs8lsthXqIio=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0/go.mod h1:QNKLmUEAq2QUbPQUfvw4fmv0bgbK7UlOSFCnXyfvSNc=
|
||||
go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
|
||||
go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
|
||||
go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
|
||||
go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
|
||||
go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
|
||||
go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
|
@ -776,8 +782,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
|
|||
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
|
||||
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
|
||||
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
|
||||
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
@ -812,8 +818,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
|
||||
golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
|
||||
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -859,16 +865,16 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
|||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
|
||||
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
|
||||
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
|
||||
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
|
||||
golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
|
||||
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -882,8 +888,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -949,16 +955,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
|
||||
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
|
||||
golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
|
||||
golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
|
||||
golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -970,14 +976,15 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
|||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
|
||||
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
|
||||
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
@ -1028,8 +1035,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc
|
|||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
|
||||
golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
|
||||
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
|
||||
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -1049,8 +1056,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
|
|||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.190.0 h1:ASM+IhLY1zljNdLu19W1jTmU6A+gMk6M46Wlur61s+Q=
|
||||
google.golang.org/api v0.190.0/go.mod h1:QIr6I9iedBLnfqoD6L6Vze1UvS5Hzj5r2aUBOaZnLHo=
|
||||
google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU=
|
||||
google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -1087,10 +1094,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
|
|||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
|
@ -1109,8 +1116,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
|
|||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
|
||||
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
|
||||
google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c=
|
||||
google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
@ -1132,6 +1139,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
|
|||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
|
@ -1165,16 +1174,16 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
|||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw=
|
||||
k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80=
|
||||
k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
|
||||
k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
|
||||
k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
|
||||
k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
|
||||
k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
|
||||
k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
|
||||
k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
|
||||
k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||
k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
|
||||
k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
|
@ -1183,6 +1192,6 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h6
|
|||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
|
||||
|
|
|
@@ -342,7 +342,7 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
default:
// All other cases shouldn't actually happen.
// They are a direct collision of CounterReset and NotCounterReset.
// Conservatively set the CounterResetHint to "unknown" and isse a warning.
// Conservatively set the CounterResetHint to "unknown" and issue a warning.
h.CounterResetHint = UnknownCounterReset
// TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
}
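The hunk above documents the conservative rule for counter reset hints: when the receiving and the other histogram carry directly colliding hints (CounterReset vs. NotCounterReset), Add downgrades the result to "unknown". A minimal sketch of that behavior, assuming the model/histogram package API as shown in this diff (the Add signature and the hint constants are taken from the hunks themselves):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	a := &histogram.FloatHistogram{CounterResetHint: histogram.CounterReset, Count: 1, Sum: 1}
	b := &histogram.FloatHistogram{CounterResetHint: histogram.NotCounterReset, Count: 2, Sum: 2}

	// The hints collide directly, so per the comment in the hunk above the
	// sum's hint should be conservatively reported as UnknownCounterReset.
	sum, err := a.Add(b)
	if err != nil {
		panic(err)
	}
	fmt.Println(sum.CounterResetHint == histogram.UnknownCounterReset) // expected: true
}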
@@ -658,7 +658,7 @@ func detectReset(currIt, prevIt *floatBucketIterator) bool {
if !currIt.Next() {
// Reached end of currIt early, therefore
// previous histogram has a bucket that the
// current one does not have. Unlass all
// current one does not have. Unless all
// remaining buckets in the previous histogram
// are unpopulated, this is a reset.
for {
@@ -891,7 +891,7 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() {
// reconcileZeroBuckets finds a zero bucket large enough to include the zero
// buckets of both histograms (the receiving histogram and the other histogram)
// with a zero threshold that is not within a populated bucket in either
// histogram. This method modifies the receiving histogram accourdingly, but
// histogram. This method modifies the receiving histogram accordingly, but
// leaves the other histogram as is. Instead, it returns the zero count the
// other histogram would have if it were modified.
func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
@@ -131,6 +131,54 @@ func TestFloatHistogramMul(t *testing.T) {
NegativeBuckets: []float64{9, 3, 15, 18},
},
},
{
"negation",
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: 11,
Count: 30,
Sum: 23,
PositiveSpans: []Span{{-2, 2}, {1, 3}},
PositiveBuckets: []float64{1, 0, 3, 4, 7},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{3, 1, 5, 6},
},
-1,
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: -11,
Count: -30,
Sum: -23,
PositiveSpans: []Span{{-2, 2}, {1, 3}},
PositiveBuckets: []float64{-1, 0, -3, -4, -7},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{-3, -1, -5, -6},
},
},
{
"negative multiplier",
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: 11,
Count: 30,
Sum: 23,
PositiveSpans: []Span{{-2, 2}, {1, 3}},
PositiveBuckets: []float64{1, 0, 3, 4, 7},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{3, 1, 5, 6},
},
-2,
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: -22,
Count: -60,
Sum: -46,
PositiveSpans: []Span{{-2, 2}, {1, 3}},
PositiveBuckets: []float64{-2, 0, -6, -8, -14},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{-6, -2, -10, -12},
},
},
{
"no-op with custom buckets",
&FloatHistogram{
@@ -409,6 +457,54 @@ func TestFloatHistogramDiv(t *testing.T) {
NegativeBuckets: []float64{1.5, 0.5, 2.5, 3},
},
},
{
"negation",
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: 5.5,
Count: 3493.3,
Sum: 2349209.324,
PositiveSpans: []Span{{-2, 1}, {2, 3}},
PositiveBuckets: []float64{1, 3.3, 4.2, 0.1},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
},
-1,
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: -5.5,
Count: -3493.3,
Sum: -2349209.324,
PositiveSpans: []Span{{-2, 1}, {2, 3}},
PositiveBuckets: []float64{-1, -3.3, -4.2, -0.1},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{-3.1, -3, -1.234e5, -1000},
},
},
{
"negative half",
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: 11,
Count: 30,
Sum: 23,
PositiveSpans: []Span{{-2, 2}, {1, 3}},
PositiveBuckets: []float64{1, 0, 3, 4, 7},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{3, 1, 5, 6},
},
-2,
&FloatHistogram{
ZeroThreshold: 0.01,
ZeroCount: -5.5,
Count: -15,
Sum: -11.5,
PositiveSpans: []Span{{-2, 2}, {1, 3}},
PositiveBuckets: []float64{-0.5, 0, -1.5, -2, -3.5},
NegativeSpans: []Span{{3, 2}, {3, 2}},
NegativeBuckets: []float64{-1.5, -0.5, -2.5, -3},
},
},
{
"no-op with custom buckets",
&FloatHistogram{
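The new test cases pin down the semantics of negative scalars: Mul and Div scale every component of the histogram (ZeroCount, Count, Sum, and all positive and negative bucket values) while leaving the span layout untouched. A hedged usage sketch, assuming Mul and Div mutate the receiver and return it (which is how the engine code later in this diff chains Copy().Mul(-1)):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		ZeroThreshold: 0.01,
		ZeroCount:     11,
		Count:         30,
		Sum:           23,
	}

	neg := h.Copy().Mul(-1)  // negate every component
	half := h.Copy().Div(-2) // scale every component by -0.5

	fmt.Println(neg.Count, neg.Sum)   // -30 -23
	fmt.Println(half.Count, half.Sum) // -15 -11.5
}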
@@ -140,7 +140,7 @@ testmetric{label="\"bar\""} 1`
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
}, {
// NOTE: Unlike OpenMetrics, Promparse allows spaces between label terms. This appears to be unintended and should probably be fixed.
// NOTE: Unlike OpenMetrics, PromParser allows spaces between label terms. This appears to be unintended and should probably be fixed.
m: `go_gc_duration_seconds { quantile = "2.0" a = "b" }`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "2.0", "a", "b"),
@@ -600,7 +600,7 @@ func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) {
return totalLength, proto.Unmarshal(b[varIntLength:totalLength], mf)
}

// formatOpenMetricsFloat works like the usual Go string formatting of a fleat
// formatOpenMetricsFloat works like the usual Go string formatting of a float
// but appends ".0" if the resulting number would otherwise contain neither a
// "." nor an "e".
func formatOpenMetricsFloat(f float64) string {
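The rule in the corrected comment is self-contained enough to restate as code: format the float the usual Go way, then append ".0" if the result contains neither "." nor "e". A standalone sketch of that rule (sketchFormatFloat is an illustrative helper, not the function from this diff):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// sketchFormatFloat formats f like Go normally would, then appends ".0"
// when the output would otherwise contain neither a "." nor an "e".
func sketchFormatFloat(f float64) string {
	s := strconv.FormatFloat(f, 'g', -1, 64)
	if !strings.ContainsAny(s, ".e") {
		s += ".0"
	}
	return s
}

func main() {
	fmt.Println(sketchFormatFloat(42))   // "42.0"
	fmt.Println(sketchFormatFloat(0.5))  // "0.5"
	fmt.Println(sketchFormatFloat(1e20)) // "1e+20"
}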
@@ -743,7 +743,7 @@ func TestHangingNotifier(t *testing.T) {

// Initialize the discovery manager
// This is relevant as the updates aren't sent continually in real life, but only each updatert.
// The old implementation of TestHangingNotifier didn't take that into acount.
// The old implementation of TestHangingNotifier didn't take that into account.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
reg := prometheus.NewRegistry()
@@ -25,6 +25,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/teststorage"
@@ -274,7 +275,7 @@ func BenchmarkRangeQuery(b *testing.B) {
MaxSamples: 50000000,
Timeout: 100 * time.Second,
}
engine := promql.NewEngine(opts)
engine := promqltest.NewTestEngineWithOpts(b, opts)

const interval = 10000 // 10s interval.
// A day of data plus 10k steps.
@@ -365,7 +366,7 @@ func BenchmarkNativeHistograms(b *testing.B) {

for _, tc := range cases {
b.Run(tc.name, func(b *testing.B) {
ng := promql.NewEngine(opts)
ng := promqltest.NewTestEngineWithOpts(b, opts)
for i := 0; i < b.N; i++ {
qry, err := ng.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step)
if err != nil {
163
promql/engine.go
@@ -19,6 +19,7 @@ import (
"context"
"errors"
"fmt"
"io"
"math"
"reflect"
"runtime"
@@ -271,6 +272,8 @@ func contextErr(err error, env string) error {
//
// 2) Enforcement of the maximum number of concurrent queries.
type QueryTracker interface {
io.Closer

// GetMaxConcurrent returns maximum number of concurrent queries that are allowed by this tracker.
GetMaxConcurrent() int

@@ -430,6 +433,18 @@ func NewEngine(opts EngineOpts) *Engine {
}
}

// Close closes ng.
func (ng *Engine) Close() error {
if ng == nil {
return nil
}

if ng.activeQueryTracker != nil {
return ng.activeQueryTracker.Close()
}
return nil
}

// SetQueryLogger sets the query logger.
func (ng *Engine) SetQueryLogger(l QueryLogger) {
ng.queryLoggerLock.Lock()
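With QueryTracker now embedding io.Closer, the engine owns the tracker's resources and the new Close method releases them. A minimal usage sketch, assuming only the promql API visible in these hunks (NewEngine, EngineOpts, and the nil-safe Close); this is also presumably why the benchmarks above switched to the promqltest helper, so the engine gets closed during test cleanup:

package main

import (
	"log"
	"time"

	"github.com/prometheus/prometheus/promql"
)

func main() {
	eng := promql.NewEngine(promql.EngineOpts{
		MaxSamples: 50000000,
		Timeout:    100 * time.Second,
	})
	// Close is nil-safe and delegates to the active query tracker, if any.
	defer func() {
		if err := eng.Close(); err != nil {
			log.Printf("closing PromQL engine: %v", err)
		}
	}()
}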
@@ -713,7 +728,6 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
startTimestamp: start,
endTimestamp: start,
interval: 1,
ctx: ctxInnerEval,
maxSamples: ng.maxSamplesPerQuery,
logger: ng.logger,
lookbackDelta: s.LookbackDelta,
@@ -723,7 +737,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
}
query.sampleStats.InitStepTracking(start, start, 1)

val, warnings, err := evaluator.Eval(s.Expr)
val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr)

evalSpanTimer.Finish()

@@ -772,7 +786,6 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
startTimestamp: timeMilliseconds(s.Start),
endTimestamp: timeMilliseconds(s.End),
interval: durationMilliseconds(s.Interval),
ctx: ctxInnerEval,
maxSamples: ng.maxSamplesPerQuery,
logger: ng.logger,
lookbackDelta: s.LookbackDelta,
@@ -781,7 +794,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
enableDelayedNameRemoval: ng.enableDelayedNameRemoval,
}
query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval)
val, warnings, err := evaluator.Eval(s.Expr)
val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr)

evalSpanTimer.Finish()
@@ -896,11 +909,17 @@ func getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path
}

if evalRange == 0 {
start -= durationMilliseconds(s.LookbackDelta)
// Reduce the start by one fewer ms than the lookback delta
// because we want to exclude samples that are precisely the
// lookback delta before the eval time.
start -= durationMilliseconds(s.LookbackDelta) - 1
} else {
// For all matrix queries we want to ensure that we have (end-start) + range selected
// this way we have `range` data before the start time
start -= durationMilliseconds(evalRange)
// For all matrix queries we want to ensure that we have
// (end-start) + range selected this way we have `range` data
// before the start time. We subtract one from the range to
// exclude samples positioned directly at the lower boundary of
// the range.
start -= durationMilliseconds(evalRange) - 1
}

offsetMilliseconds := durationMilliseconds(n.OriginalOffset)
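The off-by-one in this hunk is easiest to see with concrete numbers: previously a sample sitting exactly lookback-delta before the evaluation time was still selected, and subtracting one millisecond less moves the window start just past it. A tiny standalone sketch of the boundary arithmetic (all values made up):

package main

import "fmt"

func main() {
	const msPerMinute = 60 * 1000
	lookback := int64(5 * msPerMinute) // 5m lookback delta in ms
	evalTS := int64(1_000_000)         // evaluation timestamp in ms

	oldStart := evalTS - lookback     // 700000: still admits a sample exactly 5m old
	newStart := evalTS - lookback + 1 // 700001: excludes that boundary sample

	fmt.Println(oldStart, newStart)
}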
@@ -993,6 +1012,8 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations
if e.Series != nil {
return nil, nil
}
span := trace.SpanFromContext(ctx)
span.AddEvent("expand start", trace.WithAttributes(attribute.String("selector", e.String())))
series, ws, err := expandSeriesSet(ctx, e.UnexpandedSeriesSet)
if e.SkipHistogramBuckets {
for i := range series {
@@ -1000,6 +1021,7 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations
}
}
e.Series = series
span.AddEvent("expand end", trace.WithAttributes(attribute.Int("num_series", len(series))))
return ws, err
}
return nil, nil
@@ -1029,8 +1051,6 @@ func (e errWithWarnings) Error() string { return e.err.Error() }
// querier and reports errors. On timeout or cancellation of its context it
// terminates.
type evaluator struct {
ctx context.Context

startTimestamp int64 // Start time in milliseconds.
endTimestamp int64 // End time in milliseconds.
interval int64 // Interval in milliseconds.
@@ -1079,10 +1099,10 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp
}
}

func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) {
func (ev *evaluator) Eval(ctx context.Context, expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) {
defer ev.recover(expr, &ws, &err)

v, ws = ev.eval(expr)
v, ws = ev.eval(ctx, expr)
if ev.enableDelayedNameRemoval {
ev.cleanupMetricLabels(v)
}
@@ -1133,7 +1153,7 @@ func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) {
// function call results.
// The prepSeries function (if provided) can be used to prepare the helper
// for each series, then passed to each call funcCall.
func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) {
func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) {
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
matrixes := make([]Matrix, len(exprs))
origMatrixes := make([]Matrix, len(exprs))
@@ -1144,7 +1164,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
// Functions will take string arguments from the expressions, not the values.
if e != nil && e.Type() != parser.ValueTypeString {
// ev.currentSamples will be updated to the correct value within the ev.eval call.
val, ws := ev.eval(e)
val, ws := ev.eval(ctx, e)
warnings.Merge(ws)
matrixes[i] = val.(Matrix)

@@ -1196,7 +1216,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
}

for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval {
if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
if err := contextDone(ctx, "expression evaluation"); err != nil {
ev.error(err)
}
// Reset number of samples in memory after each timestamp.
@@ -1306,7 +1326,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
return mat, warnings
}

func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) {
func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) {
// Keep a copy of the original point slice so that it can be returned to the pool.
origMatrix := slices.Clone(inputMatrix)
defer func() {
@@ -1386,7 +1406,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
}

for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval {
if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
if err := contextDone(ctx, "expression evaluation"); err != nil {
ev.error(err)
}
// Reset number of samples in memory after each timestamp.
@@ -1437,11 +1457,11 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping

// evalSubquery evaluates given SubqueryExpr and returns an equivalent
// evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set.
func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) {
func (ev *evaluator) evalSubquery(ctx context.Context, subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) {
samplesStats := ev.samplesStats
// Avoid double counting samples when running a subquery, those samples will be counted in later stage.
ev.samplesStats = ev.samplesStats.NewChild()
val, ws := ev.eval(subq)
val, ws := ev.eval(ctx, subq)
// But do incorporate the peak from the subquery
samplesStats.UpdatePeakFromSubquery(ev.samplesStats)
ev.samplesStats = samplesStats
@@ -1468,18 +1488,20 @@ func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSele
}

// eval evaluates the given expression as the given AST expression node requires.
func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotations) {
func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, annotations.Annotations) {
// This is the top-level evaluation method.
// Thus, we check for timeout/cancellation here.
if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
if err := contextDone(ctx, "expression evaluation"); err != nil {
ev.error(err)
}
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1

// Create a new span to help investigate inner evaluation performances.
ctxWithSpan, span := otel.Tracer("").Start(ev.ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String())
ev.ctx = ctxWithSpan
ctx, span := otel.Tracer("").Start(ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String())
defer span.End()
if ss, ok := expr.(interface{ ShortString() string }); ok {
span.SetAttributes(attribute.String("operation", ss.ShortString()))
}

switch e := expr.(type) {
case *parser.AggregateExpr:
@@ -1500,7 +1522,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
sortedGrouping = append(sortedGrouping, valueLabel.Val)
slices.Sort(sortedGrouping)
}
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.aggregationCountValues(e, sortedGrouping, valueLabel.Val, v[0].(Vector), enh)
}, e.Expr)
}
@@ -1510,16 +1532,16 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
// param is the number k for topk/bottomk, or q for quantile.
var fParam float64
if param != nil {
val, ws := ev.eval(param)
val, ws := ev.eval(ctx, param)
warnings.Merge(ws)
fParam = val.(Matrix)[0].Floats[0].F
}
// Now fetch the data to be aggregated.
val, ws := ev.eval(e.Expr)
val, ws := ev.eval(ctx, e.Expr)
warnings.Merge(ws)
inputMatrix := val.(Matrix)

result, ws := ev.rangeEvalAgg(e, sortedGrouping, inputMatrix, fParam)
result, ws := ev.rangeEvalAgg(ctx, e, sortedGrouping, inputMatrix, fParam)
warnings.Merge(ws)
ev.currentSamples = originalNumSamples + result.TotalSamples()
ev.samplesStats.UpdatePeak(ev.currentSamples)
@@ -1537,7 +1559,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
unwrapParenExpr(&arg)
vs, ok := arg.(*parser.VectorSelector)
if ok {
return ev.rangeEvalTimestampFunctionOverVectorSelector(vs, call, e)
return ev.rangeEvalTimestampFunctionOverVectorSelector(ctx, vs, call, e)
}
}

@@ -1561,7 +1583,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
matrixArgIndex = i
matrixArg = true
// Replacing parser.SubqueryExpr with parser.MatrixSelector.
val, totalSamples, ws := ev.evalSubquery(subq)
val, totalSamples, ws := ev.evalSubquery(ctx, subq)
e.Args[i] = val
warnings.Merge(ws)
defer func() {
@@ -1576,14 +1598,14 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
// Special handling for functions that work on series not samples.
switch e.Func.Name {
case "label_replace":
return ev.evalLabelReplace(e.Args)
return ev.evalLabelReplace(ctx, e.Args)
case "label_join":
return ev.evalLabelJoin(e.Args)
return ev.evalLabelJoin(ctx, e.Args)
}

if !matrixArg {
// Does not have a matrix argument.
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec, annos := call(v, e.Args, enh)
return vec, warnings.Merge(annos)
}, e.Args...)
@@ -1595,7 +1617,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
otherInArgs := make([]Vector, len(e.Args))
for i, e := range e.Args {
if i != matrixArgIndex {
val, ws := ev.eval(e)
val, ws := ev.eval(ctx, e)
otherArgs[i] = val.(Matrix)
otherInArgs[i] = Vector{Sample{}}
inArgs[i] = otherInArgs[i]
@@ -1609,7 +1631,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
sel := arg.(*parser.MatrixSelector)
selVS := sel.VectorSelector.(*parser.VectorSelector)

ws, err := checkAndExpandSeriesSet(ev.ctx, sel)
ws, err := checkAndExpandSeriesSet(ctx, sel)
warnings.Merge(ws)
if err != nil {
ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), warnings})
@@ -1639,7 +1661,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
dropName := e.Func.Name != "last_over_time"

for i, s := range selVS.Series {
if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
if err := contextDone(ctx, "expression evaluation"); err != nil {
ev.error(err)
}
ev.currentSamples -= len(floats) + totalHPointSize(histograms)
@@ -1785,10 +1807,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
return mat, warnings

case *parser.ParenExpr:
return ev.eval(e.Expr)
return ev.eval(ctx, e.Expr)

case *parser.UnaryExpr:
val, ws := ev.eval(e.Expr)
val, ws := ev.eval(ctx, e.Expr)
mat := val.(Matrix)
if e.Op == parser.SUB {
for i := range mat {
@@ -1799,6 +1821,9 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
for j := range mat[i].Floats {
mat[i].Floats[j].F = -mat[i].Floats[j].F
}
for j := range mat[i].Histograms {
mat[i].Histograms[j].H = mat[i].Histograms[j].H.Copy().Mul(-1)
}
}
if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() {
ev.errorf("vector cannot contain metrics with the same labelset")
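This hunk is what the new negation test cases earlier in the diff exercise end to end: unary minus on a vector now negates histogram samples as well, by scaling a copy of each histogram by -1 instead of touching only the float samples (the Copy presumably avoids mutating samples shared with storage).

// Hedged illustration (hypothetical metric name):
//   -some_native_histogram_metric
// now yields histograms whose Count, Sum, ZeroCount and bucket values are
// all negated, matching the Mul(-1) semantics pinned down by the tests above.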
@ -1809,7 +1834,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
|
|||
case *parser.BinaryExpr:
|
||||
switch lt, rt := e.LHS.Type(), e.RHS.Type(); {
|
||||
case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar:
|
||||
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F)
|
||||
return append(enh.Out, Sample{F: val}), nil
|
||||
}, e.LHS, e.RHS)
|
||||
|
@ -1822,47 +1847,49 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
|
|||
}
|
||||
switch e.Op {
|
||||
case parser.LAND:
|
||||
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil
|
||||
}, e.LHS, e.RHS)
|
||||
case parser.LOR:
|
||||
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil
|
||||
}, e.LHS, e.RHS)
|
||||
case parser.LUNLESS:
|
||||
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil
|
||||
}, e.LHS, e.RHS)
|
||||
default:
|
||||
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh)
|
||||
return vec, handleVectorBinopError(err, e)
|
||||
}, e.LHS, e.RHS)
|
||||
}
|
||||
|
||||
case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar:
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh)
return vec, handleVectorBinopError(err, e)
}, e.LHS, e.RHS)

case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector:
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh)
return vec, handleVectorBinopError(err, e)
}, e.LHS, e.RHS)
}

case *parser.NumberLiteral:
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
span.SetAttributes(attribute.Float64("value", e.Val))
return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil
})

case *parser.StringLiteral:
span.SetAttributes(attribute.String("value", e.Val))
return String{V: e.Val, T: ev.startTimestamp}, nil

case *parser.VectorSelector:
ws, err := checkAndExpandSeriesSet(ev.ctx, e)
ws, err := checkAndExpandSeriesSet(ctx, e)
if err != nil {
ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
}

@ -1871,7 +1898,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta))
var chkIter chunkenc.Iterator
for i, s := range e.Series {
if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
if err := contextDone(ctx, "expression evaluation"); err != nil {
ev.error(err)
}
chkIter = s.Iterator(chkIter)

@ -1922,14 +1949,13 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
if ev.startTimestamp != ev.endTimestamp {
panic(errors.New("cannot do range evaluation of matrix selector"))
}
return ev.matrixSelector(e)
return ev.matrixSelector(ctx, e)

case *parser.SubqueryExpr:
offsetMillis := durationMilliseconds(e.Offset)
rangeMillis := durationMilliseconds(e.Range)
newEv := &evaluator{
endTimestamp: ev.endTimestamp - offsetMillis,
ctx: ev.ctx,
currentSamples: ev.currentSamples,
maxSamples: ev.maxSamples,
logger: ev.logger,

@ -1959,7 +1985,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
setOffsetForAtModifier(newEv.startTimestamp, e.Expr)
}

res, ws := newEv.eval(e.Expr)
res, ws := newEv.eval(ctx, e.Expr)
ev.currentSamples = newEv.currentSamples
ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats)
ev.samplesStats.IncrementSamplesAtTimestamp(ev.endTimestamp, newEv.samplesStats.TotalSamples)

@ -1967,14 +1993,13 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
case *parser.StepInvariantExpr:
switch ce := e.Expr.(type) {
case *parser.StringLiteral, *parser.NumberLiteral:
return ev.eval(ce)
return ev.eval(ctx, ce)
}

newEv := &evaluator{
startTimestamp: ev.startTimestamp,
endTimestamp: ev.startTimestamp, // Always a single evaluation.
interval: ev.interval,
ctx: ev.ctx,
currentSamples: ev.currentSamples,
maxSamples: ev.maxSamples,
logger: ev.logger,

@ -1983,7 +2008,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn,
enableDelayedNameRemoval: ev.enableDelayedNameRemoval,
}
res, ws := newEv.eval(e.Expr)
res, ws := newEv.eval(ctx, e.Expr)
ev.currentSamples = newEv.currentSamples
ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats)
for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval {
@ -2059,8 +2084,8 @@ func reuseOrGetFPointSlices(prevSS *Series, numSteps int) (r []FPoint) {
return getFPointSlice(numSteps)
}

func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) {
ws, err := checkAndExpandSeriesSet(ev.ctx, vs)
func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Context, vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) {
ws, err := checkAndExpandSeriesSet(ctx, vs)
if err != nil {
ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
}

@ -2068,10 +2093,10 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec
seriesIterators := make([]*storage.MemoizedSeriesIterator, len(vs.Series))
for i, s := range vs.Series {
it := s.Iterator(nil)
seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta))
seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1)
}

return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
if vs.Timestamp != nil {
// This is a special case for "timestamp()" when the @ modifier is used, to ensure that
// we return a point for each time step in this case.

@ -2130,7 +2155,7 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no
if valueType == chunkenc.ValNone || t > refTime {
var ok bool
t, v, h, ok = it.PeekPrev()
if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) {
if !ok || t <= refTime-durationMilliseconds(ev.lookbackDelta) {
return 0, 0, nil, false
}
}
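The lookback comparison above flips from t < refTime-lookbackDelta to t <= refTime-lookbackDelta, so a sample sitting exactly lookbackDelta before the evaluation time no longer qualifies. A worked example with assumed numbers (not taken from the tests):

// refTime = 600000 ms, lookbackDelta = 5m (300000 ms), sample at t = 300000 ms.
// Old check: reject if t <  300000 -> 300000 <  300000 is false, sample used.
// New check: reject if t <= 300000 -> 300000 <= 300000 is true, sample dropped.
// The lookback window is now left-open: (refTime-lookbackDelta, refTime].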
@ -2207,7 +2232,7 @@ func putMatrixSelectorHPointSlice(p []HPoint) {
}

// matrixSelector evaluates a *parser.MatrixSelector expression.
func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annotations.Annotations) {
func (ev *evaluator) matrixSelector(ctx context.Context, node *parser.MatrixSelector) (Matrix, annotations.Annotations) {
var (
vs = node.VectorSelector.(*parser.VectorSelector)

@ -2218,7 +2243,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annota
it = storage.NewBuffer(durationMilliseconds(node.Range))
)
ws, err := checkAndExpandSeriesSet(ev.ctx, node)
ws, err := checkAndExpandSeriesSet(ctx, node)
if err != nil {
ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
}

@ -2226,7 +2251,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annota
var chkIter chunkenc.Iterator
series := vs.Series
for i, s := range series {
if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
if err := contextDone(ctx, "expression evaluation"); err != nil {
ev.error(err)
}
chkIter = s.Iterator(chkIter)

@ -2264,20 +2289,20 @@ func (ev *evaluator) matrixIterSlice(
mintFloats, mintHistograms := mint, mint

// First floats...
if len(floats) > 0 && floats[len(floats)-1].T >= mint {
if len(floats) > 0 && floats[len(floats)-1].T > mint {
// There is an overlap between previous and current ranges, retain common
// points. In most such cases:
// (a) the overlap is significantly larger than the eval step; and/or
// (b) the number of samples is relatively small.
// so a linear search will be as fast as a binary search.
var drop int
for drop = 0; floats[drop].T < mint; drop++ {
for drop = 0; floats[drop].T <= mint; drop++ {
}
ev.currentSamples -= drop
copy(floats, floats[drop:])
floats = floats[:len(floats)-drop]
// Only append points with timestamps after the last timestamp we have.
mintFloats = floats[len(floats)-1].T + 1
mintFloats = floats[len(floats)-1].T
} else {
ev.currentSamples -= len(floats)
if floats != nil {
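The trimming loop above relies on the invariant that the last buffered point lies inside the new range (checked just before), so it can advance without a bounds check. A bounds-checked sketch of the same idea, assuming the FPoint type from the surrounding code, with mint now treated as exclusive:

// Drop every buffered point at or before the new range start, keeping
// the overlapping tail of the previous range for reuse.
drop := 0
for drop < len(floats) && floats[drop].T <= mint {
	drop++
}
copy(floats, floats[drop:])
floats = floats[:len(floats)-drop]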
@ -2286,14 +2311,14 @@ func (ev *evaluator) matrixIterSlice(
}

// ...then the same for histograms. TODO(beorn7): Use generics?
if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint {
if len(histograms) > 0 && histograms[len(histograms)-1].T > mint {
// There is an overlap between previous and current ranges, retain common
// points. In most such cases:
// (a) the overlap is significantly larger than the eval step; and/or
// (b) the number of samples is relatively small.
// so a linear search will be as fast as a binary search.
var drop int
for drop = 0; histograms[drop].T < mint; drop++ {
for drop = 0; histograms[drop].T <= mint; drop++ {
}
// Rotate the buffer around the drop index so that points before mint can be
// reused to store new histograms.

@ -2304,7 +2329,7 @@ func (ev *evaluator) matrixIterSlice(
histograms = histograms[:len(histograms)-drop]
ev.currentSamples -= totalHPointSize(histograms)
// Only append points with timestamps after the last timestamp we have.
mintHistograms = histograms[len(histograms)-1].T + 1
mintHistograms = histograms[len(histograms)-1].T
} else {
ev.currentSamples -= totalHPointSize(histograms)
if histograms != nil {

@ -2328,7 +2353,7 @@ loop:
case chunkenc.ValFloatHistogram, chunkenc.ValHistogram:
t := buf.AtT()
// Values in the buffer are guaranteed to be smaller than maxt.
if t >= mintHistograms {
if t > mintHistograms {
if histograms == nil {
histograms = getMatrixSelectorHPoints()
}

@ -2354,7 +2379,7 @@ loop:
continue loop
}
// Values in the buffer are guaranteed to be smaller than maxt.
if t >= mintFloats {
if t > mintFloats {
ev.currentSamples++
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env))

File diff suppressed because it is too large
@ -14,6 +14,7 @@
package promql

import (
"context"
"errors"
"fmt"
"math"

@ -130,10 +131,18 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
sampledInterval := float64(lastT-firstT) / 1000
averageDurationBetweenSamples := sampledInterval / float64(numSamplesMinusOne)

// If the first/last samples are close to the boundaries of the range,
// extrapolate the result. This is as we expect that another sample
// will exist given the spacing between samples we've seen thus far,
// with an allowance for noise.
// If samples are close enough to the (lower or upper) boundary of the
// range, we extrapolate the rate all the way to the boundary in
// question. "Close enough" is defined as "up to 10% more than the
// average duration between samples within the range", see
// extrapolationThreshold below. Essentially, we are assuming a more or
// less regular spacing between samples, and if we don't see a sample
// where we would expect one, we assume the series does not cover the
// whole range, but starts and/or ends within the range. We still
// extrapolate the rate in this case, but not all the way to the
// boundary, but only by half of the average duration between samples
// (which is our guess for where the series actually starts or ends).

extrapolationThreshold := averageDurationBetweenSamples * 1.1
extrapolateToInterval := sampledInterval
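A worked example of the rule the new comment describes, with assumed numbers: suppose samples arrive roughly every 15s, so averageDurationBetweenSamples = 15 and extrapolationThreshold = 15 * 1.1 = 16.5.

// Gap between a range boundary and the nearest sample:
//   5s  <= 16.5s -> extrapolate all the way to that boundary.
//   30s >  16.5s -> the series likely starts/ends inside the range;
//                   extrapolate only by half the average spacing (7.5s).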
@ -1463,7 +1472,7 @@ func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelp
}

// label_replace function operates only on series; does not look at timestamps or values.
func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, annotations.Annotations) {
func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) {
var (
dst = stringFromArg(args[1])
repl = stringFromArg(args[2])

@ -1479,7 +1488,7 @@ func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, an
panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst))
}

val, ws := ev.eval(args[0])
val, ws := ev.eval(ctx, args[0])
matrix := val.(Matrix)
lb := labels.NewBuilder(labels.EmptyLabels())

@ -1520,7 +1529,7 @@ func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe
}

// label_join function operates only on series; does not look at timestamps or values.
func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annotations.Annotations) {
func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) {
var (
dst = stringFromArg(args[1])
sep = stringFromArg(args[2])

@ -1537,7 +1546,7 @@ func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annot
panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst))
}

val, ws := ev.eval(args[0])
val, ws := ev.eval(ctx, args[0])
matrix := val.(Matrix)
srcVals := make([]string, len(srcLabels))
lb := labels.NewBuilder(labels.EmptyLabels())
@ -24,6 +24,7 @@ import (
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/util/teststorage"
)

@ -39,7 +40,7 @@ func TestDeriv(t *testing.T) {
MaxSamples: 10000,
Timeout: 10 * time.Second,
}
engine := promql.NewEngine(opts)
engine := promqltest.NewTestEngineWithOpts(t, opts)

a := storage.Appender(context.Background())
@ -818,12 +818,12 @@ histogram_desc_item
$$ = yylex.(*parser).newMap()
$$["sum"] = $3
}
| COUNT_DESC COLON number
| COUNT_DESC COLON signed_or_unsigned_number
{
$$ = yylex.(*parser).newMap()
$$["count"] = $3
}
| ZERO_BUCKET_DESC COLON number
| ZERO_BUCKET_DESC COLON signed_or_unsigned_number
{
$$ = yylex.(*parser).newMap()
$$["z_bucket"] = $3

@ -875,11 +875,11 @@ bucket_set : LEFT_BRACKET bucket_set_list SPACE RIGHT_BRACKET
}
;

bucket_set_list : bucket_set_list SPACE number
bucket_set_list : bucket_set_list SPACE signed_or_unsigned_number
{
$$ = append($1, $3)
}
| number
| signed_or_unsigned_number
{
$$ = []float64{$1}
}
@ -410,55 +410,55 @@ const yyPrivate = 57344
const yyLast = 799

var yyAct = [...]int16{
155, 334, 332, 276, 339, 152, 226, 39, 192, 44,
291, 290, 156, 118, 82, 178, 229, 107, 106, 346,
347, 348, 349, 109, 108, 198, 239, 199, 133, 110,
105, 60, 245, 121, 6, 329, 325, 111, 328, 228,
200, 201, 160, 119, 304, 267, 293, 128, 260, 160,
151, 261, 159, 302, 358, 311, 122, 55, 89, 159,
196, 241, 242, 259, 113, 243, 114, 54, 98, 99,
302, 112, 101, 256, 104, 88, 230, 232, 234, 235,
152, 334, 332, 155, 339, 226, 39, 192, 276, 44,
291, 290, 118, 82, 178, 229, 107, 106, 346, 347,
348, 349, 109, 108, 198, 239, 199, 156, 110, 105,
6, 245, 200, 201, 133, 325, 111, 329, 228, 60,
357, 293, 328, 304, 267, 160, 266, 128, 55, 151,
302, 311, 302, 196, 340, 159, 55, 89, 54, 356,
241, 242, 355, 113, 243, 114, 54, 98, 99, 265,
112, 101, 256, 104, 88, 230, 232, 234, 235, 236,
244, 246, 249, 250, 251, 252, 253, 257, 258, 105,
333, 231, 233, 237, 238, 240, 247, 248, 103, 115,
109, 254, 255, 324, 150, 218, 110, 264, 111, 270,
77, 35, 7, 149, 188, 163, 322, 321, 173, 320,
167, 170, 323, 165, 271, 166, 2, 3, 4, 5,
263, 101, 194, 104, 180, 184, 197, 187, 186, 319,
272, 202, 203, 204, 205, 206, 207, 208, 209, 210,
211, 212, 213, 214, 215, 216, 195, 299, 103, 318,
217, 36, 298, 1, 190, 219, 220, 317, 160, 160,
316, 193, 160, 154, 182, 196, 229, 297, 159, 159,
160, 358, 159, 268, 181, 183, 239, 260, 296, 262,
159, 315, 245, 129, 314, 55, 225, 313, 161, 228,
161, 161, 259, 312, 161, 54, 86, 295, 310, 288,
289, 8, 161, 292, 162, 37, 162, 162, 49, 269,
162, 241, 242, 309, 179, 243, 180, 127, 162, 126,
308, 223, 294, 256, 48, 222, 230, 232, 234, 235,
236, 244, 246, 249, 250, 251, 252, 253, 257, 258,
160, 115, 231, 233, 237, 238, 240, 247, 248, 103,
159, 109, 254, 255, 324, 150, 357, 110, 333, 218,
111, 340, 310, 149, 77, 163, 7, 105, 35, 173,
167, 170, 161, 323, 165, 356, 166, 309, 355, 194,
2, 3, 4, 5, 308, 322, 184, 197, 162, 186,
321, 195, 202, 203, 204, 205, 206, 207, 208, 209,
210, 211, 212, 213, 214, 215, 216, 229, 129, 101,
217, 104, 219, 220, 190, 266, 270, 239, 160, 121,
268, 193, 264, 245, 55, 196, 154, 225, 159, 119,
228, 271, 188, 160, 54, 161, 103, 117, 265, 84,
262, 299, 122, 159, 320, 263, 298, 272, 10, 83,
161, 162, 241, 242, 269, 187, 243, 185, 79, 288,
289, 297, 319, 292, 256, 161, 162, 230, 232, 234,
235, 236, 244, 246, 249, 250, 251, 252, 253, 257,
258, 162, 294, 231, 233, 237, 238, 240, 247, 248,
318, 317, 316, 254, 255, 180, 315, 134, 135, 136,
137, 138, 139, 140, 141, 142, 143, 144, 145, 146,
147, 148, 157, 158, 169, 105, 314, 296, 300, 301,
303, 223, 305, 313, 55, 222, 179, 168, 180, 84,
306, 307, 177, 125, 54, 182, 295, 176, 124, 83,
221, 312, 87, 89, 8, 181, 183, 81, 37, 86,
175, 123, 36, 98, 99, 326, 327, 101, 102, 104,
88, 127, 331, 126, 50, 336, 337, 338, 182, 335,
78, 1, 342, 341, 344, 343, 49, 48, 181, 183,
350, 351, 47, 55, 103, 352, 53, 77, 164, 56,
46, 354, 22, 54, 59, 55, 172, 9, 9, 57,
132, 45, 43, 130, 171, 54, 359, 42, 131, 41,
40, 51, 191, 353, 273, 75, 85, 189, 224, 80,
345, 18, 19, 120, 153, 20, 58, 227, 52, 116,
221, 169, 231, 233, 237, 238, 240, 247, 248, 157,
158, 164, 254, 255, 168, 10, 182, 300, 55, 301,
303, 47, 305, 46, 132, 79, 181, 183, 54, 306,
307, 45, 134, 135, 136, 137, 138, 139, 140, 141,
142, 143, 144, 145, 146, 147, 148, 43, 59, 50,
84, 9, 9, 121, 326, 78, 327, 130, 171, 121,
83, 42, 131, 119, 335, 336, 337, 331, 185, 119,
338, 261, 342, 341, 344, 343, 122, 117, 41, 177,
350, 351, 122, 55, 176, 352, 53, 77, 40, 56,
125, 354, 22, 54, 84, 124, 172, 175, 51, 57,
191, 353, 273, 85, 83, 189, 359, 224, 123, 80,
345, 120, 81, 153, 58, 75, 227, 52, 116, 0,
0, 18, 19, 0, 0, 20, 0, 0, 0, 0,
0, 76, 0, 0, 0, 0, 61, 62, 63, 64,
65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
0, 0, 0, 13, 0, 0, 0, 24, 0, 30,
0, 0, 31, 32, 55, 38, 0, 53, 77, 0,
0, 0, 31, 32, 55, 38, 105, 53, 77, 0,
56, 275, 0, 22, 54, 0, 0, 0, 274, 0,
57, 0, 278, 279, 277, 284, 286, 283, 285, 280,
281, 282, 287, 0, 0, 0, 75, 0, 0, 0,
0, 0, 18, 19, 0, 0, 20, 0, 0, 0,
0, 0, 76, 0, 0, 0, 0, 61, 62, 63,
281, 282, 287, 87, 89, 0, 75, 0, 0, 0,
0, 0, 18, 19, 98, 99, 20, 0, 101, 102,
104, 88, 76, 0, 0, 0, 0, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
74, 0, 0, 0, 13, 0, 0, 0, 24, 0,
74, 0, 0, 0, 13, 103, 0, 0, 24, 0,
30, 0, 55, 31, 32, 53, 77, 0, 56, 330,
0, 22, 54, 0, 0, 0, 0, 0, 57, 0,
278, 279, 277, 284, 286, 283, 285, 280, 281, 282,

@ -493,51 +493,51 @@ var yyAct = [...]int16{
}

var yyPact = [...]int16{
32, 106, 569, 569, 405, 526, -1000, -1000, -1000, 105,
28, 102, 569, 569, 405, 526, -1000, -1000, -1000, 98,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, 277, -1000, 297, -1000, 650,
-1000, -1000, -1000, -1000, -1000, 342, -1000, 204, -1000, 650,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, 22, 95, -1000, -1000, 483, -1000, 483, 101,
-1000, -1000, 21, 93, -1000, -1000, 483, -1000, 483, 97,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, 167, -1000, -1000,
281, -1000, -1000, 309, -1000, 23, -1000, -50, -50, -50,
-50, -50, -50, -50, -50, -50, -50, -50, -50, -50,
-50, -50, -50, 48, 174, 336, 95, -56, -1000, 262,
262, 324, -1000, 631, 103, -1000, 280, -1000, -1000, 274,
241, -1000, -1000, -1000, 187, -1000, 180, -1000, 159, 483,
-1000, -57, -40, -1000, 483, 483, 483, 483, 483, 483,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, 307, -1000, -1000,
338, -1000, -1000, 225, -1000, 23, -1000, -44, -44, -44,
-44, -44, -44, -44, -44, -44, -44, -44, -44, -44,
-44, -44, -44, 47, 171, 259, 93, -57, -1000, 249,
249, 324, -1000, 631, 75, -1000, 327, -1000, -1000, 222,
130, -1000, -1000, -1000, 298, -1000, 112, -1000, 159, 483,
-1000, -58, -48, -1000, 483, 483, 483, 483, 483, 483,
483, 483, 483, 483, 483, 483, 483, 483, 483, -1000,
165, -1000, -1000, 94, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, 40, 40, 269, -1000, -1000, -1000, -1000, 155, -1000,
-1000, 41, -1000, 650, -1000, -1000, 31, -1000, 170, -1000,
-1000, -1000, -1000, -1000, 163, -1000, -1000, -1000, -1000, -1000,
19, 144, 140, -1000, -1000, -1000, 404, 16, 262, 262,
262, 262, 103, 103, 251, 251, 251, 715, 696, 251,
251, 715, 103, 103, 251, 103, 16, -1000, 24, -1000,
-1000, -1000, 265, -1000, 189, -1000, -1000, -1000, -1000, -1000,
39, -1000, -1000, 90, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, 36, 36, 229, -1000, -1000, -1000, -1000, 174, -1000,
-1000, 180, -1000, 650, -1000, -1000, 301, -1000, 105, -1000,
-1000, -1000, -1000, -1000, 44, -1000, -1000, -1000, -1000, -1000,
18, 157, 83, -1000, -1000, -1000, 404, 15, 249, 249,
249, 249, 75, 75, 402, 402, 402, 715, 696, 402,
402, 715, 75, 75, 402, 75, 15, -1000, 19, -1000,
-1000, -1000, 186, -1000, 155, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
483, -1000, -1000, -1000, -1000, -1000, -1000, 34, 34, 18,
34, 44, 44, 110, 38, -1000, -1000, 285, 267, 260,
240, 236, 235, 234, 206, 188, 134, 129, -1000, -1000,
-1000, -1000, -1000, -1000, 102, -1000, -1000, -1000, 14, -1000,
650, -1000, -1000, -1000, 34, -1000, 12, 9, 482, -1000,
-1000, -1000, 51, 81, 40, 40, 40, 97, 97, 51,
97, 51, -73, -1000, -1000, -1000, -1000, -1000, 34, 34,
-1000, -1000, -1000, 34, -1000, -1000, -1000, -1000, -1000, -1000,
40, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, 104, -1000, 33, -1000, -1000, -1000, -1000,
483, -1000, -1000, -1000, -1000, -1000, -1000, 31, 31, 17,
31, 37, 37, 206, 34, -1000, -1000, 197, 191, 188,
185, 164, 161, 153, 133, 113, 111, 110, -1000, -1000,
-1000, -1000, -1000, -1000, 101, -1000, -1000, -1000, 13, -1000,
650, -1000, -1000, -1000, 31, -1000, 16, 11, 482, -1000,
-1000, -1000, 33, 163, 163, 163, 36, 40, 40, 33,
40, 33, -74, -1000, -1000, -1000, -1000, -1000, 31, 31,
-1000, -1000, -1000, 31, -1000, -1000, -1000, -1000, -1000, -1000,
163, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, 38, -1000, 160, -1000, -1000, -1000, -1000,
}

var yyPgo = [...]int16{
0, 379, 13, 378, 6, 15, 377, 344, 376, 374,
373, 370, 198, 294, 369, 14, 368, 10, 11, 367,
366, 8, 364, 3, 4, 363, 2, 1, 0, 362,
12, 5, 361, 360, 18, 158, 359, 358, 7, 357,
354, 17, 353, 31, 352, 9, 351, 350, 340, 332,
327, 326, 314, 321, 302,
0, 368, 12, 367, 5, 14, 366, 298, 364, 363,
361, 360, 265, 211, 359, 13, 357, 10, 11, 355,
353, 7, 352, 8, 4, 351, 2, 1, 3, 350,
27, 0, 348, 338, 17, 193, 328, 312, 6, 311,
308, 16, 307, 39, 297, 9, 281, 274, 273, 271,
234, 218, 299, 163, 161,
}

var yyR1 = [...]int8{

@ -630,9 +630,9 @@ var yyChk = [...]int16{
-38, -27, 19, -27, 26, -27, -21, -21, 24, 17,
2, 17, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 21, 2, 22, -4, -27, 26, 26,
17, -23, -26, 57, -27, -31, -28, -28, -28, -24,
17, -23, -26, 57, -27, -31, -31, -31, -28, -24,
14, -24, -26, -24, -26, -11, 92, 93, 94, 95,
-27, -27, -27, -25, -28, 24, 21, 2, 21, -28,
-27, -27, -27, -25, -31, 24, 21, 2, 21, -31,
}

var yyDef = [...]int16{
@ -610,6 +610,9 @@ func lexBuckets(l *Lexer) stateFn {
case isSpace(r):
l.emit(SPACE)
return lexSpace
case r == '-':
l.emit(SUB)
return lexNumber
case isDigit(r):
l.backup()
return lexNumber
@ -4084,17 +4084,17 @@ func TestParseHistogramSeries(t *testing.T) {
},
{
name: "all properties used",
input: `{} {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}}`,
input: `{} {{schema:1 sum:0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:3 n_buckets:[4.1 5] n_offset:5 counter_reset_hint:gauge}}`,
expected: []histogram.FloatHistogram{{
Schema: 1,
Sum: -0.3,
Sum: 0.3,
Count: 3.1,
ZeroCount: 7.1,
ZeroThreshold: 0.05,
PositiveBuckets: []float64{5.1, 10, 7},
PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}},
PositiveSpans: []histogram.Span{{Offset: 3, Length: 3}},
NegativeBuckets: []float64{4.1, 5},
NegativeSpans: []histogram.Span{{Offset: -5, Length: 2}},
NegativeSpans: []histogram.Span{{Offset: 5, Length: 2}},
CounterResetHint: histogram.GaugeType,
}},
},

@ -4114,6 +4114,22 @@ func TestParseHistogramSeries(t *testing.T) {
CounterResetHint: histogram.GaugeType,
}},
},
{
name: "all properties used, with negative values where supported",
input: `{} {{schema:1 sum:-0.3 count:-3.1 z_bucket:-7.1 z_bucket_w:0.05 buckets:[-5.1 -10 -7] offset:-3 n_buckets:[-4.1 -5] n_offset:-5 counter_reset_hint:gauge}}`,
expected: []histogram.FloatHistogram{{
Schema: 1,
Sum: -0.3,
Count: -3.1,
ZeroCount: -7.1,
ZeroThreshold: 0.05,
PositiveBuckets: []float64{-5.1, -10, -7},
PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}},
NegativeBuckets: []float64{-4.1, -5},
NegativeSpans: []histogram.Span{{Offset: -5, Length: 2}},
CounterResetHint: histogram.GaugeType,
}},
},
{
name: "static series",
input: `{} {{buckets:[5 10 7] schema:1}}x2`,
@ -72,6 +72,11 @@ func (node *AggregateExpr) String() string {
return aggrString
}

func (node *AggregateExpr) ShortString() string {
aggrString := node.getAggOpStr()
return aggrString
}

func (node *AggregateExpr) getAggOpStr() string {
aggrString := node.Op.String()

@ -95,14 +100,20 @@ func joinLabels(ss []string) string {
return strings.Join(ss, ", ")
}

func (node *BinaryExpr) String() string {
returnBool := ""
func (node *BinaryExpr) returnBool() string {
if node.ReturnBool {
returnBool = " bool"
return " bool"
}
return ""
}

func (node *BinaryExpr) String() string {
matching := node.getMatchingStr()
return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, returnBool, matching, node.RHS)
return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, node.returnBool(), matching, node.RHS)
}

func (node *BinaryExpr) ShortString() string {
return fmt.Sprintf("%s%s%s", node.Op, node.returnBool(), node.getMatchingStr())
}

func (node *BinaryExpr) getMatchingStr() string {
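The refactor extracts the bool-modifier rendering into returnBool() so that String and the new ShortString share it. An illustrative comparison of the two format strings above, assuming getMatchingStr returns an empty string when no vector matching is specified:

// For a node representing `foo == bool bar`:
// String()      -> "foo == bool bar"  (both operands printed)
// ShortString() -> "== bool"          (operator and modifiers only)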
@ -130,9 +141,13 @@ func (node *Call) String() string {
return fmt.Sprintf("%s(%s)", node.Func.Name, node.Args)
}

func (node *MatrixSelector) String() string {
func (node *Call) ShortString() string {
return node.Func.Name
}

func (node *MatrixSelector) atOffset() (string, string) {
// Copy the Vector selector before changing the offset
vecSelector := *node.VectorSelector.(*VectorSelector)
vecSelector := node.VectorSelector.(*VectorSelector)
offset := ""
switch {
case vecSelector.OriginalOffset > time.Duration(0):

@ -149,7 +164,13 @@ func (node *MatrixSelector) String() string {
case vecSelector.StartOrEnd == END:
at = " @ end()"
}
return at, offset
}

func (node *MatrixSelector) String() string {
at, offset := node.atOffset()
// Copy the Vector selector before changing the offset
vecSelector := *node.VectorSelector.(*VectorSelector)
// Do not print the @ and offset twice.
offsetVal, atVal, preproc := vecSelector.OriginalOffset, vecSelector.Timestamp, vecSelector.StartOrEnd
vecSelector.OriginalOffset = 0

@ -163,10 +184,19 @@ func (node *MatrixSelector) String() string {
return str
}

func (node *MatrixSelector) ShortString() string {
at, offset := node.atOffset()
return fmt.Sprintf("[%s]%s%s", model.Duration(node.Range), at, offset)
}

func (node *SubqueryExpr) String() string {
return fmt.Sprintf("%s%s", node.Expr.String(), node.getSubqueryTimeSuffix())
}

func (node *SubqueryExpr) ShortString() string {
return node.getSubqueryTimeSuffix()
}

// getSubqueryTimeSuffix returns the '[<range>:<step>] @ <timestamp> offset <offset>' suffix of the subquery.
func (node *SubqueryExpr) getSubqueryTimeSuffix() string {
step := ""

@ -208,6 +238,10 @@ func (node *UnaryExpr) String() string {
return fmt.Sprintf("%s%s", node.Op, node.Expr)
}

func (node *UnaryExpr) ShortString() string {
return node.Op.String()
}

func (node *VectorSelector) String() string {
var labelStrings []string
if len(node.LabelMatchers) > 1 {
@ -28,12 +28,12 @@ import (
"github.com/prometheus/prometheus/util/teststorage"
)

func newTestEngine() *promql.Engine {
return promqltest.NewTestEngine(false, 0, promqltest.DefaultMaxSamplesPerQuery)
func newTestEngine(t *testing.T) *promql.Engine {
return promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
}

func TestEvaluations(t *testing.T) {
promqltest.RunBuiltinTests(t, newTestEngine(t))
}

// Run a lot of queries at the same time, to check for race conditions.

@ -48,7 +48,7 @@ func TestConcurrentRangeQueries(t *testing.T) {
}
// Enable experimental functions testing
parser.EnableExperimentalFunctions = true
engine := promql.NewEngine(opts)
engine := promqltest.NewTestEngineWithOpts(t, opts)

const interval = 10000 // 10s interval.
// A day of data plus 10k steps.
|
|
@ -79,8 +79,9 @@ func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage {
|
|||
return test.storage
|
||||
}
|
||||
|
||||
func NewTestEngine(enablePerStepStats bool, lookbackDelta time.Duration, maxSamples int) *promql.Engine {
|
||||
return promql.NewEngine(promql.EngineOpts{
|
||||
// NewTestEngine creates a promql.Engine with enablePerStepStats, lookbackDelta and maxSamples, and returns it.
|
||||
func NewTestEngine(tb testing.TB, enablePerStepStats bool, lookbackDelta time.Duration, maxSamples int) *promql.Engine {
|
||||
return NewTestEngineWithOpts(tb, promql.EngineOpts{
|
||||
Logger: nil,
|
||||
Reg: nil,
|
||||
MaxSamples: maxSamples,
|
||||
|
@ -94,6 +95,16 @@ func NewTestEngine(enablePerStepStats bool, lookbackDelta time.Duration, maxSamp
|
|||
})
|
||||
}
|
||||
|
||||
// NewTestEngineWithOpts creates a promql.Engine with opts and returns it.
|
||||
func NewTestEngineWithOpts(tb testing.TB, opts promql.EngineOpts) *promql.Engine {
|
||||
tb.Helper()
|
||||
ng := promql.NewEngine(opts)
|
||||
tb.Cleanup(func() {
|
||||
require.NoError(tb, ng.Close())
|
||||
})
|
||||
return ng
|
||||
}
|
||||
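A sketch of how a test would use the new helper; the options mirror the TestDeriv opts shown earlier in this diff, and the engine is closed automatically through the tb.Cleanup hook registered above:

func TestWithManagedEngine(t *testing.T) {
	engine := promqltest.NewTestEngineWithOpts(t, promql.EngineOpts{
		MaxSamples: 10000,
		Timeout:    10 * time.Second,
	})
	_ = engine // run queries; no explicit engine.Close() is needed.
}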
|
||||
// RunBuiltinTests runs an acceptance test suite against the provided engine.
|
||||
func RunBuiltinTests(t TBRun, engine promql.QueryEngine) {
|
||||
t.Cleanup(func() { parser.EnableExperimentalFunctions = false })
|
||||
|
@ -1436,7 +1447,11 @@ func (ll *LazyLoader) Storage() storage.Storage {
|
|||
// Close closes resources associated with the LazyLoader.
|
||||
func (ll *LazyLoader) Close() error {
|
||||
ll.cancelCtx()
|
||||
return ll.storage.Close()
|
||||
err := ll.queryEngine.Close()
|
||||
if sErr := ll.storage.Close(); sErr != nil {
|
||||
return errors.Join(sErr, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func makeInt64Pointer(val int64) *int64 {
|
||||
|
|
|
@ -237,7 +237,7 @@ eval instant at 5m sum by (group) (http_requests)
load 5m
testmetric {{}}

eval instant at 5m testmetric
eval instant at 0m testmetric
`,
expectedError: `error in eval testmetric (line 5): unexpected metric {__name__="testmetric"} in result, has value {count:0, sum:0}`,
},

@ -595,7 +595,7 @@ eval range from 0 to 5m step 5m testmetric

for name, testCase := range testCases {
t.Run(name, func(t *testing.T) {
err := runTest(t, testCase.input, NewTestEngine(false, 0, DefaultMaxSamplesPerQuery))
err := runTest(t, testCase.input, NewTestEngine(t, false, 0, DefaultMaxSamplesPerQuery))

if testCase.expectedError == "" {
require.NoError(t, err)
promql/promqltest/testdata/aggregators.test
@ -250,7 +250,7 @@ clear
load 5m
http_requests{job="api-server", instance="0", group="production"} 0+10x10
http_requests{job="api-server", instance="1", group="production"} 0+20x10
http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
http_requests{job="app-server", instance="0", group="production"} 0+50x10

@ -337,32 +337,32 @@ load 5m
version{job="app-server", instance="0", group="canary"} 7
version{job="app-server", instance="1", group="canary"} 7

eval instant at 5m count_values("version", version)
eval instant at 1m count_values("version", version)
{version="6"} 5
{version="7"} 2
{version="8"} 2

eval instant at 5m count_values(((("version"))), version)
eval instant at 1m count_values(((("version"))), version)
{version="6"} 5
{version="7"} 2
{version="8"} 2

eval instant at 5m count_values without (instance)("version", version)
eval instant at 1m count_values without (instance)("version", version)
{job="api-server", group="production", version="6"} 3
{job="api-server", group="canary", version="8"} 2
{job="app-server", group="production", version="6"} 2
{job="app-server", group="canary", version="7"} 2

# Overwrite label with output. Don't do this.
eval instant at 5m count_values without (instance)("job", version)
eval instant at 1m count_values without (instance)("job", version)
{job="6", group="production"} 5
{job="8", group="canary"} 2
{job="7", group="canary"} 2

# Overwrite label with output. Don't do this.
eval instant at 5m count_values by (job, group)("job", version)
eval instant at 1m count_values by (job, group)("job", version)
{job="6", group="production"} 5
{job="8", group="canary"} 2
{job="7", group="canary"} 2
promql/promqltest/testdata/at_modifier.test
@ -121,45 +121,43 @@ eval instant at 25s sum_over_time(metric{job="1"}[100:1] offset 20 @ 100)

# Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries.
# Inner most sum=1+2+...+10=55.
# With [100s:25s] subquery, it's 55*5.
# With [100s:25s] subquery, it's 55*4.
eval instant at 100s sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50)
{job="1"} 275
{job="1"} 220

# Nested subqueries with different timestamps on both.

# Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries.
# Sum of innermost subquery is 275 as above. The outer subquery repeats it 4 times.
# Sum of innermost subquery is 220 as above. The outer subquery repeats it 3 times.
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50)[3s:1s] @ 3000)
{job="1"} 1100
{job="1"} 660

# Testing the inner subquery timestamp since vector selector does not have @.

# Inner sum for subquery [100s:25s] @ 50 are
# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=4+5=9.
# This sum of 11 is repeated 4 times by outer subquery.
# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=5.
# This sum of 7 is repeated 3 times by outer subquery.
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 50)[3s:1s] @ 200)
{job="1"} 44
{job="1"} 21

# Inner sum for subquery [100s:25s] @ 200 are
# at 100=9+10, at 125=12, at 150=14+15, at 175=17, at 200=19+20.
# This sum of 116 is repeated 4 times by outer subquery.
# at 125=12, at 150=15, at 175=17, at 200=20.
# This sum of 64 is repeated 3 times by outer subquery.
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 200)[3s:1s] @ 50)
{job="1"} 464
{job="1"} 192

# Nested subqueries with timestamp only on outer subquery.
# Outer most subquery:
# at 900=783
# inner subquery: at 870=87+86+85, at 880=88+87+86, at 890=89+88+87
# at 925=537
# inner subquery: at 895=89+88, at 905=90+89, at 915=90+91
# at 950=828
# inner subquery: at 920=92+91+90, at 930=93+92+91, at 940=94+93+92
# at 975=567
# inner subquery: at 945=94+93, at 955=95+94, at 965=96+95
# at 1000=873
# inner subquery: at 970=97+96+95, at 980=98+97+96, at 990=99+98+97
# at 925=360
# inner subquery: at 905=90+89, at 915=91+90
# at 950=372
# inner subquery: at 930=93+92, at 940=94+93
# at 975=380
# inner subquery: at 955=95+94, at 965=96+95
# at 1000=392
# inner subquery: at 980=98+97, at 990=99+98
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[20s])[20s:10s] offset 10s)[100s:25s] @ 1000)
{job="1"} 3588
{job="1"} 1504

# minute is counted on the value of the sample.
eval instant at 10s minute(metric @ 1500)
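The revised numbers follow from subquery ranges becoming left-open as well; a worked step count for the first case above, derived from the expected values rather than stated explicitly in the diff:

# [100s:25s] @ 50 used to evaluate at t = -50, -25, 0, 25, 50 (5 steps),
# so the inner sum 55 gave 55*5 = 275. Left-open, t = -50 is excluded,
# leaving 4 steps and 55*4 = 220. The same logic turns the outer
# [3s:1s] window from 4 evaluations into 3.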
@ -182,32 +180,32 @@ eval instant at 15m timestamp(timestamp(metric{job="1"} @ 10))

# minute is counted on the value of the sample.
eval instant at 0s sum_over_time(minute(metric @ 1500)[100s:10s])
{job="1"} 22
{job="2"} 55
{job="1"} 20
{job="2"} 50

# If nothing passed, minute() takes eval time.
# Here the eval time is determined by the subquery.
# [50m:1m] at 6000, i.e. 100m, is 50m to 100m.
# sum=50+51+52+...+59+0+1+2+...+40.
# sum=51+52+...+59+0+1+2+...+40.
eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000)
{} 1315

# sum=46+47+...+59+0+1+2+...+35.
eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m)
{} 1365

# sum=45+46+47+...+59+0+1+2+...+35.
eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m)
{} 1410

# time() is the eval time which is determined by subquery here.
# 2900+2901+...+3000 = (3000*3001 - 2899*2900)/2.
# 2901+...+3000 = (3000*3001 - 2900*2901)/2.
eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000)
{} 297950
{} 295050

# 2300+2301+...+2400 = (2400*2401 - 2299*2300)/2.
# 2301+...+2400 = (2400*2401 - 2300*2301)/2.
eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000 offset 600s)
{} 237350
{} 235050

# timestamp() takes the time of the sample and not the evaluation time.
eval instant at 0s sum_over_time(timestamp(metric{job="1"} @ 10)[100s:10s] @ 3000)
{job="1"} 110
{job="1"} 100

# The result of inner timestamp() will have the timestamp as the
# eval time, hence entire expression is not step invariant and depends on eval time.
promql/promqltest/testdata/functions.test
@ -6,11 +6,13 @@ load 5m

# Tests for resets().
eval instant at 50m resets(http_requests[5m])

eval instant at 50m resets(http_requests[10m])
{path="/foo"} 0
{path="/bar"} 0
{path="/biz"} 0

eval instant at 50m resets(http_requests[300])
eval instant at 50m resets(http_requests[600])
{path="/foo"} 0
{path="/bar"} 0
{path="/biz"} 0

@ -21,6 +23,11 @@ eval instant at 50m resets(http_requests[20m])
{path="/biz"} 0

eval instant at 50m resets(http_requests[30m])
{path="/foo"} 1
{path="/bar"} 0
{path="/biz"} 0

eval instant at 50m resets(http_requests[32m])
{path="/foo"} 2
{path="/bar"} 1
{path="/biz"} 0
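The widened windows follow from range selectors becoming left-open in this change. Worked out for the data above (samples every 5m), an interpretation of the new expectations rather than something stated in the diff:

# old: [5m] at 50m covers [45m, 50m] -> samples at 45m and 50m.
# new: [5m] at 50m covers (45m, 50m] -> only the sample at 50m,
#      hence the tests widen to [10m] (and 300 to 600) so that each
#      window still contains the same samples as before.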
@ -34,28 +41,30 @@ eval instant at 50m resets(nonexistent_metric[50m])

# Tests for changes().
eval instant at 50m changes(http_requests[5m])

eval instant at 50m changes(http_requests[6m])
{path="/foo"} 0
{path="/bar"} 0
{path="/biz"} 0

eval instant at 50m changes(http_requests[20m])
{path="/foo"} 3
{path="/bar"} 3
{path="/foo"} 2
{path="/bar"} 2
{path="/biz"} 0

eval instant at 50m changes(http_requests[30m])
{path="/foo"} 4
{path="/bar"} 5
{path="/biz"} 1
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0

eval instant at 50m changes(http_requests[50m])
{path="/foo"} 8
{path="/bar"} 9
{path="/foo"} 7
{path="/bar"} 8
{path="/biz"} 1

eval instant at 50m changes((http_requests[50m]))
{path="/foo"} 8
{path="/bar"} 9
{path="/foo"} 7
{path="/bar"} 8
{path="/biz"} 1

eval instant at 50m changes(nonexistent_metric[50m])

@ -66,7 +75,7 @@ load 5m
x{a="b"} NaN NaN NaN
x{a="c"} 0 NaN 0

eval instant at 15m changes(x[15m])
eval instant at 15m changes(x[20m])
{a="b"} 0
{a="c"} 2
@ -75,14 +84,14 @@ clear
# Tests for increase().
load 5m
http_requests{path="/foo"} 0+10x10
http_requests{path="/bar"} 0+10x5 0+10x5
http_requests{path="/bar"} 0+18x5 0+18x5
http_requests{path="/dings"} 10+10x10
http_requests{path="/bumms"} 1+10x10

# Tests for increase().
eval instant at 50m increase(http_requests[50m])
{path="/foo"} 100
{path="/bar"} 90
{path="/bar"} 160
{path="/dings"} 100
{path="/bumms"} 100

@ -95,7 +104,7 @@ eval instant at 50m increase(http_requests[50m])
# value, and therefore the extrapolation happens only by 30s.
eval instant at 50m increase(http_requests[100m])
{path="/foo"} 100
{path="/bar"} 90
{path="/bar"} 162
{path="/dings"} 105
{path="/bumms"} 101
@ -115,15 +124,17 @@ clear

# Tests for rate().
load 5m
testcounter_reset_middle 0+10x4 0+10x5
testcounter_reset_middle 0+27x4 0+27x5
testcounter_reset_end 0+10x9 0 10

# Counter resets in the middle of the range are handled correctly by rate().
eval instant at 50m rate(testcounter_reset_middle[50m])
{} 0.03
{} 0.08

# Counter resets at end of range are ignored by rate().
eval instant at 50m rate(testcounter_reset_end[5m])

eval instant at 50m rate(testcounter_reset_end[6m])
{} 0

clear
@ -242,24 +253,24 @@ eval instant at 50m deriv(testcounter_reset_middle[100m])
# intercept at t=3000: 38.63636363636364
# intercept at t=3000+3600: 76.81818181818181
eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600)
{} 76.81818181818181
{} 70

eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h)
{} 76.81818181818181
{} 70

# intercept at t = 3000+3600 = 6600
eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
{} 76.81818181818181

eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 1h)
eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 1h)
{} 76.81818181818181

# intercept at t = 600+3600 = 4200
eval instant at 10m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
eval instant at 10m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
{} 51.36363636363637

# intercept at t = 4200+3600 = 7800
eval instant at 70m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
eval instant at 70m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
{} 89.54545454545455

# With http_requests, there is a sample value exactly at the end of
@ -467,7 +478,7 @@ load 5m
http_requests{job="api-server", instance="1", group="production"} 0+20x10
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="app-server", instance="0", group="production"} 0+50x10
http_requests{job="app-server", instance="1", group="production"} 0+60x10
http_requests{job="app-server", instance="0", group="canary"} 0+70x10

@ -502,7 +513,7 @@ load 5m
http_requests{job="api-server", instance="1", group="production"} 0+20x10
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="app-server", instance="0", group="production"} 0+50x10
http_requests{job="app-server", instance="1", group="production"} 0+60x10
http_requests{job="app-server", instance="0", group="canary"} 0+70x10
@ -688,10 +699,10 @@ load 10s
metric9 -9.988465674311579e+307 -9.988465674311579e+307 -9.988465674311579e+307
metric10 -9.988465674311579e+307 9.988465674311579e+307

eval instant at 1m avg_over_time(metric[1m])
eval instant at 55s avg_over_time(metric[1m])
{} 3

eval instant at 1m sum_over_time(metric[1m])/count_over_time(metric[1m])
eval instant at 55s sum_over_time(metric[1m])/count_over_time(metric[1m])
{} 3

eval instant at 1m avg_over_time(metric2[1m])

@ -758,8 +769,8 @@ eval instant at 1m avg_over_time(metric8[1m])
{} 9.988465674311579e+307

# This overflows float64.
eval instant at 1m sum_over_time(metric8[1m])/count_over_time(metric8[1m])
{} Inf
eval instant at 1m sum_over_time(metric8[2m])/count_over_time(metric8[2m])
{} +Inf

eval instant at 1m avg_over_time(metric9[1m])
{} -9.988465674311579e+307

@ -768,10 +779,16 @@ eval instant at 1m avg_over_time(metric9[1m])
eval instant at 1m sum_over_time(metric9[1m])/count_over_time(metric9[1m])
{} -Inf

eval instant at 1m avg_over_time(metric10[1m])
eval instant at 45s avg_over_time(metric10[1m])
{} 0

eval instant at 1m sum_over_time(metric10[1m])/count_over_time(metric10[1m])
eval instant at 1m avg_over_time(metric10[2m])
{} 0

eval instant at 45s sum_over_time(metric10[1m])/count_over_time(metric10[1m])
{} 0

eval instant at 1m sum_over_time(metric10[2m])/count_over_time(metric10[2m])
{} 0

# Test if very big intermediate values cause loss of detail.
@ -779,10 +796,10 @@ clear
load 10s
metric 1 1e100 1 -1e100

eval instant at 1m sum_over_time(metric[1m])
eval instant at 1m sum_over_time(metric[2m])
{} 2

eval instant at 1m avg_over_time(metric[1m])
eval instant at 1m avg_over_time(metric[2m])
{} 0.5

# Tests for stddev_over_time and stdvar_over_time.

@ -790,13 +807,13 @@ clear
load 10s
metric 0 8 8 2 3

eval instant at 1m stdvar_over_time(metric[1m])
eval instant at 1m stdvar_over_time(metric[2m])
{} 10.56

eval instant at 1m stddev_over_time(metric[1m])
eval instant at 1m stddev_over_time(metric[2m])
{} 3.249615

eval instant at 1m stddev_over_time((metric[1m]))
eval instant at 1m stddev_over_time((metric[2m]))
{} 3.249615

# Tests for stddev_over_time and stdvar_over_time #4927.
@ -826,42 +843,42 @@ load 10s
data{test="three samples"} 0 1 2
data{test="uneven samples"} 0 1 4

eval instant at 1m quantile_over_time(0, data[1m])
eval instant at 1m quantile_over_time(0, data[2m])
{test="two samples"} 0
{test="three samples"} 0
{test="uneven samples"} 0

eval instant at 1m quantile_over_time(0.5, data[1m])
eval instant at 1m quantile_over_time(0.5, data[2m])
{test="two samples"} 0.5
{test="three samples"} 1
{test="uneven samples"} 1

eval instant at 1m quantile_over_time(0.75, data[1m])
eval instant at 1m quantile_over_time(0.75, data[2m])
{test="two samples"} 0.75
{test="three samples"} 1.5
{test="uneven samples"} 2.5

eval instant at 1m quantile_over_time(0.8, data[1m])
eval instant at 1m quantile_over_time(0.8, data[2m])
{test="two samples"} 0.8
{test="three samples"} 1.6
{test="uneven samples"} 2.8

eval instant at 1m quantile_over_time(1, data[1m])
eval instant at 1m quantile_over_time(1, data[2m])
{test="two samples"} 1
{test="three samples"} 2
{test="uneven samples"} 4

eval_warn instant at 1m quantile_over_time(-1, data[1m])
eval_warn instant at 1m quantile_over_time(-1, data[2m])
{test="two samples"} -Inf
{test="three samples"} -Inf
{test="uneven samples"} -Inf

eval_warn instant at 1m quantile_over_time(2, data[1m])
eval_warn instant at 1m quantile_over_time(2, data[2m])
{test="two samples"} +Inf
{test="three samples"} +Inf
{test="uneven samples"} +Inf

eval_warn instant at 1m (quantile_over_time(2, (data[1m])))
eval_warn instant at 1m (quantile_over_time(2, (data[2m])))
{test="two samples"} +Inf
{test="three samples"} +Inf
{test="uneven samples"} +Inf
@ -969,21 +986,21 @@ load 10s
data{type="some_nan3"} NaN 0 1
data{type="only_nan"} NaN NaN NaN

eval instant at 1m min_over_time(data[1m])
eval instant at 1m min_over_time(data[2m])
{type="numbers"} 0
{type="some_nan"} 0
{type="some_nan2"} 1
{type="some_nan3"} 0
{type="only_nan"} NaN

eval instant at 1m max_over_time(data[1m])
eval instant at 1m max_over_time(data[2m])
{type="numbers"} 3
{type="some_nan"} 2
{type="some_nan2"} 2
{type="some_nan3"} 1
{type="only_nan"} NaN

eval instant at 1m last_over_time(data[1m])
eval instant at 1m last_over_time(data[2m])
data{type="numbers"} 3
data{type="some_nan"} NaN
data{type="some_nan2"} 1
@ -1076,13 +1093,19 @@ eval instant at 1m absent_over_time(httpd_log_lines_total[30s])
{} 1

eval instant at 15m absent_over_time(http_requests[5m])

eval instant at 16m absent_over_time(http_requests[5m])
{} 1

eval instant at 15m absent_over_time(http_requests[10m])

eval instant at 16m absent_over_time(http_requests[6m])
{} 1

eval instant at 16m absent_over_time(http_requests[16m])

eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m])
{} 1

eval instant at 16m absent_over_time(httpd_handshake_failures_total[2m])

eval instant at 16m absent_over_time({instance="127.0.0.1"}[5m])

@ -1138,17 +1161,18 @@ eval instant at 0m present_over_time(httpd_log_lines_total[30s])
eval instant at 1m present_over_time(httpd_log_lines_total[30s])

eval instant at 15m present_over_time(http_requests[5m])

eval instant at 15m present_over_time(http_requests[10m])
{instance="127.0.0.1", job="httpd", path="/bar"} 1
{instance="127.0.0.1", job="httpd", path="/foo"} 1

eval instant at 16m present_over_time(http_requests[5m])

eval instant at 16m present_over_time(http_requests[6m])

eval instant at 16m present_over_time(http_requests[16m])
{instance="127.0.0.1", job="httpd", path="/bar"} 1
{instance="127.0.0.1", job="httpd", path="/foo"} 1

eval instant at 16m present_over_time(httpd_handshake_failures_total[1m])
{instance="127.0.0.1", job="node"} 1

eval instant at 16m present_over_time({instance="127.0.0.1"}[5m])
{instance="127.0.0.1",job="node"} 1
@ -1169,59 +1193,59 @@ load 5m
exp_root_log{l="x"} 10
exp_root_log{l="y"} 20

eval instant at 5m exp(exp_root_log)
eval instant at 1m exp(exp_root_log)
{l="x"} 22026.465794806718
{l="y"} 485165195.4097903

eval instant at 5m exp(exp_root_log - 10)
eval instant at 1m exp(exp_root_log - 10)
{l="y"} 22026.465794806718
{l="x"} 1

eval instant at 5m exp(exp_root_log - 20)
eval instant at 1m exp(exp_root_log - 20)
{l="x"} 4.5399929762484854e-05
{l="y"} 1

eval instant at 5m ln(exp_root_log)
eval instant at 1m ln(exp_root_log)
{l="x"} 2.302585092994046
{l="y"} 2.995732273553991

eval instant at 5m ln(exp_root_log - 10)
eval instant at 1m ln(exp_root_log - 10)
{l="y"} 2.302585092994046
{l="x"} -Inf

eval instant at 5m ln(exp_root_log - 20)
eval instant at 1m ln(exp_root_log - 20)
{l="y"} -Inf
{l="x"} NaN

eval instant at 5m exp(ln(exp_root_log))
eval instant at 1m exp(ln(exp_root_log))
{l="y"} 20
{l="x"} 10

eval instant at 5m sqrt(exp_root_log)
eval instant at 1m sqrt(exp_root_log)
{l="x"} 3.1622776601683795
{l="y"} 4.47213595499958

eval instant at 5m log2(exp_root_log)
eval instant at 1m log2(exp_root_log)
{l="x"} 3.3219280948873626
{l="y"} 4.321928094887363

eval instant at 5m log2(exp_root_log - 10)
eval instant at 1m log2(exp_root_log - 10)
{l="y"} 3.3219280948873626
{l="x"} -Inf

eval instant at 5m log2(exp_root_log - 20)
eval instant at 1m log2(exp_root_log - 20)
{l="x"} NaN
{l="y"} -Inf

eval instant at 5m log10(exp_root_log)
eval instant at 1m log10(exp_root_log)
{l="x"} 1
{l="y"} 1.301029995663981

eval instant at 5m log10(exp_root_log - 10)
eval instant at 1m log10(exp_root_log - 10)
{l="y"} 1
{l="x"} -Inf

eval instant at 5m log10(exp_root_log - 20)
eval instant at 1m log10(exp_root_log - 20)
{l="x"} NaN
{l="y"} -Inf
82 promql/promqltest/testdata/histograms.test vendored
@@ -108,8 +108,8 @@ eval instant at 50m histogram_stdvar(testhistogram3)
eval instant at 50m histogram_fraction(0, 0.2, testhistogram3)
{start="positive"} 0.6363636363636364
{start="negative"} 0

eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m]))
eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[10m]))
{start="positive"} 0.6363636363636364
{start="negative"} 0

@@ -118,8 +118,8 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m]))

eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count
{start="positive"} 0.6363636363636364

eval instant at 50m rate(testhistogram3_bucket{le=".2"}[5m]) / ignoring(le) rate(testhistogram3_count[5m])
eval instant at 50m rate(testhistogram3_bucket{le=".2"}[10m]) / ignoring(le) rate(testhistogram3_count[10m])
{start="positive"} 0.6363636363636364

# Test histogram_quantile, native and classic.
@@ -241,28 +241,27 @@ eval instant at 50m histogram_quantile(0.8, testhistogram_bucket)
{start="negative"} 0.3

# More realistic with rates.
eval instant at 50m histogram_quantile(0.2, rate(testhistogram[5m]))
eval instant at 50m histogram_quantile(0.2, rate(testhistogram[10m]))
{start="positive"} 0.048
{start="negative"} -0.2

eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m]))
eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[10m]))
{start="positive"} 0.048
{start="negative"} -0.2

eval instant at 50m histogram_quantile(0.5, rate(testhistogram[5m]))
eval instant at 50m histogram_quantile(0.5, rate(testhistogram[10m]))
{start="positive"} 0.15
{start="negative"} -0.15

eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m]))
eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[10m]))
{start="positive"} 0.15
{start="negative"} -0.15

eval instant at 50m histogram_quantile(0.8, rate(testhistogram[5m]))
eval instant at 50m histogram_quantile(0.8, rate(testhistogram[10m]))
{start="positive"} 0.72
{start="negative"} 0.3

eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m]))
eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[10m]))
{start="positive"} 0.72
{start="negative"} 0.3
@@ -307,115 +306,112 @@ eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m]))
# Aggregated histogram: Everything in one. Note how native histograms
# don't require aggregation by le.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])))
{} 0.075

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.075

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])))
{} 0.1277777777777778

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.1277777777777778

# Aggregated histogram: Everything in one. Now with avg, which does not change anything.
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[5m])))
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[10m])))
{} 0.075

eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.075

eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[5m])))
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[10m])))
{} 0.12777777777777778

eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.12777777777777778

# Aggregated histogram: By instance.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (instance))
{instance="ins1"} 0.075
{instance="ins2"} 0.075

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
{instance="ins1"} 0.075
{instance="ins2"} 0.075

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (instance))
{instance="ins1"} 0.1333333333
{instance="ins2"} 0.125

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
{instance="ins1"} 0.1333333333
{instance="ins2"} 0.125

# Aggregated histogram: By job.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job))
{job="job1"} 0.1
{job="job2"} 0.0642857142857143

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
{job="job1"} 0.1
{job="job2"} 0.0642857142857143

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job))
{job="job1"} 0.14
{job="job2"} 0.1125

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
{job="job1"} 0.14
{job="job2"} 0.1125

# Aggregated histogram: By job and instance.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job, instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job, instance))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job, instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job, instance))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.1333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.1166666666666667

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.1333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.1166666666666667

# The unaggregated histogram for comparison. Same result as the previous one.
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[5m]))
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[10m]))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675

eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m]))
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m]))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675

eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[5m]))
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[10m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.13333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.11666666666666667

eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m]))
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.13333333333333333
{instance="ins1", job="job2"} 0.1
@@ -448,19 +444,19 @@ eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket)
{} 979.75

# Buckets with different representations of the same upper bound.
eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m]))
eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[10m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} NaN

eval instant at 50m histogram_quantile(0.5, rate(mixed[5m]))
eval instant at 50m histogram_quantile(0.5, rate(mixed[10m]))
{instance="ins1", job="job1"} 0.2
{instance="ins2", job="job1"} NaN

eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m]))
eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[10m]))
{instance="ins1", job="job1"} 0.2
{instance="ins2", job="job1"} NaN

eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m]))
eval instant at 50m histogram_quantile(1, rate(mixed_bucket[10m]))
{instance="ins1", job="job1"} 0.2
{instance="ins2", job="job1"} NaN

@@ -469,7 +465,7 @@ load_with_nhcb 5m
empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10
empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10

eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m]))
eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[10m]))
{instance="ins1", job="job1"} NaN

# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set.
@@ -4,81 +4,81 @@ load 5m
another_metric{env="1"} 60 120 180

# Does not drop __name__ for vector selector
eval instant at 15m metric{env="1"}
eval instant at 10m metric{env="1"}
metric{env="1"} 120

# Drops __name__ for unary operators
eval instant at 15m -metric
eval instant at 10m -metric
{env="1"} -120

# Drops __name__ for binary operators
eval instant at 15m metric + another_metric
eval instant at 10m metric + another_metric
{env="1"} 300

# Does not drop __name__ for binary comparison operators
eval instant at 15m metric <= another_metric
eval instant at 10m metric <= another_metric
metric{env="1"} 120

# Drops __name__ for binary comparison operators with "bool" modifier
eval instant at 15m metric <= bool another_metric
eval instant at 10m metric <= bool another_metric
{env="1"} 1

# Drops __name__ for vector-scalar operations
eval instant at 15m metric * 2
eval instant at 10m metric * 2
{env="1"} 240

# Drops __name__ for instant-vector functions
eval instant at 15m clamp(metric, 0, 100)
eval instant at 10m clamp(metric, 0, 100)
{env="1"} 100

# Drops __name__ for range-vector functions
eval instant at 15m rate(metric{env="1"}[10m])
eval instant at 10m rate(metric{env="1"}[10m])
{env="1"} 0.2

# Does not drop __name__ for last_over_time function
eval instant at 15m last_over_time(metric{env="1"}[10m])
eval instant at 10m last_over_time(metric{env="1"}[10m])
metric{env="1"} 120

# Drops __name__ for other _over_time functions
eval instant at 15m max_over_time(metric{env="1"}[10m])
eval instant at 10m max_over_time(metric{env="1"}[10m])
{env="1"} 120

# Allows relabeling (to-be-dropped) __name__ via label_replace
eval instant at 15m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)")
eval instant at 10m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)")
{my_name="rate_metric", env="1"} 0.2
{my_name="rate_another_metric", env="1"} 0.2

# Allows preserving __name__ via label_replace
eval instant at 15m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)")
eval instant at 10m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)")
rate_metric{env="1"} 0.2
rate_another_metric{env="1"} 0.2

# Allows relabeling (to-be-dropped) __name__ via label_join
eval instant at 15m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__")
eval instant at 10m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__")
{my_name="metric", env="1"} 0.2
{my_name="another_metric", env="1"} 0.2

# Allows preserving __name__ via label_join
eval instant at 15m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env")
eval instant at 10m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env")
metric_1{env="1"} 0.2
another_metric_1{env="1"} 0.2

# Does not drop metric names for aggregation operators
eval instant at 15m sum by (__name__, env) (metric{env="1"})
eval instant at 10m sum by (__name__, env) (metric{env="1"})
metric{env="1"} 120

# Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by the not-yet-removed __name__ label)
# This is an accidental side effect of delayed __name__ label dropping
eval_fail instant at 15m sum by (__name__) (rate({env="1"}[10m]))
eval_fail instant at 10m sum by (__name__) (rate({env="1"}[10m]))

# Aggregation operators aggregate metrics with the same labelset and to-be-dropped names
# This is an accidental side effect of delayed __name__ label dropping
eval instant at 15m sum(rate({env="1"}[10m])) by (env)
eval instant at 10m sum(rate({env="1"}[10m])) by (env)
{env="1"} 0.4

# Aggregation operators propagate __name__ label dropping information
eval instant at 15m topk(10, sum by (__name__, env) (metric{env="1"}))
eval instant at 10m topk(10, sum by (__name__, env) (metric{env="1"}))
metric{env="1"} 120

eval instant at 15m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m])))
eval instant at 10m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m])))
{env="1"} 0.2
142 promql/promqltest/testdata/native_histograms.test vendored
@@ -2,55 +2,55 @@
load 5m
empty_histogram {{}}

eval instant at 5m empty_histogram
eval instant at 1m empty_histogram
{__name__="empty_histogram"} {{}}

eval instant at 5m histogram_count(empty_histogram)
eval instant at 1m histogram_count(empty_histogram)
{} 0

eval instant at 5m histogram_sum(empty_histogram)
eval instant at 1m histogram_sum(empty_histogram)
{} 0

eval instant at 5m histogram_avg(empty_histogram)
eval instant at 1m histogram_avg(empty_histogram)
{} NaN

eval instant at 5m histogram_fraction(-Inf, +Inf, empty_histogram)
eval instant at 1m histogram_fraction(-Inf, +Inf, empty_histogram)
{} NaN

eval instant at 5m histogram_fraction(0, 8, empty_histogram)
eval instant at 1m histogram_fraction(0, 8, empty_histogram)
{} NaN

clear

# buckets:[1 2 1] means 1 observation in the 1st bucket, 2 observations in the 2nd and 1 observation in the 3rd (total 4).
load 5m
single_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}

# histogram_count extracts the count property from the histogram.
eval instant at 5m histogram_count(single_histogram)
eval instant at 1m histogram_count(single_histogram)
{} 4

# histogram_sum extracts the sum property from the histogram.
eval instant at 5m histogram_sum(single_histogram)
eval instant at 1m histogram_sum(single_histogram)
{} 5

# histogram_avg calculates the average from sum and count properties.
eval instant at 5m histogram_avg(single_histogram)
eval instant at 1m histogram_avg(single_histogram)
{} 1.25

# We expect half of the values to fall in the range 1 < x <= 2.
eval instant at 5m histogram_fraction(1, 2, single_histogram)
eval instant at 1m histogram_fraction(1, 2, single_histogram)
{} 0.5

# We expect all values to fall in the range 0 < x <= 8.
eval instant at 5m histogram_fraction(0, 8, single_histogram)
eval instant at 1m histogram_fraction(0, 8, single_histogram)
{} 1

# Median is 1.5 due to linear estimation of the midpoint of the middle bucket, whose values are within range 1 < x <= 2.
eval instant at 5m histogram_quantile(0.5, single_histogram)
eval instant at 1m histogram_quantile(0.5, single_histogram)
{} 1.5
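The 1.5 follows directly from the bucket layout: buckets:[1 2 1] places ranks 2 and 3 of the four observations in the middle bucket (1, 2]. The 0.5 quantile needs rank 2, the first of the two observations in that bucket, so linear interpolation across the bucket gives 1 + (2 - 1) * (1/2) = 1.5.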
clear

# Repeat the same histogram 10 times.
load 5m
@@ -88,7 +88,7 @@ eval instant at 50m histogram_fraction(1, 2, multi_histogram)
eval instant at 50m histogram_quantile(0.5, multi_histogram)
{} 1.5

clear

# Accumulate the histogram addition for 10 iterations, offset is a bucket position where offset:0 is always the bucket
# with an upper limit of 1 and offset:1 is the bucket which follows to the right. Negative offsets represent bucket

@@ -133,14 +133,14 @@ eval instant at 50m histogram_quantile(0.5, incr_histogram)
{} 1.5

# Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum.
eval instant at 50m rate(incr_histogram[5m])
{} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}
eval instant at 50m rate(incr_histogram[10m])
{} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}

# Calculate the 50th percentile of observations over the last 10m.
eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m]))
{} 1.5

clear

# Schema represents the histogram resolution, different schema have compatible bucket boundaries, e.g.:
# 0: 1 2 4 8 16 32 64 (higher resolution)

@@ -166,77 +166,77 @@ eval instant at 5m histogram_avg(low_res_histogram)
eval instant at 5m histogram_fraction(1, 4, low_res_histogram)
{} 1

clear

# z_bucket:1 means there is one observation in the zero bucket and z_bucket_w:0.5 means the zero bucket has the range
# 0 < x <= 0.5. Sum and count are expected to represent all observations in the histogram, including those in the zero bucket.
load 5m
single_zero_histogram {{schema:0 z_bucket:1 z_bucket_w:0.5 sum:0.25 count:1}}

eval instant at 5m histogram_count(single_zero_histogram)
eval instant at 1m histogram_count(single_zero_histogram)
{} 1

eval instant at 5m histogram_sum(single_zero_histogram)
eval instant at 1m histogram_sum(single_zero_histogram)
{} 0.25

eval instant at 5m histogram_avg(single_zero_histogram)
eval instant at 1m histogram_avg(single_zero_histogram)
{} 0.25

# When only the zero bucket is populated, or there are negative buckets, the distribution is assumed to be equally
# distributed around zero; i.e. that there are an equal number of positive and negative observations. Therefore the
# entire distribution must lie within the full range of the zero bucket, in this case: -0.5 < x <= +0.5.
eval instant at 5m histogram_fraction(-0.5, 0.5, single_zero_histogram)
eval instant at 1m histogram_fraction(-0.5, 0.5, single_zero_histogram)
{} 1

# Half of the observations are estimated to be zero, as this is the midpoint between -0.5 and +0.5.
eval instant at 5m histogram_quantile(0.5, single_zero_histogram)
eval instant at 1m histogram_quantile(0.5, single_zero_histogram)
{} 0

clear

# Let's turn single_histogram upside-down.
load 5m
negative_histogram {{schema:0 sum:-5 count:4 n_buckets:[1 2 1]}}

eval instant at 5m histogram_count(negative_histogram)
eval instant at 1m histogram_count(negative_histogram)
{} 4

eval instant at 5m histogram_sum(negative_histogram)
eval instant at 1m histogram_sum(negative_histogram)
{} -5

eval instant at 5m histogram_avg(negative_histogram)
eval instant at 1m histogram_avg(negative_histogram)
{} -1.25

# We expect half of the values to fall in the range -2 < x <= -1.
eval instant at 5m histogram_fraction(-2, -1, negative_histogram)
eval instant at 1m histogram_fraction(-2, -1, negative_histogram)
{} 0.5

eval instant at 5m histogram_quantile(0.5, negative_histogram)
eval instant at 1m histogram_quantile(0.5, negative_histogram)
{} -1.5

clear

# Two histogram samples.
load 5m
two_samples_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}} {{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}}

# We expect to see the newest sample.
eval instant at 10m histogram_count(two_samples_histogram)
eval instant at 5m histogram_count(two_samples_histogram)
{} 4

eval instant at 10m histogram_sum(two_samples_histogram)
eval instant at 5m histogram_sum(two_samples_histogram)
{} -4

eval instant at 10m histogram_avg(two_samples_histogram)
eval instant at 5m histogram_avg(two_samples_histogram)
{} -1

eval instant at 10m histogram_fraction(-2, -1, two_samples_histogram)
eval instant at 5m histogram_fraction(-2, -1, two_samples_histogram)
{} 0.5

eval instant at 10m histogram_quantile(0.5, two_samples_histogram)
eval instant at 5m histogram_quantile(0.5, two_samples_histogram)
{} -1.5

clear

# Add two histograms with negated data.
load 5m

@@ -259,6 +259,8 @@ eval instant at 5m histogram_fraction(0, 4, balanced_histogram)
eval instant at 5m histogram_quantile(0.5, balanced_histogram)
{} 0.5

clear

# Add histogram to test sum(last_over_time) regression
load 5m
incr_sum_histogram{number="1"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:1 count:1 buckets:[1]}}x10

@@ -270,6 +272,8 @@ eval instant at 50m histogram_sum(sum(incr_sum_histogram))
eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m])))
{} 30

clear

# Apply rate function to histogram.
load 15s
histogram_rate {{schema:1 count:12 sum:18.4 z_bucket:2 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[1 2 0 1 1]}}+{{schema:1 count:9 sum:18.4 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 0 1 1] n_buckets:[1 1 0 1 1]}}x100

@@ -280,6 +284,8 @@ eval instant at 5m rate(histogram_rate[45s])
eval range from 5m to 5m30s step 30s rate(histogram_rate[45s])
{} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}x1

clear

# Apply count and sum function to histogram.
load 10m
histogram_count_sum_2 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1

@@ -290,6 +296,8 @@ eval instant at 10m histogram_count(histogram_count_sum_2)
eval instant at 10m histogram_sum(histogram_count_sum_2)
{} 100

clear

# Apply stddev and stdvar function to histogram with {1, 2, 3, 4} (low res).
load 10m
histogram_stddev_stdvar_1 {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}x1

@@ -300,6 +308,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_1)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_1)
{} 1.163807968526718

clear

# Apply stddev and stdvar function to histogram with {1, 1, 1, 1} (high res).
load 10m
histogram_stddev_stdvar_2 {{schema:8 count:10 sum:10 buckets:[1 2 3 4]}}x1

@@ -310,6 +320,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_2)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_2)
{} 2.3971123370139447e-05

clear

# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9}.
load 10m
histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1

@@ -320,6 +332,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_3)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3)
{} 1844.4651144196398

clear

# Apply stddev and stdvar function to histogram with {-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3}.
load 10m
histogram_stddev_stdvar_4 {{schema:0 count:10 sum:-112946 z_bucket:0 n_buckets:[0 0 1 1 1 0 1 1 0 0 3 0 0 0 1 0 0 1]}}x1

@@ -330,6 +344,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_4)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_4)
{} 759352122.1939945

clear

# Apply stddev and stdvar function to histogram with {-10x10}.
load 10m
histogram_stddev_stdvar_5 {{schema:0 count:10 sum:-100 z_bucket:0 n_buckets:[0 0 0 0 10]}}x1

@@ -340,6 +356,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_5)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_5)
{} 1.725830020304794

clear

# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, NaN}.
load 10m
histogram_stddev_stdvar_6 {{schema:3 count:7 sum:NaN z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1

@@ -350,6 +368,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_6)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_6)
{} NaN

clear

# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, Inf}.
load 10m
histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1

@@ -360,6 +380,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_7)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7)
{} Inf

clear

# Apply quantile function to histogram with all positive buckets with zero bucket.
load 10m
histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1

@@ -391,6 +413,8 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_1)
eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_1)
{} -Inf

clear

# Apply quantile function to histogram with all negative buckets with zero bucket.
load 10m
histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1

@@ -419,6 +443,8 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_2)
eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_2)
{} -Inf

clear

# Apply quantile function to histogram with both positive and negative buckets with zero bucket.
load 10m
histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1

@@ -462,6 +488,8 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_3)
eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_3)
{} -Inf

clear

# Apply fraction function to empty histogram.
load 10m
histogram_fraction_1 {{}}x1

@@ -469,6 +497,8 @@ load 10m
eval instant at 10m histogram_fraction(3.1415, 42, histogram_fraction_1)
{} NaN

clear

# Apply fraction function to histogram with positive and zero buckets.
load 10m
histogram_fraction_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1

@@ -633,6 +663,8 @@ eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_3)
eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_3)
{} 1

clear

# Apply fraction function to histogram with both positive, negative and zero buckets.
load 10m
histogram_fraction_4 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1

@@ -720,27 +752,39 @@ eval instant at 10m histogram_sum(scalar(histogram_fraction(-Inf, +Inf, sum(hist

# Apply multiplication and division operator to histogram.
load 10m
histogram_mul_div {{schema:0 count:21 sum:33 z_bucket:3 z_bucket_w:0.001 buckets:[3 3 3] n_buckets:[3 3 3]}}x1
histogram_mul_div {{schema:0 count:30 sum:33 z_bucket:3 z_bucket_w:0.001 buckets:[3 3 3] n_buckets:[6 6 6]}}x1
float_series_3 3+0x1
float_series_0 0+0x1

eval instant at 10m histogram_mul_div*3
{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
{} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}

eval instant at 10m histogram_mul_div*-1
{} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}}

eval instant at 10m -histogram_mul_div
{} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}}

eval instant at 10m histogram_mul_div*-3
{} {{schema:0 count:-90 sum:-99 z_bucket:-9 z_bucket_w:0.001 buckets:[-9 -9 -9] n_buckets:[-18 -18 -18]}}

eval instant at 10m 3*histogram_mul_div
{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
{} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}

eval instant at 10m histogram_mul_div*float_series_3
{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
{} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}

eval instant at 10m float_series_3*histogram_mul_div
{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
{} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}

eval instant at 10m histogram_mul_div/3
{} {{schema:0 count:7 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[1 1 1]}}
{} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}}

eval instant at 10m histogram_mul_div/-3
{} {{schema:0 count:-10 sum:-11 z_bucket:-1 z_bucket_w:0.001 buckets:[-1 -1 -1] n_buckets:[-2 -2 -2]}}

eval instant at 10m histogram_mul_div/float_series_3
{} {{schema:0 count:7 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[1 1 1]}}
{} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}}

eval instant at 10m histogram_mul_div*0
{} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}
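All of these expectations are component-wise arithmetic: multiplying or dividing a native histogram by a scalar scales count, sum, the zero bucket and every regular bucket by the same factor. For example, {{count:30 sum:33 z_bucket:3 buckets:[3 3 3] n_buckets:[6 6 6]}} * 3 yields count 90, sum 99, buckets [9 9 9] and n_buckets [18 18 18], and a negative factor simply negates each component.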
@@ -802,7 +846,7 @@ load 30s
some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}}

# Test the case where we only have two points for rate
eval_warn instant at 30s rate(some_metric[30s])
eval_warn instant at 30s rate(some_metric[1m])
{} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}

# Test the case where we have more than two points for rate

@@ -824,11 +868,11 @@ eval_warn instant at 1m30s rate(some_metric[1m])
# Should produce no results.

# Start with custom, end with exponential.
eval_warn instant at 1m rate(some_metric[30s])
eval_warn instant at 1m rate(some_metric[1m])
# Should produce no results.

# Start with exponential, end with custom.
eval_warn instant at 30s rate(some_metric[30s])
eval_warn instant at 30s rate(some_metric[1m])
# Should produce no results.

clear
@@ -963,8 +1007,8 @@ clear
load 1m
histogram_sum_over_time {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:1 count:0}}

eval instant at 3m sum_over_time(histogram_sum_over_time[3m:1m])
eval instant at 3m sum_over_time(histogram_sum_over_time[4m:1m])
{} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}

eval instant at 3m avg_over_time(histogram_sum_over_time[3m:1m])
eval instant at 3m avg_over_time(histogram_sum_over_time[4m:1m])
{} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}}
68 promql/promqltest/testdata/operators.test vendored
@@ -113,7 +113,7 @@ eval instant at 50m http_requests{job="api-server", group="canary"}
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="canary", instance="1", job="api-server"} 400

eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[5m]) * 5 * 60
eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[10m]) * 5 * 60
{group="canary", instance="0", job="api-server"} 330
{group="canary", instance="1", job="api-server"} 440

@@ -308,65 +308,65 @@ load 5m
threshold{instance="abc",job="node",target="a@b.com"} 0

# Copy machine role to node variable.
eval instant at 5m node_role * on (instance) group_right (role) node_var
eval instant at 1m node_role * on (instance) group_right (role) node_var
{instance="abc",job="node",role="prometheus"} 2

eval instant at 5m node_var * on (instance) group_left (role) node_role
eval instant at 1m node_var * on (instance) group_left (role) node_role
{instance="abc",job="node",role="prometheus"} 2

eval instant at 5m node_var * ignoring (role) group_left (role) node_role
eval instant at 1m node_var * ignoring (role) group_left (role) node_role
{instance="abc",job="node",role="prometheus"} 2

eval instant at 5m node_role * ignoring (role) group_right (role) node_var
eval instant at 1m node_role * ignoring (role) group_right (role) node_var
{instance="abc",job="node",role="prometheus"} 2

# Copy machine role to node variable with instrumentation labels.
eval instant at 5m node_cpu * ignoring (role, mode) group_left (role) node_role
eval instant at 1m node_cpu * ignoring (role, mode) group_left (role) node_role
{instance="abc",job="node",mode="idle",role="prometheus"} 3
{instance="abc",job="node",mode="user",role="prometheus"} 1

eval instant at 5m node_cpu * on (instance) group_left (role) node_role
eval instant at 1m node_cpu * on (instance) group_left (role) node_role
{instance="abc",job="node",mode="idle",role="prometheus"} 3
{instance="abc",job="node",mode="user",role="prometheus"} 1

# Ratio of total.
eval instant at 5m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu)
eval instant at 1m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu)
{instance="abc",job="node",mode="idle"} .75
{instance="abc",job="node",mode="user"} .25
{instance="def",job="node",mode="idle"} .80
{instance="def",job="node",mode="user"} .20

eval instant at 5m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)
eval instant at 1m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)
{job="node",mode="idle"} 0.7857142857142857
{job="node",mode="user"} 0.21428571428571427

eval instant at 5m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu))
eval instant at 1m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu))
{} 1.0

eval instant at 5m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu)
eval instant at 1m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu)
{instance="abc",job="node",mode="idle"} .75
{instance="abc",job="node",mode="user"} .25
{instance="def",job="node",mode="idle"} .80
{instance="def",job="node",mode="user"} .20

eval instant at 5m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu)
eval instant at 1m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu)
{instance="abc",job="node",mode="idle"} .75
{instance="abc",job="node",mode="user"} .25
{instance="def",job="node",mode="idle"} .80
{instance="def",job="node",mode="user"} .20

eval instant at 5m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)
eval instant at 1m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)
{job="node",mode="idle"} 0.7857142857142857
{job="node",mode="user"} 0.21428571428571427

eval instant at 5m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu))
eval instant at 1m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu))
{} 1.0

# Copy over label from metric with no matching labels, without having to list cross-job target labels ('job' here).
eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0
eval instant at 1m node_cpu + on(dummy) group_left(foo) random*0
{instance="abc",job="node",mode="idle",foo="bar"} 3
{instance="abc",job="node",mode="user",foo="bar"} 1
{instance="def",job="node",mode="idle",foo="bar"} 8

@@ -374,12 +374,12 @@ eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0

# Use threshold from metric, and copy over target.
eval instant at 5m node_cpu > on(job, instance) group_left(target) threshold
eval instant at 1m node_cpu > on(job, instance) group_left(target) threshold
node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3
node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1

# Use threshold from metric, and a default (1) if it's not present.
eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1))
eval instant at 1m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1))
node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3
node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1
node_cpu{instance="def",job="node",mode="idle"} 8

@@ -387,37 +387,37 @@ eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or

# Check that binops drop the metric name.
eval instant at 5m node_cpu + 2
eval instant at 1m node_cpu + 2
{instance="abc",job="node",mode="idle"} 5
{instance="abc",job="node",mode="user"} 3
{instance="def",job="node",mode="idle"} 10
{instance="def",job="node",mode="user"} 4

eval instant at 5m node_cpu - 2
eval instant at 1m node_cpu - 2
{instance="abc",job="node",mode="idle"} 1
{instance="abc",job="node",mode="user"} -1
{instance="def",job="node",mode="idle"} 6
{instance="def",job="node",mode="user"} 0

eval instant at 5m node_cpu / 2
eval instant at 1m node_cpu / 2
{instance="abc",job="node",mode="idle"} 1.5
{instance="abc",job="node",mode="user"} 0.5
{instance="def",job="node",mode="idle"} 4
{instance="def",job="node",mode="user"} 1

eval instant at 5m node_cpu * 2
eval instant at 1m node_cpu * 2
{instance="abc",job="node",mode="idle"} 6
{instance="abc",job="node",mode="user"} 2
{instance="def",job="node",mode="idle"} 16
{instance="def",job="node",mode="user"} 4

eval instant at 5m node_cpu ^ 2
eval instant at 1m node_cpu ^ 2
{instance="abc",job="node",mode="idle"} 9
{instance="abc",job="node",mode="user"} 1
{instance="def",job="node",mode="idle"} 64
{instance="def",job="node",mode="user"} 4

eval instant at 5m node_cpu % 2
eval instant at 1m node_cpu % 2
{instance="abc",job="node",mode="idle"} 1
{instance="abc",job="node",mode="user"} 1
{instance="def",job="node",mode="idle"} 0

@@ -432,14 +432,14 @@ load 5m
metricB{baz="meh"} 4

# On with no labels, for metrics with no common labels.
eval instant at 5m random + on() metricA
eval instant at 1m random + on() metricA
{} 5

# Ignoring with no labels is the same as no ignoring.
eval instant at 5m metricA + ignoring() metricB
eval instant at 1m metricA + ignoring() metricB
{baz="meh"} 7

eval instant at 5m metricA + metricB
eval instant at 1m metricA + metricB
{baz="meh"} 7

clear

@@ -457,16 +457,16 @@ load 5m
test_total{instance="localhost"} 50
test_smaller{instance="localhost"} 10

eval instant at 5m test_total > bool test_smaller
eval instant at 1m test_total > bool test_smaller
{instance="localhost"} 1

eval instant at 5m test_total > test_smaller
eval instant at 1m test_total > test_smaller
test_total{instance="localhost"} 50

eval instant at 5m test_total < bool test_smaller
eval instant at 1m test_total < bool test_smaller
{instance="localhost"} 0

eval instant at 5m test_total < test_smaller
eval instant at 1m test_total < test_smaller

clear

@@ -476,14 +476,14 @@ load 5m
trigx{} 20
trigNaN{} NaN

eval instant at 5m trigy atan2 trigx
eval instant at 1m trigy atan2 trigx
{} 0.4636476090008061

eval instant at 5m trigy atan2 trigNaN
eval instant at 1m trigy atan2 trigNaN
{} NaN

eval instant at 5m 10 atan2 20
eval instant at 1m 10 atan2 20
0.4636476090008061

eval instant at 5m 10 atan2 NaN
eval instant at 1m 10 atan2 NaN
NaN
14 promql/promqltest/testdata/range_queries.test vendored
@@ -1,18 +1,18 @@
# sum_over_time with all values
load 30s
load 15s
bar 0 1 10 100 1000

eval range from 0 to 2m step 1m sum_over_time(bar[30s])
eval range from 0 to 1m step 30s sum_over_time(bar[30s])
{} 0 11 1100

clear

# sum_over_time with trailing values
load 30s
load 15s
bar 0 1 10 100 1000 0 0 0 0

eval range from 0 to 2m step 1m sum_over_time(bar[30s])
{} 0 11 1100
{} 0 1100 0

clear
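With samples every 15s and a 30s range, each window (t-30s, t] holds at most the last two samples: at 0s only the 0 is in range, at 30s the sum is 1 + 10 = 11, and at 60s it is 100 + 1000 = 1100, which is exactly the {} 0 11 1100 expectation in the first block above.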
@@ -21,15 +21,15 @@ load 30s
bar 0 1 10 100 1000 10000 100000 1000000 10000000

eval range from 0 to 4m step 1m sum_over_time(bar[30s])
{} 0 11 1100 110000 11000000
{} 0 10 1000 100000 10000000

clear

# sum_over_time with all values random
load 30s
load 15s
bar 5 17 42 2 7 905 51

eval range from 0 to 3m step 1m sum_over_time(bar[30s])
eval range from 0 to 90s step 30s sum_over_time(bar[30s])
{} 5 59 9 956

clear
12 promql/promqltest/testdata/staleness.test vendored
@@ -14,10 +14,10 @@ eval instant at 40s metric
{__name__="metric"} 2

# It goes stale 5 minutes after the last sample.
eval instant at 330s metric
eval instant at 329s metric
{__name__="metric"} 2

eval instant at 331s metric
eval instant at 330s metric

# Range vector ignores stale sample.

@@ -30,9 +30,13 @@ eval instant at 10s count_over_time(metric[1s])
eval instant at 20s count_over_time(metric[1s])

eval instant at 20s count_over_time(metric[10s])

eval instant at 20s count_over_time(metric[20s])
{} 1

eval instant at 20s count_over_time(metric[10])

eval instant at 20s count_over_time(metric[20])
{} 1

@@ -48,7 +52,7 @@ eval instant at 0s metric
eval instant at 150s metric
{__name__="metric"} 0

eval instant at 300s metric
eval instant at 299s metric
{__name__="metric"} 0

eval instant at 301s metric
eval instant at 300s metric
40 promql/promqltest/testdata/subquery.test vendored
@@ -10,18 +10,18 @@ eval instant at 10s sum_over_time(metric[50s:5s])

# Every evaluation yields the last value, i.e. 2
eval instant at 5m sum_over_time(metric[50s:10s])
{} 12
{} 10

# Series becomes stale at 5m10s (5m after last sample)
# Hence subquery gets a single sample at 6m-50s=5m10s.
eval instant at 6m sum_over_time(metric[50s:10s])
# Hence subquery gets a single sample at 5m10s.
eval instant at 5m59s sum_over_time(metric[60s:10s])
{} 2

eval instant at 10s rate(metric[20s:10s])
{} 0.1

eval instant at 20s rate(metric[20s:5s])
{} 0.05
{} 0.06666666666666667

clear
@@ -49,16 +49,16 @@ load 10s
metric3 0+3x1000

eval instant at 1000s sum_over_time(metric1[30s:10s])
{} 394
{} 297

# This is (394*2 - 100), because other than the last 100 at 1000s,
# This is (97 + 98*2 + 99*2 + 100), because other than 97@975s and 100@1000s,
# everything else is repeated with the 5s step.
eval instant at 1000s sum_over_time(metric1[30s:5s])
{} 688
{} 591

# Offset is aligned with the step.
# Offset is aligned with the step, so this is from [98@980s, 99@990s, 100@1000s].
eval instant at 1010s sum_over_time(metric1[30s:10s] offset 10s)
{} 394
{} 297

# Same result for different offsets due to step alignment.
eval instant at 1010s sum_over_time(metric1[30s:10s] offset 9s)
@@ -93,16 +93,16 @@ eval instant at 1010s sum_over_time((metric1)[30:10] offset 3)

# Nested subqueries
eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s])
{} 0.4
{} 0.30000000000000004

eval instant at 1000s rate(sum_over_time(metric2[30s:10s])[50s:10s])
{} 0.8
{} 0.6000000000000001

eval instant at 1000s rate(sum_over_time(metric3[30s:10s])[50s:10s])
{} 1.2
{} 0.9

eval instant at 1000s rate(sum_over_time((metric1+metric2+metric3)[30s:10s])[30s:10s])
{} 2.4
{} 1.8

clear

@@ -115,16 +115,20 @@ load 7s
eval instant at 80s rate(metric[1m])
{} 2.517857143

# No extrapolation, [2@20, 144@80]: (144 - 2) / 60
eval instant at 80s rate(metric[1m:10s])
{} 2.366666667
# Extrapolated to range start for counter, [2@20, 144@80]: (144 - 2) / (80 - 20)
eval instant at 80s rate(metric[1m500ms:10s])
{} 2.3666666666666667

# Extrapolated to zero value for counter, [2@20, 144@80]: (144 - 0) / 61
eval instant at 80s rate(metric[1m1s:10s])
{} 2.360655737704918
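The two results above spell out the counter extrapolation rule with samples 2@20s and 144@80s: the 1m500ms case extrapolates to the range start, (144 - 2) / (80 - 20) = 2.3666666666666667, while the 1m1s case hits the zero cutoff for counters and extends the series back only to its zero point, giving (144 - 0) / 61 = 2.360655737704918.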
# Only one value between 10s and 20s, 2@14
eval instant at 20s min_over_time(metric[10s])
{} 2

# min(1@10, 2@20)
eval instant at 20s min_over_time(metric[10s:10s])
# min(2@20)
eval instant at 20s min_over_time(metric[15s:10s])
{} 1

eval instant at 20m min_over_time(rate(metric[5m])[20m:1m])
36 promql/promqltest/testdata/trig_functions.test vendored
@@ -5,92 +5,92 @@ load 5m
  trig{l="y"} 20
  trig{l="NaN"} NaN

eval instant at 5m sin(trig)
eval instant at 1m sin(trig)
  {l="x"} -0.5440211108893699
  {l="y"} 0.9129452507276277
  {l="NaN"} NaN

eval instant at 5m cos(trig)
eval instant at 1m cos(trig)
  {l="x"} -0.8390715290764524
  {l="y"} 0.40808206181339196
  {l="NaN"} NaN

eval instant at 5m tan(trig)
eval instant at 1m tan(trig)
  {l="x"} 0.6483608274590867
  {l="y"} 2.2371609442247427
  {l="NaN"} NaN

eval instant at 5m asin(trig - 10.1)
eval instant at 1m asin(trig - 10.1)
  {l="x"} -0.10016742116155944
  {l="y"} NaN
  {l="NaN"} NaN

eval instant at 5m acos(trig - 10.1)
eval instant at 1m acos(trig - 10.1)
  {l="x"} 1.670963747956456
  {l="y"} NaN
  {l="NaN"} NaN

eval instant at 5m atan(trig)
eval instant at 1m atan(trig)
  {l="x"} 1.4711276743037345
  {l="y"} 1.5208379310729538
  {l="NaN"} NaN

eval instant at 5m sinh(trig)
eval instant at 1m sinh(trig)
  {l="x"} 11013.232920103324
  {l="y"} 2.4258259770489514e+08
  {l="NaN"} NaN

eval instant at 5m cosh(trig)
eval instant at 1m cosh(trig)
  {l="x"} 11013.232920103324
  {l="y"} 2.4258259770489514e+08
  {l="NaN"} NaN

eval instant at 5m tanh(trig)
eval instant at 1m tanh(trig)
  {l="x"} 0.9999999958776927
  {l="y"} 1
  {l="NaN"} NaN

eval instant at 5m asinh(trig)
eval instant at 1m asinh(trig)
  {l="x"} 2.99822295029797
  {l="y"} 3.6895038689889055
  {l="NaN"} NaN

eval instant at 5m acosh(trig)
eval instant at 1m acosh(trig)
  {l="x"} 2.993222846126381
  {l="y"} 3.6882538673612966
  {l="NaN"} NaN

eval instant at 5m atanh(trig - 10.1)
eval instant at 1m atanh(trig - 10.1)
  {l="x"} -0.10033534773107522
  {l="y"} NaN
  {l="NaN"} NaN

eval instant at 5m rad(trig)
eval instant at 1m rad(trig)
  {l="x"} 0.17453292519943295
  {l="y"} 0.3490658503988659
  {l="NaN"} NaN

eval instant at 5m rad(trig - 10)
eval instant at 1m rad(trig - 10)
  {l="x"} 0
  {l="y"} 0.17453292519943295
  {l="NaN"} NaN

eval instant at 5m rad(trig - 20)
eval instant at 1m rad(trig - 20)
  {l="x"} -0.17453292519943295
  {l="y"} 0
  {l="NaN"} NaN

eval instant at 5m deg(trig)
eval instant at 1m deg(trig)
  {l="x"} 572.9577951308232
  {l="y"} 1145.9155902616465
  {l="NaN"} NaN

eval instant at 5m deg(trig - 10)
eval instant at 1m deg(trig - 10)
  {l="x"} 0
  {l="y"} 572.9577951308232
  {l="NaN"} NaN

eval instant at 5m deg(trig - 20)
eval instant at 1m deg(trig - 20)
  {l="x"} -572.9577951308232
  {l="y"} 0
  {l="NaN"} NaN
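Only the evaluation timestamp moves from 5m to 1m here; the expected results stay the same because the trig series carry constant values (the results imply trig{l="x"} is 10 and trig{l="y"} is 20). The assertions are plain float64 math, which a few lines of Go reproduce; rad() and deg() are simply scaling by pi/180 and 180/pi:

package main

import (
    "fmt"
    "math"
)

func main() {
    fmt.Println(math.Sin(10))         // ~ -0.5440211108893699, the value asserted above
    fmt.Println(math.Asin(10 - 10.1)) // -0.10016742116155944
    fmt.Println(math.Asin(20 - 10.1)) // NaN: asin is undefined outside [-1, 1]
    fmt.Println(10 * math.Pi / 180)   // rad(10) = 0.17453292519943295
    fmt.Println(10 * 180 / math.Pi)   // deg(10) = 572.9577951308232
}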
@@ -31,7 +31,7 @@ import (
)

type ActiveQueryTracker struct {
    mmapedFile   []byte
    mmappedFile  []byte
    getNextIndex chan int
    logger       log.Logger
    closer       io.Closer
@@ -87,24 +87,24 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) {
    }
}

type mmapedFile struct {
type mmappedFile struct {
    f io.Closer
    m mmap.MMap
}

func (f *mmapedFile) Close() error {
func (f *mmappedFile) Close() error {
    err := f.m.Unmap()
    if err != nil {
        err = fmt.Errorf("mmapedFile: unmapping: %w", err)
        err = fmt.Errorf("mmappedFile: unmapping: %w", err)
    }
    if fErr := f.f.Close(); fErr != nil {
        return errors.Join(fmt.Errorf("close mmapedFile.f: %w", fErr), err)
        return errors.Join(fmt.Errorf("close mmappedFile.f: %w", fErr), err)
    }

    return err
}

func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) {
func getMMappedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) {
    file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666)
    if err != nil {
        absPath, pathErr := filepath.Abs(filename)
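getMMappedFile maps a fixed-size file read-write, so writes into the returned byte slice land in the page cache and survive a process crash. A standalone sketch of the same mechanics (the mmap.MMap type above suggests github.com/edsrzf/mmap-go; the file name and size here are illustrative):

package main

import (
    "fmt"
    "os"

    mmap "github.com/edsrzf/mmap-go"
)

func main() {
    // Create a fixed-size backing file, as getMMappedFile does.
    f, err := os.OpenFile("queries.active", os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666)
    if err != nil {
        panic(err)
    }
    if err := f.Truncate(1024); err != nil { // the mapping needs a real on-disk size
        panic(err)
    }
    m, err := mmap.Map(f, mmap.RDWR, 0)
    if err != nil {
        panic(err)
    }
    copy(m, "[") // writes to the slice go straight to the mapped pages
    fmt.Printf("mapped %d bytes\n", len(m))
    // Unmap before closing the file, mirroring mmappedFile.Close.
    if err := m.Unmap(); err != nil {
        panic(err)
    }
    if err := f.Close(); err != nil {
        panic(err)
    }
}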
@@ -129,7 +129,7 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io
        return nil, nil, err
    }

    return fileAsBytes, &mmapedFile{f: file, m: fileAsBytes}, err
    return fileAsBytes, &mmappedFile{f: file, m: fileAsBytes}, err
}

func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker {
@@ -141,14 +141,14 @@ func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger lo
    filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxConcurrent*entrySize
    logUnfinishedQueries(filename, filesize, logger)

    fileAsBytes, closer, err := getMMapedFile(filename, filesize, logger)
    fileAsBytes, closer, err := getMMappedFile(filename, filesize, logger)
    if err != nil {
        panic("Unable to create mmap-ed active query log")
    }

    copy(fileAsBytes, "[")
    activeQueryTracker := ActiveQueryTracker{
        mmapedFile:   fileAsBytes,
        mmappedFile:  fileAsBytes,
        closer:       closer,
        getNextIndex: make(chan int, maxConcurrent),
        logger:       logger,
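The file is sized at 1 + maxConcurrent*entrySize bytes and seeded with "[", so queries.active always reads as the opening of a JSON array: one fixed-width, NUL-padded slot per concurrent query follows the bracket, which is what lets logUnfinishedQueries recover entries after an unclean shutdown. Illustratively (entry fields assumed, NUL padding elided), a file with one active query might look like:

[{"query":"sum(up)","timestamp_sec":1700000000},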
@@ -206,14 +206,14 @@ func (tracker ActiveQueryTracker) GetMaxConcurrent() int {
}

func (tracker ActiveQueryTracker) Delete(insertIndex int) {
    copy(tracker.mmapedFile[insertIndex:], strings.Repeat("\x00", entrySize))
    copy(tracker.mmappedFile[insertIndex:], strings.Repeat("\x00", entrySize))
    tracker.getNextIndex <- insertIndex
}

func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int, error) {
    select {
    case i := <-tracker.getNextIndex:
        fileBytes := tracker.mmapedFile
        fileBytes := tracker.mmappedFile
        entry := newJSONEntry(query, tracker.logger)
        start, end := i, i+entrySize
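Insert and Delete treat the buffered getNextIndex channel as a free-list of file offsets: receiving from it both claims a slot and caps concurrency at maxConcurrent, while Delete zeroes the slot before returning its offset. A minimal standalone sketch of the pattern (names are illustrative, not the tracker's API):

package main

import (
    "context"
    "fmt"
)

// slotPool hands out fixed-width regions of one shared buffer. The buffered
// channel holds every free offset, so a receive simultaneously claims a slot
// and enforces the concurrency limit.
type slotPool struct {
    buf       []byte
    free      chan int
    entrySize int
}

func newSlotPool(slots, entrySize int) *slotPool {
    p := &slotPool{
        buf:       make([]byte, slots*entrySize),
        free:      make(chan int, slots),
        entrySize: entrySize,
    }
    for i := 0; i < slots; i++ {
        p.free <- i * entrySize
    }
    return p
}

func (p *slotPool) insert(ctx context.Context, entry string) (int, error) {
    select {
    case off := <-p.free: // blocks while every slot is busy
        copy(p.buf[off:off+p.entrySize], entry)
        return off, nil
    case <-ctx.Done(): // the caller gave up waiting for a slot
        return 0, ctx.Err()
    }
}

func (p *slotPool) delete(off int) {
    clear(p.buf[off : off+p.entrySize]) // zero the slot, like the \x00 fill above
    p.free <- off                       // hand the offset back
}

func main() {
    p := newSlotPool(2, 16)
    off, _ := p.insert(context.Background(), "sum(up)")
    fmt.Println("slot at offset", off)
    p.delete(off)
}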
@@ -26,7 +26,7 @@ import (
func TestQueryLogging(t *testing.T) {
    fileAsBytes := make([]byte, 4096)
    queryLogger := ActiveQueryTracker{
        mmapedFile:   fileAsBytes,
        mmappedFile:  fileAsBytes,
        logger:       nil,
        getNextIndex: make(chan int, 4),
    }
@@ -70,7 +70,7 @@ func TestQueryLogging(t *testing.T) {
func TestIndexReuse(t *testing.T) {
    queryBytes := make([]byte, 1+3*entrySize)
    queryLogger := ActiveQueryTracker{
        mmapedFile:   queryBytes,
        mmappedFile:  queryBytes,
        logger:       nil,
        getNextIndex: make(chan int, 3),
    }
@@ -106,10 +106,10 @@ func TestIndexReuse(t *testing.T) {

func TestMMapFile(t *testing.T) {
    dir := t.TempDir()
    fpath := filepath.Join(dir, "mmapedFile")
    fpath := filepath.Join(dir, "mmappedFile")
    const data = "ab"

    fileAsBytes, closer, err := getMMapedFile(fpath, 2, nil)
    fileAsBytes, closer, err := getMMappedFile(fpath, 2, nil)
    require.NoError(t, err)
    copy(fileAsBytes, data)
    require.NoError(t, closer.Close())
@@ -526,7 +526,7 @@ func (ssi *storageSeriesIterator) Next() chunkenc.ValueType {
        ssi.currH = p.H
        return chunkenc.ValFloatHistogram
    default:
        panic("storageSeriesIterater.Next failed to pick value type")
        panic("storageSeriesIterator.Next failed to pick value type")
    }
}
BIN remote.test (new executable file; binary not shown)