mirror of https://github.com/prometheus/prometheus.git
synced 2024-11-09 23:24:05 -08:00

commit 5497623f2e
Merge branch 'prometheus:main' into patch-impl2

.github/workflows/ci.yml (50 changes)
@@ -12,15 +12,13 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     image: quay.io/prometheus/golang-builder:1.23-base
-    env:
-      # Preliminary fix to make Go tests with race detector not use too much memory,
-      # see https://github.com/prometheus/prometheus/issues/14858.
-      GOMEMLIMIT: 10GiB
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
-      - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
+        with:
+          enable_npm: true
+      - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 test-flags=""
       - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
       - run: make -C documentation/examples/remote_storage
       - run: make -C documentation/examples

@@ -32,7 +30,7 @@ jobs:
     image: quay.io/prometheus/golang-builder:1.23-base
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
       - run: go test --tags=dedupelabels ./...
       - run: GOARCH=386 go test ./cmd/prometheus

@@ -65,7 +63,7 @@ jobs:

     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
         with:
           enable_go: false

@@ -114,6 +112,8 @@ jobs:
     if: |
       !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
       &&
+      !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
+      &&
       !(github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-'))
       &&
       !(github.event_name == 'push' && github.event.ref == 'refs/heads/main')

@@ -122,7 +122,7 @@ jobs:
       thread: [ 0, 1, 2 ]
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/build
         with:
           promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"

@@ -134,6 +134,8 @@ jobs:
     if: |
       (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
       ||
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
+      ||
       (github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-'))
       ||
       (github.event_name == 'push' && github.event.ref == 'refs/heads/main')

@@ -145,7 +147,7 @@ jobs:
       # should also be updated.
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/build
         with:
           parallelism: 12

@@ -207,7 +209,7 @@ jobs:
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/publish_main
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}

@@ -218,10 +220,13 @@ jobs:
     name: Publish release artefacts
     runs-on: ubuntu-latest
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
-    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
+    if: |
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+      ||
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/publish_release
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}

@@ -236,7 +241,7 @@ jobs:
     steps:
       - name: Checkout
         uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
      - name: Install nodejs
        uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3
        with:

@@ -249,17 +254,26 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-node-
       - name: Check libraries version
-        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
-        run: ./scripts/ui_release.sh --check-package "$(echo ${{ github.ref_name }}|sed s/v2/v0/)"
+        if: |
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+          ||
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
+        run: ./scripts/ui_release.sh --check-package "$(./scripts/get_module_version.sh ${{ github.ref_name }})"
       - name: build
         run: make assets
       - name: Copy files before publishing libs
         run: ./scripts/ui_release.sh --copy
       - name: Publish dry-run libraries
-        if: "!(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))"
+        if: |
+          !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+          &&
+          !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
         run: ./scripts/ui_release.sh --publish dry-run
       - name: Publish libraries
-        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
+        if: |
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+          ||
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
         run: ./scripts/ui_release.sh --publish
         env:
           # The setup-node action writes an .npmrc file with this env variable
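The multi-line `if:` expressions introduced above use a YAML block scalar so each clause sits on its own line. A minimal sketch of the pattern in isolation — the job name and step are illustrative, not part of this workflow:

```yaml
jobs:
  example_release_job:   # illustrative name, not from this workflow
    runs-on: ubuntu-latest
    # Run only on pushes of v2.x or v3.x release tags.
    if: |
      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
      ||
      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
    steps:
      - run: echo "running for a release tag"
```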
.github/workflows/fuzzing.yml (2 changes)

@@ -21,7 +21,7 @@ jobs:
       fuzz-seconds: 600
       dry-run: false
     - name: Upload Crash
-      uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
+      uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
       if: failure() && steps.build.outcome == 'success'
       with:
         name: artifacts
.github/workflows/scorecards.yml (2 changes)

@@ -37,7 +37,7 @@ jobs:
       # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
       # format to the repository Actions tab.
       - name: "Upload artifact"
-        uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # tag=v4.3.4
+        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # tag=v4.4.0
        with:
          name: SARIF file
          path: results.sarif
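Both workflow bumps follow the same supply-chain hardening convention: a third-party action is pinned to a full, immutable commit SHA, with the human-readable tag kept in a trailing comment. A minimal sketch of the convention (step context is illustrative):

```yaml
steps:
  # Pin third-party actions to a commit SHA; the comment records the matching tag.
  - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
    with:
      name: artifacts
```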
.gitignore (3 changes)

@@ -22,8 +22,7 @@ benchmark.txt
 /documentation/examples/remote_storage/example_write_adapter/example_write_adapter

 npm_licenses.tar.bz2
-/web/ui/static/react-app
-/web/ui/static/mantine-ui
+/web/ui/static

 /vendor
 /.build

.promu.yml

@@ -28,8 +28,6 @@ tarball:
   # Whenever there are new files to include in the tarball,
   # remember to make sure the new files will be generated after `make build`.
   files:
-    - consoles
-    - console_libraries
     - documentation/examples/prometheus.yml
     - LICENSE
     - NOTICE

CHANGELOG.md (46 changes)

@@ -1,10 +1,46 @@
 # Changelog

-## unreleased
+## 3.0.0-beta.0 / 2024-09-05
+
+Release 3.0.0-beta.0 includes new features such as a brand new UI and UTF-8 support enabled by default. As a new major version, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. Most users should be able to try this release out of the box without any configuration changes.
+
+As is traditional with a beta release, we do **not** recommend users install 3.0.0-beta on critical production systems, but we do want everyone to test it out and find bugs.
+
+* [CHANGE] UI: The old web UI has been replaced by a completely new one that is less cluttered and adds a few new features (PromLens-style tree view, better metrics explorer, "Explain" tab). However, it is still missing some features of the old UI (notably, exemplar display and heatmaps). To switch back to the old UI, you can use the feature flag `--enable-feature=old-ui` for the time being. #14872
+* [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. #13904
+* [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365
+* [CHANGE] Kubernetes SD: Remove support for `networking.k8s.io/v1beta1` API version of Ingress. This version is no longer served as of Kubernetes v1.22. #14365
+* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-name` feature flag has been removed. #14705
+* [CHANGE] Console: Remove example files for the console feature. Users can continue using the console feature by supplying their own JavaScript and templates. #14807
+* [CHANGE] SD: Enable the new service discovery manager by default. This SD manager does not restart unchanged discoveries upon reloading. This makes reloads faster and reduces pressure on service discoveries' sources. The corresponding `new-service-discovery-manager` feature flag has been removed. #14770
+* [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747
+* [CHANGE] Remove deprecated `remote-write-receiver`, `promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526
+* [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643
+* [FEATURE] Promtool: Allow additional labels to be added to blocks created from openmetrics. #14402
+* [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200
+* [FEATURE] Automatic reloading of the Prometheus configuration file at a specified interval. #14769
+* [ENHANCEMENT] OTLP receiver: Warn when encountering exponential histograms with zero count and non-zero sum. #14706
+* [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612
+* [ENHANCEMENT] Scrape: Only parse created timestamp if `created-timestamp-zero-ingestion` feature flag is enabled. This is because a lot of memory is used when parsing the created timestamp in the OM text format. #14815
+* [ENHANCEMENT] Scrape: Add support for logging scrape failures to a specified file. #14734
+* [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379
+* [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477
+* [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875
+* [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816
+* [ENHANCEMENT] Add support for multiple listening addresses. #14665
+* [ENHANCEMENT] Add the ability to set custom HTTP headers. #14817
+* [BUGFIX] TSDB: Fix shard initialization after WAL repair. #14731
+* [BUGFIX] UTF-8: Ensure correct validation when legacy mode is turned on. #14736
+* [BUGFIX] SD: Make discovery manager notify consumers of dropped targets for still-defined jobs. #13147
+* [BUGFIX] SD: Prevent the new service discovery manager from storing stale targets. #13622
+* [BUGFIX] Remote Write 2.0: Ensure metadata records are sent from the WAL to remote write during WAL replay. #14766
+* [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029
+* [BUGFIX] Scrape: Reset exemplar position when scraping histograms in protobuf. #14810
+* [BUGFIX] Native Histograms: Do not re-use spans between histograms. #14771
+* [BUGFIX] TSDB: Fix panic in query during truncation with OOO head. #14831
+* [BUGFIX] TSDB: Fix panic in chunk querier. #14874
+* [BUGFIX] promql.Engine.Close: No-op if nil. #14861
+* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042
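UTF-8 metric and label names are on by default in this release; the `config/testdata` changes further down in this commit show the knob for opting back into the legacy character set. A minimal sketch of a configuration restoring legacy name validation globally — the scrape job and target are illustrative:

```yaml
global:
  # Reject metric/label names outside the legacy [a-zA-Z0-9_:] character set.
  metric_name_validation_scheme: legacy
scrape_configs:
  - job_name: prometheus        # illustrative job
    static_configs:
      - targets: ['localhost:9090']
```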

 ## 2.54.1 / 2024-08-27

@@ -104,7 +140,7 @@ This release changes the default for GOGC, the Go runtime control for the trade-
 * [ENHANCEMENT] TSDB: Pause regular block compactions if the head needs to be compacted (prioritize head as it increases memory consumption). #13754
 * [ENHANCEMENT] Observability: Improved logging during signal handling termination. #13772
 * [ENHANCEMENT] Observability: All log lines for drop series use "num_dropped" key consistently. #13823
-* [ENHANCEMENT] Observability: Log chunk snapshot and mmaped chunk replay duration during WAL replay. #13838
+* [ENHANCEMENT] Observability: Log chunk snapshot and mmapped chunk replay duration during WAL replay. #13838
 * [ENHANCEMENT] Observability: Log if the block is being created from WBL during compaction. #13846
 * [BUGFIX] PromQL: Fix inaccurate sample number statistic when querying histograms. #13667
 * [BUGFIX] PromQL: Fix `histogram_stddev` and `histogram_stdvar` for cases where the histogram has negative buckets. #13852
@@ -641,7 +677,7 @@ The binaries published with this release are built with Go1.17.8 to avoid [CVE-2

 ## 2.33.0 / 2022-01-29

-* [CHANGE] PromQL: Promote negative offset and `@` modifer to stable features. #10121
+* [CHANGE] PromQL: Promote negative offset and `@` modifier to stable features. #10121
 * [CHANGE] Web: Promote remote-write-receiver to stable. #10119
 * [FEATURE] Config: Add `stripPort` template function. #10002
 * [FEATURE] Promtool: Add cardinality analysis to `check metrics`, enabled by flag `--extended`. #10045
@@ -878,7 +914,7 @@ This vulnerability has been reported by Aaron Devaney from MDSec.
 * [ENHANCEMENT] Templating: Enable parsing strings in `humanize` functions. #8682
 * [BUGFIX] UI: Provide errors instead of blank page on TSDB Status Page. #8654 #8659
 * [BUGFIX] TSDB: Do not panic when writing very large records to the WAL. #8790
-* [BUGFIX] TSDB: Avoid panic when mmaped memory is referenced after the file is closed. #8723
+* [BUGFIX] TSDB: Avoid panic when mmapped memory is referenced after the file is closed. #8723
 * [BUGFIX] Scaleway Discovery: Fix nil pointer dereference. #8737
 * [BUGFIX] Consul Discovery: Restart no longer required after config update with no targets. #8766
@@ -1804,7 +1840,7 @@ information, read the announcement blog post and migration guide.
 ## 1.7.0 / 2017-06-06

 * [CHANGE] Compress remote storage requests and responses with unframed/raw snappy.
-* [CHANGE] Properly ellide secrets in config.
+* [CHANGE] Properly elide secrets in config.
 * [FEATURE] Add OpenStack service discovery.
 * [FEATURE] Add ability to limit Kubernetes service discovery to certain namespaces.
 * [FEATURE] Add metric for discovered number of Alertmanagers.
Dockerfile

@@ -8,21 +8,16 @@ ARG OS="linux"
 COPY .build/${OS}-${ARCH}/prometheus /bin/prometheus
 COPY .build/${OS}-${ARCH}/promtool /bin/promtool
 COPY documentation/examples/prometheus.yml /etc/prometheus/prometheus.yml
-COPY console_libraries/ /usr/share/prometheus/console_libraries/
-COPY consoles/ /usr/share/prometheus/consoles/
 COPY LICENSE /LICENSE
 COPY NOTICE /NOTICE
 COPY npm_licenses.tar.bz2 /npm_licenses.tar.bz2

 WORKDIR /prometheus
-RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/ && \
-    chown -R nobody:nobody /etc/prometheus /prometheus
+RUN chown -R nobody:nobody /etc/prometheus /prometheus

 USER       nobody
 EXPOSE     9090
 VOLUME     [ "/prometheus" ]
 ENTRYPOINT [ "/bin/prometheus" ]
 CMD        [ "--config.file=/etc/prometheus/prometheus.yml", \
-             "--storage.tsdb.path=/prometheus", \
-             "--web.console.libraries=/usr/share/prometheus/console_libraries", \
-             "--web.console.templates=/usr/share/prometheus/consoles" ]
+             "--storage.tsdb.path=/prometheus" ]
MAINTAINERS.md

@@ -13,13 +13,12 @@ Maintainers for specific parts of the codebase:
 * `k8s`: Frederic Branczyk (<fbranczyk@gmail.com> / @brancz)
 * `documentation`
   * `prometheus-mixin`: Matthias Loibl (<mail@matthiasloibl.com> / @metalmatze)
-* `model/histogram` and other code related to native histograms: Björn Rabenstein (<beorn@grafana.com> / @beorn7),
+* `model/histogram` and other code related to native histograms: Björn Rabenstein (<beorn@grafana.com> / @beorn7),
   George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
 * `storage`
   * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos (<npazosmendez@gmail.com> / @npazosmendez), Alex Greenbank (<alex.greenbank@grafana.com> / @alexgreenbank)
   * `otlptranslator`: Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto)
 * `web`
   * `ui`: Julius Volz (<julius.volz@gmail.com> / @juliusv)
   * `module`: Augustin Husson (<husson.augustin@gmail.com> @nexucis)
Makefile (2 changes)

@@ -42,7 +42,7 @@ upgrade-npm-deps:

 .PHONY: ui-bump-version
 ui-bump-version:
-	version=$$(sed s/2/0/ < VERSION) && ./scripts/ui_release.sh --bump-version "$${version}"
+	version=$$(./scripts/get_module_version.sh) && ./scripts/ui_release.sh --bump-version "$${version}"
 	cd web/ui && npm install
 	git add "./web/ui/package-lock.json" "./**/package.json"

README.md

@@ -115,7 +115,7 @@ The Makefile provides several targets:

 Prometheus is bundled with many service discovery plugins.
 When building Prometheus from source, you can edit the [plugins.yml](./plugins.yml)
-file to disable some service discoveries. The file is a yaml-formated list of go
+file to disable some service discoveries. The file is a yaml-formatted list of go
 import path that will be built into the Prometheus binary.

 After you have changed the file, you
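For illustration of the `plugins.yml` format referenced above: each list entry is a Go import path that gets compiled into the binary, so removing a line drops that service discovery plugin from the build. A hedged sketch — the exact set of entries shipped in the file may differ:

```yaml
# plugins.yml: one Go import path per service discovery plugin.
- github.com/prometheus/prometheus/discovery/kubernetes
- github.com/prometheus/prometheus/discovery/consul
```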
RELEASE.md

@@ -187,7 +187,7 @@ the Prometheus server, we use major version zero releases for the libraries.
 Tag the new library release via the following commands:

 ```bash
-tag="v$(sed s/2/0/ < VERSION)"
+tag="v$(./scripts/get_module_version.sh)"
 git tag -s "${tag}" -m "${tag}"
 git push origin "${tag}"
 ```
cmd/prometheus/main.go

@@ -58,8 +58,6 @@ import (

 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery"
-	"github.com/prometheus/prometheus/discovery/legacymanager"
-	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"

@@ -103,6 +101,8 @@ var (
 )

 func init() {
+	// This can be removed when the default validation scheme in common is updated.
+	model.NameValidationScheme = model.UTF8Validation
 	prometheus.MustRegister(versioncollector.NewCollector(strings.ReplaceAll(appName, "-", "_")))

 	var err error

@@ -162,7 +162,6 @@ type flagConfig struct {
 	// These options are extracted from featureList
 	// for ease of use.
 	enableExpandExternalLabels bool
-	enableNewSDManager         bool
 	enablePerStepStats         bool
 	enableAutoGOMAXPROCS       bool
 	enableAutoGOMEMLIMIT       bool

@@ -182,9 +181,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 	opts := strings.Split(f, ",")
 	for _, o := range opts {
 		switch o {
-		case "remote-write-receiver":
-			c.web.EnableRemoteWriteReceiver = true
-			level.Warn(logger).Log("msg", "Remote write receiver enabled via feature flag remote-write-receiver. This is DEPRECATED. Use --web.enable-remote-write-receiver.")
 		case "otlp-write-receiver":
 			c.web.EnableOTLPWriteReceiver = true
 			level.Info(logger).Log("msg", "Experimental OTLP write receiver enabled")

@@ -203,12 +199,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		case "metadata-wal-records":
 			c.scrape.AppendMetadata = true
 			level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0")
-		case "new-service-discovery-manager":
-			c.enableNewSDManager = true
-			level.Info(logger).Log("msg", "Experimental service discovery manager")
-		case "agent":
-			agentMode = true
-			level.Info(logger).Log("msg", "Experimental agent mode enabled.")
 		case "promql-per-step-stats":
 			c.enablePerStepStats = true
 			level.Info(logger).Log("msg", "Experimental per-step statistics reporting")

@@ -252,13 +242,8 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		case "promql-delayed-name-removal":
 			c.promqlEnableDelayedNameRemoval = true
 			level.Info(logger).Log("msg", "Experimental PromQL delayed name removal enabled.")
-		case "utf8-names":
-			model.NameValidationScheme = model.UTF8Validation
-			level.Info(logger).Log("msg", "Experimental UTF-8 support enabled")
 		case "":
 			continue
-		case "promql-at-modifier", "promql-negative-offset":
-			level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o)
 		case "old-ui":
 			c.web.UseOldUI = true
 			level.Info(logger).Log("msg", "Serving previous version of the Prometheus web UI.")

@@ -277,11 +262,6 @@ func main() {
 		runtime.SetMutexProfileFraction(20)
 	}

-	var (
-		oldFlagRetentionDuration model.Duration
-		newFlagRetentionDuration model.Duration
-	)
-
 	// Unregister the default GoCollector, and reregister with our defaults.
 	if prometheus.Unregister(collectors.NewGoCollector()) {
 		prometheus.MustRegister(

@@ -391,11 +371,8 @@ func main() {
 		"Size at which to split the tsdb WAL segment files. Example: 100MB").
 		Hidden().PlaceHolder("<bytes>").BytesVar(&cfg.tsdb.WALSegmentSize)

-	serverOnlyFlag(a, "storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead.").
-		SetValue(&oldFlagRetentionDuration)
-
-	serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
-		SetValue(&newFlagRetentionDuration)
+	serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. If neither this flag nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
+		SetValue(&cfg.tsdb.RetentionDuration)

 	serverOnlyFlag(a, "storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\". Based on powers-of-2, so 1KB is 1024B.").
 		BytesVar(&cfg.tsdb.MaxBytes)

@@ -403,14 +380,6 @@ func main() {
 	serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
 		Default("false").BoolVar(&cfg.tsdb.NoLockfile)

-	// TODO: Remove in Prometheus 3.0.
-	var b bool
-	serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "[DEPRECATED] This flag has no effect. Overlapping blocks are enabled by default now.").
-		Default("true").Hidden().BoolVar(&b)
-
 	serverOnlyFlag(a, "storage.tsdb.allow-overlapping-compaction", "Allow compaction of overlapping blocks. If set to false, TSDB stops vertical compaction and leaves overlapping blocks there. The use case is to let another component handle the compaction of overlapping blocks.").
 		Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction)

 	serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL.").
 		Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)

@@ -487,9 +456,6 @@ func main() {
 	serverOnlyFlag(a, "alertmanager.drain-notification-queue-on-shutdown", "Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down.").
 		Default("true").BoolVar(&cfg.notifier.DrainOnShutdown)

-	// TODO: Remove in Prometheus 3.0.
-	alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String()
-
 	serverOnlyFlag(a, "query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations and federation.").
 		Default("5m").SetValue(&cfg.lookbackDelta)

@@ -505,11 +471,11 @@ func main() {
 	a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
 		Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)

 	a.Flag("scrape.name-escaping-scheme", `Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots".`).Default(scrape.DefaultNameEscapingScheme.String()).StringVar(&cfg.nameEscapingScheme)

-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, auto-reload-config, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, no-default-scrape-port, old-ui, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)

 	a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)

 	promlogflag.AddFlags(a, &cfg.promlogConfig)

 	a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error {

@@ -576,10 +542,6 @@ func main() {
 		os.Exit(2)
 	}

-	if *alertmanagerTimeout != "" {
-		level.Warn(logger).Log("msg", "The flag --alertmanager.timeout has no effect and will be removed in the future.")
-	}
-
 	// Throw error for invalid config before starting other components.
 	var cfgFile *config.Config
 	if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, log.NewNopLogger()); err != nil {

@@ -626,17 +588,6 @@ func main() {
 	cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/")

 	if !agentMode {
-		// Time retention settings.
-		if oldFlagRetentionDuration != 0 {
-			level.Warn(logger).Log("deprecation_notice", "'storage.tsdb.retention' flag is deprecated use 'storage.tsdb.retention.time' instead.")
-			cfg.tsdb.RetentionDuration = oldFlagRetentionDuration
-		}
-
-		// When the new flag is set it takes precedence.
-		if newFlagRetentionDuration != 0 {
-			cfg.tsdb.RetentionDuration = newFlagRetentionDuration
-		}
-
 		if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 {
 			cfg.tsdb.RetentionDuration = defaultRetentionDuration
 			level.Info(logger).Log("msg", "No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration)

@@ -708,8 +659,8 @@ func main() {

 		ctxScrape, cancelScrape = context.WithCancel(context.Background())
 		ctxNotify, cancelNotify = context.WithCancel(context.Background())
-		discoveryManagerScrape  discoveryManager
-		discoveryManagerNotify  discoveryManager
+		discoveryManagerScrape  *discovery.Manager
+		discoveryManagerNotify  *discovery.Manager
 	)

 	// Kubernetes client metrics are used by Kubernetes SD.

@@ -729,42 +680,16 @@ func main() {
 		os.Exit(1)
 	}

-	if cfg.enableNewSDManager {
-		{
-			discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
-			if discMgr == nil {
-				level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
-				os.Exit(1)
-			}
-			discoveryManagerScrape = discMgr
-		}
-
-		{
-			discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
-			if discMgr == nil {
-				level.Error(logger).Log("msg", "failed to create a discovery manager notify")
-				os.Exit(1)
-			}
-			discoveryManagerNotify = discMgr
-		}
-	} else {
-		{
-			discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("scrape"))
-			if discMgr == nil {
-				level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
-				os.Exit(1)
-			}
-			discoveryManagerScrape = discMgr
-		}
-
-		{
-			discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("notify"))
-			if discMgr == nil {
-				level.Error(logger).Log("msg", "failed to create a discovery manager notify")
-				os.Exit(1)
-			}
-			discoveryManagerNotify = discMgr
-		}
-	}
+	discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
+	if discoveryManagerScrape == nil {
+		level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
+		os.Exit(1)
+	}
+
+	discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
+	if discoveryManagerNotify == nil {
+		level.Error(logger).Log("msg", "failed to create a discovery manager notify")
+		os.Exit(1)
+	}

 	scrapeManager, err := scrape.NewManager(

@@ -1869,15 +1794,6 @@ func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Options {
 	}
 }

-// discoveryManager interfaces the discovery manager. This is used to keep using
-// the manager that restarts SD's on reload for a few releases until we feel
-// the new manager can be enabled for all users.
-type discoveryManager interface {
-	ApplyConfig(cfg map[string]discovery.Configs) error
-	Run() error
-	SyncCh() <-chan map[string][]*targetgroup.Group
-}
-
 // rwProtoMsgFlagParser is a custom parser for config.RemoteWriteProtoMsg enum.
 type rwProtoMsgFlagParser struct {
 	msgs *[]config.RemoteWriteProtoMsg
cmd/prometheus/main_test.go

@@ -42,6 +42,11 @@ import (
 	"github.com/prometheus/prometheus/rules"
 )

+func init() {
+	// This can be removed when the default validation scheme in common is updated.
+	model.NameValidationScheme = model.UTF8Validation
+}
+
 const startupTime = 10 * time.Second

 var (

@@ -348,7 +353,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
 }

 func TestAgentSuccessfulStartup(t *testing.T) {
-	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig)
+	prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig)
 	require.NoError(t, prom.Start())

 	actualExitStatus := 0

@@ -366,7 +371,7 @@ func TestAgentSuccessfulStartup(t *testing.T) {
 }

 func TestAgentFailedStartupWithServerFlag(t *testing.T) {
-	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
+	prom := exec.Command(promPath, "-test.main", "--agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)

 	output := bytes.Buffer{}
 	prom.Stderr = &output

@@ -393,7 +398,7 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) {
 }

 func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
-	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
+	prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
 	require.NoError(t, prom.Start())

 	actualExitStatus := 0

@@ -431,7 +436,7 @@ func TestModeSpecificFlags(t *testing.T) {
 			args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"}

 			if tc.mode == "agent" {
-				args = append(args, "--enable-feature=agent", "--config.file="+agentConfig)
+				args = append(args, "--agent", "--config.file="+agentConfig)
 			} else {
 				args = append(args, "--config.file="+promConfig)
 			}
cmd/promtool/main.go

@@ -62,6 +62,11 @@ import (
 	"github.com/prometheus/prometheus/util/documentcli"
 )

+func init() {
+	// This can be removed when the default validation scheme in common is updated.
+	model.NameValidationScheme = model.UTF8Validation
+}
+
 const (
 	successExitCode = 0
 	failureExitCode = 1

@@ -325,8 +330,6 @@ func main() {
 			noDefaultScrapePort = true
 		case "":
 			continue
-		case "promql-at-modifier", "promql-negative-offset":
-			fmt.Printf("  WARNING: Option for --enable-feature is a no-op after promotion to a stable feature: %q\n", o)
 		default:
 			fmt.Printf("  WARNING: Unknown option for --enable-feature: %q\n", o)
 		}
cmd/promtool/main_test.go

@@ -31,6 +31,7 @@ import (
 	"testing"
 	"time"

+	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"

 	"github.com/prometheus/prometheus/model/labels"

@@ -38,6 +39,11 @@ import (
 	"github.com/prometheus/prometheus/promql/promqltest"
 )

+func init() {
+	// This can be removed when the default validation scheme in common is updated.
+	model.NameValidationScheme = model.UTF8Validation
+}
+
 var promtoolPath = os.Args[0]

 func TestMain(m *testing.M) {
cmd/promtool/testdata/unittest.yml (10 changes)

@@ -89,11 +89,11 @@ tests:

   # Ensure lookback delta is respected, when a value is missing.
   - expr: timestamp(test_missing)
-    eval_time: 5m
+    eval_time: 4m59s
     exp_samples:
       - value: 0
   - expr: timestamp(test_missing)
-    eval_time: 5m1s
+    eval_time: 5m
     exp_samples: []

   # Minimal test case to check edge case of a single sample.

@@ -113,7 +113,7 @@ tests:
   - expr: count_over_time(fixed_data[1h])
     eval_time: 1h
     exp_samples:
-      - value: 61
+      - value: 60
   - expr: timestamp(fixed_data)
     eval_time: 1h
     exp_samples:

@@ -183,7 +183,7 @@ tests:
   - expr: job:test:count_over_time1m
     eval_time: 1m
     exp_samples:
-      - value: 61
+      - value: 60
         labels: 'job:test:count_over_time1m{job="test"}'
   - expr: timestamp(job:test:count_over_time1m)
     eval_time: 1m10s

@@ -194,7 +194,7 @@ tests:
   - expr: job:test:count_over_time1m
     eval_time: 2m
     exp_samples:
-      - value: 61
+      - value: 60
         labels: 'job:test:count_over_time1m{job="test"}'
   - expr: timestamp(job:test:count_over_time1m)
     eval_time: 2m59s999ms
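The shifted eval times and counts track the left-open range-selector/lookback change from the 3.0 changelog: a sample sitting exactly on the lower boundary of the window no longer counts. A minimal promtool unit-test sketch of the new semantics — the series name is illustrative:

```yaml
tests:
  - interval: 1m
    input_series:
      - series: demo_metric   # illustrative; one sample at t=0
        values: '0'
    promql_expr_test:
      # 4m59s after the sample: still inside the five-minute lookback window.
      - expr: timestamp(demo_metric)
        eval_time: 4m59s
        exp_samples:
          - value: 0
      # Exactly 5m after: the window is now left-open, so the sample is excluded.
      - expr: timestamp(demo_metric)
        eval_time: 5m
        exp_samples: []
```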
cmd/promtool/tsdb.go

@@ -367,25 +367,25 @@ func printBlocks(blocks []tsdb.BlockReader, writeHeader, humanReadable bool) {
 		fmt.Fprintf(tw,
 			"%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
 			meta.ULID,
-			getFormatedTime(meta.MinTime, humanReadable),
-			getFormatedTime(meta.MaxTime, humanReadable),
+			getFormattedTime(meta.MinTime, humanReadable),
+			getFormattedTime(meta.MaxTime, humanReadable),
 			time.Duration(meta.MaxTime-meta.MinTime)*time.Millisecond,
 			meta.Stats.NumSamples,
 			meta.Stats.NumChunks,
 			meta.Stats.NumSeries,
-			getFormatedBytes(b.Size(), humanReadable),
+			getFormattedBytes(b.Size(), humanReadable),
 		)
 	}
 }

-func getFormatedTime(timestamp int64, humanReadable bool) string {
+func getFormattedTime(timestamp int64, humanReadable bool) string {
 	if humanReadable {
 		return time.Unix(timestamp/1000, 0).UTC().String()
 	}
 	return strconv.FormatInt(timestamp, 10)
 }

-func getFormatedBytes(bytes int64, humanReadable bool) string {
+func getFormattedBytes(bytes int64, humanReadable bool) string {
 	if humanReadable {
 		return units.Base2Bytes(bytes).String()
 	}
config/config.go

@@ -784,10 +784,10 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
 	}

 	switch globalConfig.MetricNameValidationScheme {
-	case "", LegacyValidationConfig:
-	case UTF8ValidationConfig:
+	case LegacyValidationConfig:
+	case "", UTF8ValidationConfig:
 		if model.NameValidationScheme != model.UTF8Validation {
-			return fmt.Errorf("utf8 name validation requested but feature not enabled via --enable-feature=utf8-names")
+			panic("utf8 name validation requested but model.NameValidationScheme is not set to UTF8")
 		}
 	default:
 		return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme)
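The stricter switch above pairs with the testdata updates below: the validation scheme can be set globally and overridden per scrape config. A hedged sketch of that interaction, mirroring the `scrape_config_local_global_validation_mode` testdata (job name illustrative):

```yaml
global:
  metric_name_validation_scheme: legacy  # default for every scrape config...
scrape_configs:
  - job_name: prometheus                 # ...unless a job overrides it locally
    metric_name_validation_scheme: utf8
```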
config/config_test.go

@@ -62,6 +62,11 @@ import (
 	"github.com/prometheus/prometheus/util/testutil"
 )

+func init() {
+	// This can be removed when the default validation scheme in common is updated.
+	model.NameValidationScheme = model.UTF8Validation
+}
+
 func mustParseURL(u string) *config.URL {
 	parsed, err := url.Parse(u)
 	if err != nil {

@@ -2084,6 +2089,10 @@ var expectedErrors = []struct {
 }

 func TestBadConfigs(t *testing.T) {
+	model.NameValidationScheme = model.LegacyValidation
+	defer func() {
+		model.NameValidationScheme = model.UTF8Validation
+	}()
 	for _, ee := range expectedErrors {
 		_, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger())
 		require.Error(t, err, "%s", ee.filename)

@@ -2093,6 +2102,10 @@ func TestBadConfigs(t *testing.T) {
 }

 func TestBadStaticConfigsJSON(t *testing.T) {
+	model.NameValidationScheme = model.LegacyValidation
+	defer func() {
+		model.NameValidationScheme = model.UTF8Validation
+	}()
 	content, err := os.ReadFile("testdata/static_config.bad.json")
 	require.NoError(t, err)
 	var tg targetgroup.Group

@@ -2101,6 +2114,10 @@ func TestBadStaticConfigsJSON(t *testing.T) {
 }

 func TestBadStaticConfigsYML(t *testing.T) {
+	model.NameValidationScheme = model.LegacyValidation
+	defer func() {
+		model.NameValidationScheme = model.UTF8Validation
+	}()
 	content, err := os.ReadFile("testdata/static_config.bad.yml")
 	require.NoError(t, err)
 	var tg targetgroup.Group

@@ -2365,17 +2382,17 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
 		{
 			name:         "global setting implies local settings",
 			inputFile:    "scrape_config_global_validation_mode",
-			expectScheme: "utf8",
+			expectScheme: "legacy",
 		},
 		{
 			name:         "local setting",
 			inputFile:    "scrape_config_local_validation_mode",
-			expectScheme: "utf8",
+			expectScheme: "legacy",
 		},
 		{
 			name:         "local setting overrides global setting",
 			inputFile:    "scrape_config_local_global_validation_mode",
-			expectScheme: "legacy",
+			expectScheme: "utf8",
 		},
 	}
config/testdata/jobname_dup.bad.yml (2 changes)

@@ -1,4 +1,6 @@
 # Two scrape configs with the same job names are not allowed.
+global:
+  metric_name_validation_scheme: legacy
 scrape_configs:
   - job_name: prometheus
   - job_name: service-x
2
config/testdata/lowercase.bad.yml
vendored
2
config/testdata/lowercase.bad.yml
vendored
|
@ -1,3 +1,5 @@
|
|||
global:
|
||||
metric_name_validation_scheme: legacy
|
||||
scrape_configs:
|
||||
- job_name: prometheus
|
||||
relabel_configs:
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
global:
|
||||
metric_name_validation_scheme: utf8
|
||||
metric_name_validation_scheme: legacy
|
||||
scrape_configs:
|
||||
- job_name: prometheus
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
global:
|
||||
metric_name_validation_scheme: utf8
|
||||
metric_name_validation_scheme: legacy
|
||||
scrape_configs:
|
||||
- job_name: prometheus
|
||||
metric_name_validation_scheme: legacy
|
||||
metric_name_validation_scheme: utf8
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
scrape_configs:
|
||||
- job_name: prometheus
|
||||
metric_name_validation_scheme: utf8
|
||||
metric_name_validation_scheme: legacy
|
||||
|
|
|
@ -1,82 +0,0 @@
|
|||
{{/* vim: set ft=html: */}}
|
||||
|
||||
{{/* Navbar, should be passed . */}}
|
||||
{{ define "navbar" }}
|
||||
<nav class="navbar fixed-top navbar-expand-sm navbar-dark bg-dark">
|
||||
<div class="container-fluid">
|
||||
<!-- Brand and toggle get grouped for better mobile display -->
|
||||
<div class="navbar-header">
|
||||
<button type="button" class="navbar-toggler" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false" aria-controls="navbar-nav" aria-label="toggle navigation">
|
||||
<span class="navbar-toggler-icon"></span>
|
||||
|
||||
|
||||
|
||||
|
||||
</button>
|
||||
<a class="navbar-brand" href="{{ pathPrefix }}/">Prometheus</a>
|
||||
</div>
|
||||
|
||||
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
|
||||
<ul class="nav navbar-nav">
|
||||
<li class="nav-item"><a class="nav-link" href="{{ pathPrefix }}/alerts">Alerts</a></li>
|
||||
<li class="nav-item"><a class="nav-link" href="https://www.pagerduty.com/">PagerDuty</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
{{ end }}
|
||||
|
||||
{{/* LHS menu, should be passed . */}}
|
||||
{{ define "menu" }}
|
||||
<div class="prom_lhs_menu row">
|
||||
<nav class="col-md-2 md-block bg-dark sidebar prom_lhs_menu_nav">
|
||||
<div class="sidebar-sticky">
|
||||
<ul class="nav flex-column">
|
||||
|
||||
{{ template "_menuItem" (args . "index.html.example" "Overview") }}
|
||||
|
||||
{{ if query "up{job='node'}" }}
|
||||
{{ template "_menuItem" (args . "node.html" "Node") }}
|
||||
{{ if match "^node" .Path }}
|
||||
{{ if .Params.instance }}
|
||||
<ul>
|
||||
<li {{ if eq .Path "node-overview.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
|
||||
<a class="nav-link" href="node-overview.html?instance={{ .Params.instance }}">{{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</a>
|
||||
</li>
|
||||
<ul>
|
||||
<li {{ if eq .Path "node-cpu.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
|
||||
<a class="nav-link" href="node-cpu.html?instance={{ .Params.instance }}">CPU</a>
|
||||
</li>
|
||||
<li {{ if eq .Path "node-disk.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
|
||||
<a class="nav-link" href="node-disk.html?instance={{ .Params.instance }}">Disk</a>
|
||||
</li>
|
||||
</ul>
|
||||
</ul>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
|
||||
{{ if query "up{job='prometheus'}" }}
|
||||
{{ template "_menuItem" (args . "prometheus.html" "Prometheus") }}
|
||||
{{ if match "^prometheus" .Path }}
|
||||
{{ if .Params.instance }}
|
||||
<ul>
|
||||
<li {{ if eq .Path "prometheus-overview.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
|
||||
<a class="nav-link" href="prometheus-overview.html?instance={{ .Params.instance }}">{{.Params.instance }}</a>
|
||||
</li>
|
||||
</ul>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
|
||||
</ul>
|
||||
</div>
|
||||
</nav>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
{{/* Helper, pass (args . path name) */}}
|
||||
{{ define "_menuItem" }}
|
||||
<li {{ if eq .arg0.Path .arg1 }} class="prom_lhs_menu_selected nav-item" {{ end }}><a class="nav-link" href="{{ .arg1 }}">{{ .arg2 }}</a></li>
|
||||
{{ end }}
|
||||
|
|
@ -1,138 +0,0 @@
|
|||
{{/* vim: set ft=html: */}}
|
||||
{{/* Load Prometheus console library JS/CSS. Should go in <head> */}}
|
||||
{{ define "prom_console_head" }}
|
||||
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.css">
|
||||
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css">
|
||||
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/css/prom_console.css">
|
||||
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.min.css">
|
||||
<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.v3.js"></script>
|
||||
<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.layout.min.js"></script>
|
||||
<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.js"></script>
|
||||
<script src="{{ pathPrefix }}/classic/static/vendor/js/jquery-3.5.1.min.js"></script>
|
||||
<script src="{{ pathPrefix }}/classic/static/vendor/js/popper.min.js"></script>
|
||||
<script src="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js"></script>
|
||||
|
||||
<script>
|
||||
var PATH_PREFIX = "{{ pathPrefix }}";
|
||||
</script>
|
||||
<script src="{{ pathPrefix }}/classic/static/js/prom_console.js"></script>
|
||||
{{ end }}
|
||||
|
||||
{{/* Top of all pages. */}}
|
||||
{{ define "head" -}}
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
{{ template "prom_console_head" }}
|
||||
</head>
|
||||
<body>
|
||||
{{ template "navbar" . }}
|
||||
|
||||
{{ template "menu" . }}
|
||||
{{ end }}
|
||||
|
||||
{{ define "__prom_query_drilldown_noop" }}{{ . }}{{ end }}
|
||||
{{ define "humanize" }}{{ humanize . }}{{ end }}
|
||||
{{ define "humanizeNoSmallPrefix" }}{{ if and (lt . 1.0) (gt . -1.0) }}{{ printf "%.3g" . }}{{ else }}{{ humanize . }}{{ end }}{{ end }}
|
||||
{{ define "humanize1024" }}{{ humanize1024 . }}{{ end }}
|
||||
{{ define "humanizeDuration" }}{{ humanizeDuration . }}{{ end }}
|
||||
{{ define "humanizePercentage" }}{{ humanizePercentage . }}{{ end }}
|
||||
{{ define "humanizeTimestamp" }}{{ humanizeTimestamp . }}{{ end }}
|
||||
{{ define "printf.1f" }}{{ printf "%.1f" . }}{{ end }}
|
||||
{{ define "printf.3g" }}{{ printf "%.3g" . }}{{ end }}
|
||||
|
||||
{{/* prom_query_drilldown (args expr suffix? renderTemplate?)
|
||||
Displays the result of the expression, with a link to /graph for it.
|
||||
|
||||
renderTemplate is the name of the template to use to render the value.
|
||||
*/}}
|
||||
{{ define "prom_query_drilldown" }}
|
||||
{{ $expr := .arg0 }}{{ $suffix := (or .arg1 "") }}{{ $renderTemplate := (or .arg2 "__prom_query_drilldown_noop") }}
|
||||
<a class="prom_query_drilldown" href="{{ pathPrefix }}{{ graphLink $expr }}">{{ with query $expr }}{{tmpl $renderTemplate ( . | first | value )}}{{ $suffix }}{{ else }}-{{ end }}</a>
|
||||
{{ end }}
|
||||
|
||||
{{ define "prom_path" }}/consoles/{{ .Path }}?{{ range $param, $value := .Params }}{{ $param }}={{ $value }}&{{ end }}{{ end }}"
|
||||
|
||||
{{ define "prom_right_table_head" }}
|
||||
<div class="prom_console_rhs">
|
||||
<table class="table table-bordered table-hover table-sm">
|
||||
{{ end }}
|
||||
{{ define "prom_right_table_tail" }}
|
||||
</table>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
{{/* RHS table head, pass job name. Should be used after prom_right_table_head. */}}
|
||||
{{ define "prom_right_table_job_head" }}
|
||||
<tr>
|
||||
<th>{{ . }}</th>
|
||||
<th>{{ template "prom_query_drilldown" (args (printf "sum(up{job='%s'})" .)) }} / {{ template "prom_query_drilldown" (args (printf "count(up{job='%s'})" .)) }}</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>CPU</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "avg by(job)(irate(process_cpu_seconds_total{job='%s'}[5m]))" .) "s/s" "humanizeNoSmallPrefix") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Memory</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "avg by(job)(process_resident_memory_bytes{job='%s'})" .) "B" "humanize1024") }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
|
||||
|
||||
{{ define "prom_content_head" }}
|
||||
<div class="prom_console_content">
|
||||
<div class="container-fluid">
|
||||
{{ template "prom_graph_timecontrol" . }}
|
||||
{{ end }}
|
||||
{{ define "prom_content_tail" }}
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
{{ define "prom_graph_timecontrol" }}
|
||||
<div class="prom_graph_timecontrol">
|
||||
<div class="prom_graph_timecontrol_inner">
|
||||
<div class="prom_graph_timecontrol_group ">
|
||||
<button class="btn btn-light pull-left" type="button" id="prom_graph_duration_shrink" title="Shrink the time range.">
|
||||
<i class="glyphicon glyphicon-minus"></i>
|
||||
</button><!-- Comments between elements to remove spaces
|
||||
--><input class="input pull-left align-middle" size="3" title="Time range of graph" type="text" id="prom_graph_duration"><!--
|
||||
--><button class="btn btn-light pull-left" type="button" id="prom_graph_duration_grow" title="Grow the time range.">
|
||||
<i class="glyphicon glyphicon-plus"></i>
|
||||
</button>
|
||||
</div>
|
||||
<div class="prom_graph_timecontrol_group ">
|
||||
<button class="btn btn-light pull-left" type="button" id="prom_graph_time_back" title="Rewind the end time.">
|
||||
<i class="glyphicon glyphicon-backward"></i>
|
||||
</button><!--
|
||||
--><input class="input pull-left align-middle" title="End time of graph" placeholder="Until" type="text" id="prom_graph_time_end" size="16" value=""><!--
|
||||
--><button class="btn btn-light pull-left" type="button" id="prom_graph_time_forward" title="Advance the end time.">
|
||||
<i class="glyphicon glyphicon-forward"></i>
|
||||
</button>
|
||||
</div>
|
||||
<div class="prom_graph_timecontrol_group ">
|
||||
<div class="btn-group dropup prom_graph_timecontrol_refresh pull-left">
|
||||
<button type="button" class="btn btn-light pull-left" id="prom_graph_refresh_button" title="Refresh.">
|
||||
<i class="glyphicon glyphicon-repeat"></i>
|
||||
<span class="icon-repeat"></span>
|
||||
(<span id="prom_graph_refresh_button_value">Off</span>)
|
||||
</button>
|
||||
<button type="button" class="btn btn-light pull-left dropdown-toggle" data-toggle="dropdown" title="Set autorefresh."aria-haspopup="true" aria-expanded="false">
|
||||
<span class="caret"></span>
|
||||
</button>
|
||||
<ul class="dropdown-menu" id="prom_graph_refresh_intervals" role="menu">
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<script>
|
||||
new PromConsole.TimeControl();
|
||||
</script>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
{{/* Bottom of all pages. */}}
|
||||
{{ define "tail" }}
|
||||
</body>
|
||||
</html>
|
||||
{{ end }}
|
|
@@ -1,28 +0,0 @@
{{ template "head" . }}

{{ template "prom_right_table_head" }}
{{ template "prom_right_table_tail" }}

{{ template "prom_content_head" . }}
<h1>Overview</h1>
<p>These are example consoles for Prometheus.</p>

<p>These consoles expect exporters to have the following job labels:</p>
<table class="table table-sm table-striped table-bordered" style="width: 0%">
  <tr>
    <th>Exporter</th>
    <th>Job label</th>
  </tr>
  <tr>
    <td>Node Exporter</td>
    <td><code>node</code></td>
  </tr>
  <tr>
    <td>Prometheus</td>
    <td><code>prometheus</code></td>
  </tr>
</table>

{{ template "prom_content_tail" . }}

{{ template "tail" }}

@@ -1,60 +0,0 @@
{{ template "head" . }}

{{ template "prom_right_table_head" }}
<tr>
  <th colspan="2">CPU(s): {{ template "prom_query_drilldown" (args (printf "scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance)) }}</th>
</tr>
{{ range printf "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='%s'}[5m])) * 100 / scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance .Params.instance | query | sortByLabel "mode" }}
<tr>
  <td>{{ .Labels.mode | title }} CPU</td>
  <td>{{ .Value | printf "%.1f" }}%</td>
</tr>
{{ end }}
<tr><th colspan="2">Misc</th></tr>
<tr>
  <td>Processes Running</td>
  <td>{{ template "prom_query_drilldown" (args (printf "node_procs_running{job='node',instance='%s'}" .Params.instance) "" "humanize") }}</td>
</tr>
<tr>
  <td>Processes Blocked</td>
  <td>{{ template "prom_query_drilldown" (args (printf "node_procs_blocked{job='node',instance='%s'}" .Params.instance) "" "humanize") }}</td>
</tr>
<tr>
  <td>Forks</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_forks_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
</tr>
<tr>
  <td>Context Switches</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_context_switches_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
</tr>
<tr>
  <td>Interrupts</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_intr_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
</tr>
<tr>
  <td>1m Load Average</td>
  <td>{{ template "prom_query_drilldown" (args (printf "node_load1{job='node',instance='%s'}" .Params.instance)) }}</td>
</tr>
{{ template "prom_right_table_tail" }}

{{ template "prom_content_head" . }}
<h1>Node CPU - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>

<h3>CPU Usage</h3>
<div id="cpuGraph"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#cpuGraph"),
  expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle',mode!='iowait',mode!='steal'}[5m]))",
  renderer: 'area',
  max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else }}undefined{{ end }},
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yTitle: 'Cores'
})
</script>
{{ template "prom_content_tail" . }}

{{ template "tail" }}

@@ -1,78 +0,0 @@
{{ template "head" . }}

{{ template "prom_right_table_head" }}
<tr>
  <th colspan="2">Disks</th>
</tr>
{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }}
<tr>
  <th colspan="2">{{ .Labels.device }}</th>
</tr>
<tr>
  <td>Utilization</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }}</td>
</tr>
<tr>
  <td>Throughput</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
</tr>
<tr>
  <td>Avg Read Time</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_reads_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }}</td>
</tr>
<tr>
  <td>Avg Write Time</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_write_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_writes_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }}</td>
</tr>
{{ end }}
<tr>
  <th colspan="2">Filesystem Fullness</th>
</tr>
{{ define "roughlyNearZero" }}
{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }}
{{ end }}
{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }}
<tr>
  <td>{{ .Labels.mountpoint }}</td>
  <td>{{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }}</td>
</tr>
{{ end }}
{{ template "prom_right_table_tail" }}

{{ template "prom_content_head" . }}
<h1>Node Disk - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>

<h3>Disk I/O Utilization</h3>
<div id="diskioGraph"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#diskioGraph"),
  expr: [
    "irate(node_disk_io_time_seconds_total{job='node',instance='{{ .Params.instance }}',device!~'^(md\\\\d+$|dm-)'}[5m]) * 100",
  ],
  min: 0,
  name: '[[ device ]]',
  yUnits: "%",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yTitle: 'Disk I/O Utilization'
})
</script>
<h3>Filesystem Usage</h3>
<div id="fsGraph"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#fsGraph"),
  expr: "100 - node_filesystem_avail_bytes{job='node',instance='{{ .Params.instance }}'} / node_filesystem_size_bytes{job='node'} * 100",
  min: 0,
  max: 100,
  name: '[[ mountpoint ]]',
  yUnits: "%",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yTitle: 'Filesystem Fullness'
})
</script>
{{ template "prom_content_tail" . }}

{{ template "tail" }}

@@ -1,121 +0,0 @@
{{ template "head" . }}

{{ template "prom_right_table_head" }}
<tr><th colspan="2">Overview</th></tr>
<tr>
  <td>User CPU</td>
  <td>{{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='user'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
</tr>
<tr>
  <td>System CPU</td>
  <td>{{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='system'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
</tr>
<tr>
  <td>Memory Total</td>
  <td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemTotal_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
</tr>
<tr>
  <td>Memory Free</td>
  <td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
</tr>
<tr>
  <th colspan="2">Network</th>
</tr>
{{ range printf "node_network_receive_bytes_total{job='node',instance='%s',device!='lo'}" .Params.instance | query | sortByLabel "device" }}
<tr>
  <td>{{ .Labels.device }} Received</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_network_receive_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
</tr>
<tr>
  <td>{{ .Labels.device }} Transmitted</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_network_transmit_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
</tr>
{{ end }}
<tr>
  <th colspan="2">Disks</th>
</tr>
{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s',device!~'^(md\\\\d+$|dm-)'}" .Params.instance | query | sortByLabel "device" }}
<tr>
  <td>{{ .Labels.device }} Utilization</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }}</td>
</tr>
{{ end }}
{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }}
<tr>
  <td>{{ .Labels.device }} Throughput</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
</tr>
{{ end }}
<tr>
  <th colspan="2">Filesystem Fullness</th>
</tr>
{{ define "roughlyNearZero" }}
{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }}
{{ end }}
{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }}
<tr>
  <td>{{ .Labels.mountpoint }}</td>
  <td>{{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }}</td>
</tr>
{{ end }}

{{ template "prom_right_table_tail" }}

{{ template "prom_content_head" . }}
<h1>Node Overview - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>

<h3>CPU Usage</h3>
<div id="cpuGraph"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#cpuGraph"),
  expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle',mode!='iowait',mode!='steal'}[5m]))",
  renderer: 'area',
  max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else }}undefined{{ end }},
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yTitle: 'Cores'
})
</script>

<h3>Disk I/O Utilization</h3>
<div id="diskioGraph"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#diskioGraph"),
  expr: [
    "irate(node_disk_io_time_seconds_total{job='node',instance='{{ .Params.instance }}',device!~'^(md\\\\d+$|dm-)'}[5m]) * 100",
  ],
  min: 0,
  name: '[[ device ]]',
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "%",
  yTitle: 'Disk I/O Utilization'
})
</script>

<h3>Memory</h3>
<div id="memoryGraph"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#memoryGraph"),
  renderer: 'area',
  expr: [
    "node_memory_Cached_bytes{job='node',instance='{{ .Params.instance }}'}",
    "node_memory_Buffers_bytes{job='node',instance='{{ .Params.instance }}'}",
    "node_memory_MemTotal_bytes{job='node',instance='{{ .Params.instance }}'} - node_memory_MemFree_bytes{job='node',instance='{{.Params.instance}}'} - node_memory_Buffers_bytes{job='node',instance='{{.Params.instance}}'} - node_memory_Cached_bytes{job='node',instance='{{.Params.instance}}'}",
"node_memory_MemFree{job='node',instance='{{ .Params.instance }}'}",
  ],
  name: ["Cached", "Buffers", "Used", "Free"],
  min: 0,
  yUnits: "B",
  yAxisFormatter: PromConsole.NumberFormatter.humanize1024,
  yHoverFormatter: PromConsole.NumberFormatter.humanize1024,
  yTitle: 'Memory'
})
</script>

{{ template "prom_content_tail" . }}

{{ template "tail" }}

@@ -1,35 +0,0 @@
{{ template "head" . }}

{{ template "prom_right_table_head" }}
<tr>
  <th>Node</th>
  <th>{{ template "prom_query_drilldown" (args "sum(up{job='node'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='node'})") }}</th>
</tr>
{{ template "prom_right_table_tail" }}

{{ template "prom_content_head" . }}
<h1>Node</h1>

<table class="table table-condensed table-striped table-bordered" style="width: 0%">
  <tr>
    <th>Node</th>
    <th>Up</th>
    <th>CPU<br/>Used</th>
    <th>Memory<br/>Available</th>
  </tr>
  {{ range query "up{job='node'}" | sortByLabel "instance" }}
  <tr>
    <td><a href="node-overview.html?instance={{ .Labels.instance }}">{{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Labels.instance }}</a></td>
    <td{{ if eq (. | value) 1.0 }}>Yes{{ else }} class="alert-danger">No{{ end }}</td>
    <td>{{ template "prom_query_drilldown" (args (printf "100 * (1 - avg by(instance) (sum without(mode) (irate(node_cpu_seconds_total{job='node',mode=~'idle|iowait|steal',instance='%s'}[5m]))))" .Labels.instance) "%" "printf.1f") }}</td>
    <td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'} + node_memory_Cached_bytes{job='node',instance='%s'} + node_memory_Buffers_bytes{job='node',instance='%s'}" .Labels.instance .Labels.instance .Labels.instance) "B" "humanize1024") }}</td>
  </tr>
  {{ else }}
  <tr><td colspan=4>No nodes found.</td></tr>
  {{ end }}
</table>

{{ template "prom_content_tail" . }}

{{ template "tail" }}

@@ -1,96 +0,0 @@
{{ template "head" . }}

{{ template "prom_right_table_head" }}
<tr>
  <th colspan="2">Overview</th>
</tr>
<tr>
  <td>CPU</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(process_cpu_seconds_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "s/s" "humanizeNoSmallPrefix") }}</td>
</tr>
<tr>
  <td>Memory</td>
  <td>{{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
</tr>
<tr>
  <td>Version</td>
  <td>{{ with query (printf "prometheus_build_info{job='prometheus',instance='%s'}" .Params.instance) }}{{ . | first | label "version" }}{{ end }}</td>
</tr>

<tr>
  <th colspan="2">Storage</th>
</tr>
<tr>
  <td>Ingested Samples</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "/s" "humanizeNoSmallPrefix") }}</td>
</tr>
<tr>
  <td>Head Series</td>
  <td>{{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_head_series{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }}</td>
</tr>
<tr>
  <td>Blocks Loaded</td>
  <td>{{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_blocks_loaded{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }}</td>
</tr>
<tr>
  <th colspan="2">Rules</th>
</tr>
<tr>
  <td>Evaluation Duration</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_evaluator_duration_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_evaluator_duration_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }}</td>
</tr>
<tr>
  <td>Notification Latency</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_notifications_latency_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_notifications_latency_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }}</td>
</tr>
<tr>
  <td>Notification Queue</td>
  <td>{{ template "prom_query_drilldown" (args (printf "prometheus_notifications_queue_length{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }}</td>
</tr>
<tr>
  <th colspan="2">HTTP Server</th>
</tr>
{{ range printf "prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s'}" .Params.instance | query | sortByLabel "handler" }}
<tr>
  <td>{{ .Labels.handler }}</td>
  <td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s',handler='%s'}[5m])" .Labels.instance .Labels.handler) "/s" "humanizeNoSmallPrefix") }}</td>
</tr>
{{ end }}

{{ template "prom_right_table_tail" }}

{{ template "prom_content_head" . }}
<div class="prom_content_div">
<h1>Prometheus Overview - {{ .Params.instance }}</h1>

<h3>Ingested Samples</h3>
<div id="samplesGraph"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#samplesGraph"),
  expr: "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='{{ .Params.instance }}'}[5m])",
  name: 'Ingested Samples',
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yTitle: "Samples",
  yUnits: "/s",
})
</script>

<h3>HTTP Server</h3>
<div id="serverGraph"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#serverGraph"),
  expr: "irate(prometheus_http_request_duration_seconds_count{job='prometheus',instance='{{ .Params.instance }}'}[5m])",
  name: '[[handler]]',
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yTitle: "Requests",
  yUnits: "/s",
})
</script>
</div>
{{ template "prom_content_tail" . }}

{{ template "tail" }}

@@ -1,34 +0,0 @@
{{ template "head" . }}

{{ template "prom_right_table_head" }}
<tr>
  <th>Prometheus</th>
  <th>{{ template "prom_query_drilldown" (args "sum(up{job='prometheus'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='prometheus'})") }}</th>
</tr>
{{ template "prom_right_table_tail" }}

{{ template "prom_content_head" . }}
<h1>Prometheus</h1>

<table class="table table-sm table-striped table-bordered" style="width: 0%">
  <tr>
    <th>Prometheus</th>
    <th>Up</th>
    <th>Ingested Samples</th>
    <th>Memory</th>
  </tr>
  {{ range query "up{job='prometheus'}" | sortByLabel "instance" }}
  <tr>
    <td><a href="prometheus-overview.html?instance={{ .Labels.instance }}">{{ .Labels.instance }}</a></td>
    <td{{ if eq (. | value) 1.0 }}>Yes{{ else }} class="alert-danger">No{{ end }}</td>
    <td class="text-right">{{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Labels.instance) "/s" "humanizeNoSmallPrefix") }}</td>
    <td class="text-right">{{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Labels.instance) "B" "humanize1024") }}</td>
  </tr>
  {{ else }}
  <tr><td colspan=4>No Prometheus servers found.</td></tr>
  {{ end }}
</table>

{{ template "prom_content_tail" . }}

{{ template "tail" }}

@@ -26,7 +26,6 @@ import (
	"github.com/prometheus/common/model"
	apiv1 "k8s.io/api/core/v1"
	v1 "k8s.io/api/discovery/v1"
	"k8s.io/api/discovery/v1beta1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"

@@ -103,9 +102,9 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
		return
	}

	// TODO(brancz): use cache.Indexer to index endpoints by
	// disv1beta1.LabelServiceName so this operation doesn't have to
	// iterate over all endpoint objects.
	// TODO(brancz): use cache.Indexer to index endpointslices by
	// LabelServiceName so this operation doesn't have to iterate over all
	// endpoint objects.
	for _, obj := range e.endpointSliceStore.List() {
		esa, err := e.getEndpointSliceAdaptor(obj)
		if err != nil {
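A minimal sketch of the indexer the updated TODO asks for, assuming the same cache.SharedIndexInformer that is passed to NewEndpointSlice. AddIndexers, cache.Indexers, and GetIndexer().ByIndex are real client-go APIs; the index name serviceIndexName and the helper addServiceNameIndex are illustrative only, not code from this change:

	// Sketch only: register a hypothetical index over the v1 EndpointSlice
	// service-name label, then look slices up by service instead of listing all.
	const serviceIndexName = "byServiceName" // hypothetical index name

	func addServiceNameIndex(inf cache.SharedIndexInformer) error {
		return inf.AddIndexers(cache.Indexers{
			serviceIndexName: func(obj interface{}) ([]string, error) {
				es, ok := obj.(*v1.EndpointSlice)
				if !ok {
					return nil, nil // ignore objects of other types
				}
				if name, ok := es.Labels[v1.LabelServiceName]; ok {
					return []string{name}, nil
				}
				return nil, nil
			},
		})
	}

	// The lookup would then replace the e.endpointSliceStore.List() loop:
	//   slices, err := inf.GetIndexer().ByIndex(serviceIndexName, svcName)
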
@@ -241,8 +240,6 @@ func (e *EndpointSlice) getEndpointSliceAdaptor(o interface{}) (endpointSliceAda
	switch endpointSlice := o.(type) {
	case *v1.EndpointSlice:
		return newEndpointSliceAdaptorFromV1(endpointSlice), nil
	case *v1beta1.EndpointSlice:
		return newEndpointSliceAdaptorFromV1beta1(endpointSlice), nil
	default:
		return nil, fmt.Errorf("received unexpected object: %v", o)
	}

@@ -16,7 +16,6 @@ package kubernetes

import (
	corev1 "k8s.io/api/core/v1"
	v1 "k8s.io/api/discovery/v1"
	"k8s.io/api/discovery/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -109,59 +108,6 @@ func (e *endpointSliceAdaptorV1) labelServiceName() string {
	return v1.LabelServiceName
}

// Adaptor for k8s.io/api/discovery/v1beta1.
type endpointSliceAdaptorV1Beta1 struct {
	endpointSlice *v1beta1.EndpointSlice
}

func newEndpointSliceAdaptorFromV1beta1(endpointSlice *v1beta1.EndpointSlice) endpointSliceAdaptor {
	return &endpointSliceAdaptorV1Beta1{endpointSlice: endpointSlice}
}

func (e *endpointSliceAdaptorV1Beta1) get() interface{} {
	return e.endpointSlice
}

func (e *endpointSliceAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta {
	return e.endpointSlice.ObjectMeta
}

func (e *endpointSliceAdaptorV1Beta1) name() string {
	return e.endpointSlice.Name
}

func (e *endpointSliceAdaptorV1Beta1) namespace() string {
	return e.endpointSlice.Namespace
}

func (e *endpointSliceAdaptorV1Beta1) addressType() string {
	return string(e.endpointSlice.AddressType)
}

func (e *endpointSliceAdaptorV1Beta1) endpoints() []endpointSliceEndpointAdaptor {
	eps := make([]endpointSliceEndpointAdaptor, 0, len(e.endpointSlice.Endpoints))
	for i := 0; i < len(e.endpointSlice.Endpoints); i++ {
		eps = append(eps, newEndpointSliceEndpointAdaptorFromV1beta1(e.endpointSlice.Endpoints[i]))
	}
	return eps
}

func (e *endpointSliceAdaptorV1Beta1) ports() []endpointSlicePortAdaptor {
	ports := make([]endpointSlicePortAdaptor, 0, len(e.endpointSlice.Ports))
	for i := 0; i < len(e.endpointSlice.Ports); i++ {
		ports = append(ports, newEndpointSlicePortAdaptorFromV1beta1(e.endpointSlice.Ports[i]))
	}
	return ports
}

func (e *endpointSliceAdaptorV1Beta1) labels() map[string]string {
	return e.endpointSlice.Labels
}

func (e *endpointSliceAdaptorV1Beta1) labelServiceName() string {
	return v1beta1.LabelServiceName
}

type endpointSliceEndpointAdaptorV1 struct {
	endpoint v1.Endpoint
}

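For orientation while reading these deletions: both adaptor variants satisfy a small interface declared earlier in this file. The sketch below is reconstructed from the method sets visible in the diff, not copied from the source, so the exact declaration may differ:

	// Reconstructed for illustration from the methods shown in this diff;
	// the real declaration lives earlier in endpointslice_adaptor.go.
	type endpointSliceAdaptor interface {
		get() interface{}
		getObjectMeta() metav1.ObjectMeta
		name() string
		namespace() string
		addressType() string
		endpoints() []endpointSliceEndpointAdaptor
		ports() []endpointSlicePortAdaptor
		labels() map[string]string
		labelServiceName() string
	}
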
@@ -218,62 +164,6 @@ func (e *endpointSliceEndpointConditionsAdaptorV1) terminating() *bool {
	return e.endpointConditions.Terminating
}

type endpointSliceEndpointAdaptorV1beta1 struct {
	endpoint v1beta1.Endpoint
}

func newEndpointSliceEndpointAdaptorFromV1beta1(endpoint v1beta1.Endpoint) endpointSliceEndpointAdaptor {
	return &endpointSliceEndpointAdaptorV1beta1{endpoint: endpoint}
}

func (e *endpointSliceEndpointAdaptorV1beta1) addresses() []string {
	return e.endpoint.Addresses
}

func (e *endpointSliceEndpointAdaptorV1beta1) hostname() *string {
	return e.endpoint.Hostname
}

func (e *endpointSliceEndpointAdaptorV1beta1) nodename() *string {
	return e.endpoint.NodeName
}

func (e *endpointSliceEndpointAdaptorV1beta1) zone() *string {
	return nil
}

func (e *endpointSliceEndpointAdaptorV1beta1) conditions() endpointSliceEndpointConditionsAdaptor {
	return newEndpointSliceEndpointConditionsAdaptorFromV1beta1(e.endpoint.Conditions)
}

func (e *endpointSliceEndpointAdaptorV1beta1) targetRef() *corev1.ObjectReference {
	return e.endpoint.TargetRef
}

func (e *endpointSliceEndpointAdaptorV1beta1) topology() map[string]string {
	return e.endpoint.Topology
}

type endpointSliceEndpointConditionsAdaptorV1beta1 struct {
	endpointConditions v1beta1.EndpointConditions
}

func newEndpointSliceEndpointConditionsAdaptorFromV1beta1(endpointConditions v1beta1.EndpointConditions) endpointSliceEndpointConditionsAdaptor {
	return &endpointSliceEndpointConditionsAdaptorV1beta1{endpointConditions: endpointConditions}
}

func (e *endpointSliceEndpointConditionsAdaptorV1beta1) ready() *bool {
	return e.endpointConditions.Ready
}

func (e *endpointSliceEndpointConditionsAdaptorV1beta1) serving() *bool {
	return e.endpointConditions.Serving
}

func (e *endpointSliceEndpointConditionsAdaptorV1beta1) terminating() *bool {
	return e.endpointConditions.Terminating
}

type endpointSlicePortAdaptorV1 struct {
	endpointPort v1.EndpointPort
}

@@ -298,28 +188,3 @@ func (e *endpointSlicePortAdaptorV1) protocol() *string {

func (e *endpointSlicePortAdaptorV1) appProtocol() *string {
	return e.endpointPort.AppProtocol
}

type endpointSlicePortAdaptorV1beta1 struct {
	endpointPort v1beta1.EndpointPort
}

func newEndpointSlicePortAdaptorFromV1beta1(port v1beta1.EndpointPort) endpointSlicePortAdaptor {
	return &endpointSlicePortAdaptorV1beta1{endpointPort: port}
}

func (e *endpointSlicePortAdaptorV1beta1) name() *string {
	return e.endpointPort.Name
}

func (e *endpointSlicePortAdaptorV1beta1) port() *int32 {
	return e.endpointPort.Port
}

func (e *endpointSlicePortAdaptorV1beta1) protocol() *string {
	val := string(*e.endpointPort.Protocol)
	return &val
}

func (e *endpointSlicePortAdaptorV1beta1) appProtocol() *string {
	return e.endpointPort.AppProtocol
}

@@ -18,7 +18,6 @@ import (

	"github.com/stretchr/testify/require"
	v1 "k8s.io/api/discovery/v1"
	"k8s.io/api/discovery/v1beta1"
)

func Test_EndpointSliceAdaptor_v1(t *testing.T) {
@@ -48,31 +47,3 @@ func Test_EndpointSliceAdaptor_v1(t *testing.T) {
		require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol())
	}
}

func Test_EndpointSliceAdaptor_v1beta1(t *testing.T) {
	endpointSlice := makeEndpointSliceV1beta1()
	adaptor := newEndpointSliceAdaptorFromV1beta1(endpointSlice)

	require.Equal(t, endpointSlice.ObjectMeta.Name, adaptor.name())
	require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace())
	require.Equal(t, endpointSlice.AddressType, v1beta1.AddressType(adaptor.addressType()))
	require.Equal(t, endpointSlice.Labels, adaptor.labels())
	require.Equal(t, "testendpoints", endpointSlice.Labels[v1beta1.LabelServiceName])

	for i, endpointAdaptor := range adaptor.endpoints() {
		require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses())
		require.Equal(t, endpointSlice.Endpoints[i].Hostname, endpointAdaptor.hostname())
		require.Equal(t, endpointSlice.Endpoints[i].Conditions.Ready, endpointAdaptor.conditions().ready())
		require.Equal(t, endpointSlice.Endpoints[i].Conditions.Serving, endpointAdaptor.conditions().serving())
		require.Equal(t, endpointSlice.Endpoints[i].Conditions.Terminating, endpointAdaptor.conditions().terminating())
		require.Equal(t, endpointSlice.Endpoints[i].TargetRef, endpointAdaptor.targetRef())
		require.Equal(t, endpointSlice.Endpoints[i].Topology, endpointAdaptor.topology())
	}

	for i, portAdaptor := range adaptor.ports() {
		require.Equal(t, endpointSlice.Ports[i].Name, portAdaptor.name())
		require.Equal(t, endpointSlice.Ports[i].Port, portAdaptor.port())
		require.EqualValues(t, endpointSlice.Ports[i].Protocol, portAdaptor.protocol())
		require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol())
	}
}

@@ -21,7 +21,6 @@ import (
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	v1 "k8s.io/api/discovery/v1"
	"k8s.io/api/discovery/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"

@@ -114,62 +113,8 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
	}
}

func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice {
	return &v1beta1.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "testendpoints",
			Namespace: "default",
			Labels: map[string]string{
				v1beta1.LabelServiceName: "testendpoints",
			},
			Annotations: map[string]string{
				"test.annotation": "test",
			},
		},
		AddressType: v1beta1.AddressTypeIPv4,
		Ports: []v1beta1.EndpointPort{
			{
				Name:     strptr("testport"),
				Port:     int32ptr(9000),
				Protocol: protocolptr(corev1.ProtocolTCP),
			},
		},
		Endpoints: []v1beta1.Endpoint{
			{
				Addresses: []string{"1.2.3.4"},
				Hostname:  strptr("testendpoint1"),
			}, {
				Addresses: []string{"2.3.4.5"},
				Conditions: v1beta1.EndpointConditions{
					Ready:       boolptr(true),
					Serving:     boolptr(true),
					Terminating: boolptr(false),
				},
			}, {
				Addresses: []string{"3.4.5.6"},
				Conditions: v1beta1.EndpointConditions{
					Ready:       boolptr(false),
					Serving:     boolptr(true),
					Terminating: boolptr(true),
				},
			}, {
				Addresses: []string{"4.5.6.7"},
				Conditions: v1beta1.EndpointConditions{
					Ready:       boolptr(true),
					Serving:     boolptr(true),
					Terminating: boolptr(false),
				},
				TargetRef: &corev1.ObjectReference{
					Kind: "Node",
					Name: "barbaz",
				},
			},
		},
	}
}

func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.25.0")
	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}})

	k8sDiscoveryTest{
		discovery: n,

@@ -249,71 +194,6 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
	}.Run(t)
}

func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) {
	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "1.20.0")

	k8sDiscoveryTest{
		discovery: n,
		beforeRun: func() {
			obj := makeEndpointSliceV1beta1()
			c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
		},
		expectedMaxItems: 1,
		expectedRes: map[string]*targetgroup.Group{
			"endpointslice/default/testendpoints": {
				Targets: []model.LabelSet{
					{
						"__address__": "1.2.3.4:9000",
						"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
						"__meta_kubernetes_endpointslice_port":              "9000",
						"__meta_kubernetes_endpointslice_port_name":         "testport",
						"__meta_kubernetes_endpointslice_port_protocol":     "TCP",
					},
					{
						"__address__": "2.3.4.5:9000",
						"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
						"__meta_kubernetes_endpointslice_port":                             "9000",
						"__meta_kubernetes_endpointslice_port_name":                        "testport",
						"__meta_kubernetes_endpointslice_port_protocol":                    "TCP",
					},
					{
						"__address__": "3.4.5.6:9000",
						"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "false",
						"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
						"__meta_kubernetes_endpointslice_port":                             "9000",
						"__meta_kubernetes_endpointslice_port_name":                        "testport",
						"__meta_kubernetes_endpointslice_port_protocol":                    "TCP",
					},
					{
						"__address__": "4.5.6.7:9000",
						"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
						"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
						"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
						"__meta_kubernetes_endpointslice_port":                             "9000",
						"__meta_kubernetes_endpointslice_port_name":                        "testport",
						"__meta_kubernetes_endpointslice_port_protocol":                    "TCP",
					},
				},
				Labels: model.LabelSet{
					"__meta_kubernetes_endpointslice_address_type":                            "IPv4",
					"__meta_kubernetes_namespace":                                             "default",
					"__meta_kubernetes_endpointslice_name":                                    "testendpoints",
					"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name":        "testendpoints",
					"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
					"__meta_kubernetes_endpointslice_annotation_test_annotation":              "test",
					"__meta_kubernetes_endpointslice_annotationpresent_test_annotation":       "true",
				},
				Source: "endpointslice/default/testendpoints",
			},
		},
	}.Run(t)
}

func TestEndpointSliceDiscoveryAdd(t *testing.T) {
	obj := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{

@@ -353,25 +233,25 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
			PodIP: "1.2.3.4",
		},
	}
	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.20.0", obj)
	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, obj)

	k8sDiscoveryTest{
		discovery: n,
		afterStart: func() {
			obj := &v1beta1.EndpointSlice{
			obj := &v1.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "testendpoints",
					Namespace: "default",
				},
				AddressType: v1beta1.AddressTypeIPv4,
				Ports: []v1beta1.EndpointPort{
				AddressType: v1.AddressTypeIPv4,
				Ports: []v1.EndpointPort{
					{
						Name:     strptr("testport"),
						Port:     int32ptr(9000),
						Protocol: protocolptr(corev1.ProtocolTCP),
					},
				},
				Endpoints: []v1beta1.Endpoint{
				Endpoints: []v1.Endpoint{
					{
						Addresses: []string{"4.3.2.1"},
						TargetRef: &corev1.ObjectReference{

@@ -379,13 +259,13 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
							Name:      "testpod",
							Namespace: "default",
						},
						Conditions: v1beta1.EndpointConditions{
						Conditions: v1.EndpointConditions{
							Ready: boolptr(false),
						},
					},
				},
			}
			c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
			c.DiscoveryV1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
		},
		expectedMaxItems: 1,
		expectedRes: map[string]*targetgroup.Group{

@@ -440,118 +320,34 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
}

func TestEndpointSliceDiscoveryDelete(t *testing.T) {
	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

	k8sDiscoveryTest{
		discovery: n,
		afterStart: func() {
			obj := makeEndpointSliceV1()
			c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{})
			c.DiscoveryV1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{})
		},
		expectedMaxItems: 2,
		expectedRes: map[string]*targetgroup.Group{
			"endpointslice/default/testendpoints": {
				Source: "endpointslice/default/testendpoints",
				Targets: []model.LabelSet{
					{
						"__address__": "1.2.3.4:9000",
						"__meta_kubernetes_endpointslice_address_target_kind":                "",
						"__meta_kubernetes_endpointslice_address_target_name":                "",
						"__meta_kubernetes_endpointslice_endpoint_conditions_ready":          "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_serving":        "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating":    "false",
						"__meta_kubernetes_endpointslice_endpoint_hostname":                  "testendpoint1",
						"__meta_kubernetes_endpointslice_endpoint_node_name":                 "foobar",
						"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
						"__meta_kubernetes_endpointslice_endpoint_topology_topology":         "value",
						"__meta_kubernetes_endpointslice_endpoint_zone":                      "us-east-1a",
						"__meta_kubernetes_endpointslice_port":                               "9000",
						"__meta_kubernetes_endpointslice_port_app_protocol":                  "http",
						"__meta_kubernetes_endpointslice_port_name":                          "testport",
						"__meta_kubernetes_endpointslice_port_protocol":                      "TCP",
					},
					{
						"__address__": "2.3.4.5:9000",
						"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
						"__meta_kubernetes_endpointslice_endpoint_zone":                    "us-east-1b",
						"__meta_kubernetes_endpointslice_port":                             "9000",
						"__meta_kubernetes_endpointslice_port_app_protocol":                "http",
						"__meta_kubernetes_endpointslice_port_name":                        "testport",
						"__meta_kubernetes_endpointslice_port_protocol":                    "TCP",
					},
					{
						"__address__": "3.4.5.6:9000",
						"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "false",
						"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
						"__meta_kubernetes_endpointslice_endpoint_zone":                    "us-east-1c",
						"__meta_kubernetes_endpointslice_port":                             "9000",
						"__meta_kubernetes_endpointslice_port_app_protocol":                "http",
						"__meta_kubernetes_endpointslice_port_name":                        "testport",
						"__meta_kubernetes_endpointslice_port_protocol":                    "TCP",
					},
					{
						"__address__": "4.5.6.7:9000",
						"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
						"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
						"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
						"__meta_kubernetes_endpointslice_endpoint_zone":                    "us-east-1a",
						"__meta_kubernetes_endpointslice_port":                             "9000",
						"__meta_kubernetes_endpointslice_port_app_protocol":                "http",
						"__meta_kubernetes_endpointslice_port_name":                        "testport",
						"__meta_kubernetes_endpointslice_port_protocol":                    "TCP",
					},
				},
				Labels: map[model.LabelName]model.LabelValue{
					"__meta_kubernetes_endpointslice_address_type":                            "IPv4",
					"__meta_kubernetes_endpointslice_name":                                    "testendpoints",
					"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name":        "testendpoints",
					"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
					"__meta_kubernetes_endpointslice_annotation_test_annotation":              "test",
					"__meta_kubernetes_endpointslice_annotationpresent_test_annotation":       "true",
					"__meta_kubernetes_namespace":                                             "default",
				},
			},
		},
	}.Run(t)
}

func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

	k8sDiscoveryTest{
		discovery: n,
		afterStart: func() {
			obj := &v1beta1.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "testendpoints",
					Namespace: "default",
				},
				AddressType: v1beta1.AddressTypeIPv4,
				Ports: []v1beta1.EndpointPort{
					{
						Name:     strptr("testport"),
						Port:     int32ptr(9000),
						Protocol: protocolptr(corev1.ProtocolTCP),
					},
				},
				Endpoints: []v1beta1.Endpoint{
					{
						Addresses: []string{"1.2.3.4"},
						Hostname:  strptr("testendpoint1"),
					}, {
						Addresses: []string{"2.3.4.5"},
						Conditions: v1beta1.EndpointConditions{
							Ready: boolptr(true),
						},
					},
				},
			}
			c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
			obj := makeEndpointSliceV1()
			obj.ObjectMeta.Labels = nil
			obj.ObjectMeta.Annotations = nil
			obj.Endpoints = obj.Endpoints[0:2]
			c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
		},
		expectedMaxItems: 2,
		expectedRes: map[string]*targetgroup.Group{

@@ -586,39 +382,11 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
						"__meta_kubernetes_endpointslice_port_name":     "testport",
						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
					},
					{
						"__address__": "3.4.5.6:9000",
						"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "false",
						"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
						"__meta_kubernetes_endpointslice_endpoint_zone":                    "us-east-1c",
						"__meta_kubernetes_endpointslice_port":                             "9000",
						"__meta_kubernetes_endpointslice_port_app_protocol":                "http",
						"__meta_kubernetes_endpointslice_port_name":                        "testport",
						"__meta_kubernetes_endpointslice_port_protocol":                    "TCP",
					},
					{
						"__address__": "4.5.6.7:9000",
						"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
						"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
						"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
						"__meta_kubernetes_endpointslice_endpoint_zone":                    "us-east-1a",
						"__meta_kubernetes_endpointslice_port":                             "9000",
						"__meta_kubernetes_endpointslice_port_app_protocol":                "http",
						"__meta_kubernetes_endpointslice_port_name":                        "testport",
						"__meta_kubernetes_endpointslice_port_protocol":                    "TCP",
					},
				},
				Labels: model.LabelSet{
					"__meta_kubernetes_endpointslice_address_type":                            "IPv4",
					"__meta_kubernetes_endpointslice_name":                                    "testendpoints",
					"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name":        "testendpoints",
					"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
					"__meta_kubernetes_endpointslice_annotation_test_annotation":              "test",
					"__meta_kubernetes_endpointslice_annotationpresent_test_annotation":       "true",
					"__meta_kubernetes_namespace":                                             "default",
					"__meta_kubernetes_endpointslice_address_type": "IPv4",
					"__meta_kubernetes_endpointslice_name":         "testendpoints",
					"__meta_kubernetes_namespace":                  "default",
				},
			},
		},

@@ -626,85 +394,18 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
}

func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

	k8sDiscoveryTest{
		discovery: n,
		afterStart: func() {
			obj := &v1beta1.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "testendpoints",
					Namespace: "default",
				},
				AddressType: v1beta1.AddressTypeIPv4,
				Ports: []v1beta1.EndpointPort{
					{
						Name:     strptr("testport"),
						Port:     int32ptr(9000),
						Protocol: protocolptr(corev1.ProtocolTCP),
					},
				},
				Endpoints: []v1beta1.Endpoint{},
			}
			c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
			obj := makeEndpointSliceV1()
			obj.Endpoints = []v1.Endpoint{}
			c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
		},
		expectedMaxItems: 2,
		expectedRes: map[string]*targetgroup.Group{
			"endpointslice/default/testendpoints": {
				Targets: []model.LabelSet{
					{
						"__address__": "1.2.3.4:9000",
						"__meta_kubernetes_endpointslice_address_target_kind":                "",
						"__meta_kubernetes_endpointslice_address_target_name":                "",
						"__meta_kubernetes_endpointslice_endpoint_conditions_ready":          "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_serving":        "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating":    "false",
						"__meta_kubernetes_endpointslice_endpoint_hostname":                  "testendpoint1",
						"__meta_kubernetes_endpointslice_endpoint_node_name":                 "foobar",
						"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
						"__meta_kubernetes_endpointslice_endpoint_topology_topology":         "value",
						"__meta_kubernetes_endpointslice_endpoint_zone":                      "us-east-1a",
						"__meta_kubernetes_endpointslice_port":                               "9000",
						"__meta_kubernetes_endpointslice_port_app_protocol":                  "http",
						"__meta_kubernetes_endpointslice_port_name":                          "testport",
						"__meta_kubernetes_endpointslice_port_protocol":                      "TCP",
					},
					{
						"__address__": "2.3.4.5:9000",
						"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
						"__meta_kubernetes_endpointslice_endpoint_zone":                    "us-east-1b",
						"__meta_kubernetes_endpointslice_port":                             "9000",
						"__meta_kubernetes_endpointslice_port_app_protocol":                "http",
						"__meta_kubernetes_endpointslice_port_name":                        "testport",
						"__meta_kubernetes_endpointslice_port_protocol":                    "TCP",
					},
					{
						"__address__": "3.4.5.6:9000",
						"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "false",
						"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
						"__meta_kubernetes_endpointslice_endpoint_zone":                    "us-east-1c",
						"__meta_kubernetes_endpointslice_port":                             "9000",
						"__meta_kubernetes_endpointslice_port_app_protocol":                "http",
						"__meta_kubernetes_endpointslice_port_name":                        "testport",
						"__meta_kubernetes_endpointslice_port_protocol":                    "TCP",
					},
					{
						"__address__": "4.5.6.7:9000",
						"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
						"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
						"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
						"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
						"__meta_kubernetes_endpointslice_endpoint_zone":                    "us-east-1a",
						"__meta_kubernetes_endpointslice_port":                             "9000",
						"__meta_kubernetes_endpointslice_port_app_protocol":                "http",
						"__meta_kubernetes_endpointslice_port_name":                        "testport",
						"__meta_kubernetes_endpointslice_port_protocol":                    "TCP",
					},
				},
				Labels: model.LabelSet{
					"__meta_kubernetes_endpointslice_address_type": "IPv4",
					"__meta_kubernetes_endpointslice_name":         "testendpoints",

@@ -721,7 +422,7 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
}

func TestEndpointSliceDiscoveryWithService(t *testing.T) {
	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

	k8sDiscoveryTest{
		discovery: n,

@@ -813,7 +514,7 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
}

func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
	n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

	k8sDiscoveryTest{
		discovery: n,

@@ -24,7 +24,6 @@ import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
	v1 "k8s.io/api/networking/v1"
	"k8s.io/api/networking/v1beta1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"

@@ -127,8 +126,6 @@ func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) b
	switch ingress := o.(type) {
	case *v1.Ingress:
		ia = newIngressAdaptorFromV1(ingress)
	case *v1beta1.Ingress:
		ia = newIngressAdaptorFromV1beta1(ingress)
	default:
		level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err",
			fmt.Errorf("received unexpected object: %v", o))

@@ -15,7 +15,6 @@ package kubernetes

import (
	v1 "k8s.io/api/networking/v1"
	"k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -89,56 +88,3 @@ func (i *ingressRuleAdaptorV1) paths() []string {
}

func (i *ingressRuleAdaptorV1) host() string { return i.rule.Host }

// Adaptor for networking.k8s.io/v1beta1.
type ingressAdaptorV1Beta1 struct {
	ingress *v1beta1.Ingress
}

func newIngressAdaptorFromV1beta1(ingress *v1beta1.Ingress) ingressAdaptor {
	return &ingressAdaptorV1Beta1{ingress: ingress}
}
func (i *ingressAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta }
func (i *ingressAdaptorV1Beta1) name() string                     { return i.ingress.Name }
func (i *ingressAdaptorV1Beta1) namespace() string                { return i.ingress.Namespace }
func (i *ingressAdaptorV1Beta1) labels() map[string]string        { return i.ingress.Labels }
func (i *ingressAdaptorV1Beta1) annotations() map[string]string   { return i.ingress.Annotations }
func (i *ingressAdaptorV1Beta1) ingressClassName() *string        { return i.ingress.Spec.IngressClassName }

func (i *ingressAdaptorV1Beta1) tlsHosts() []string {
	var hosts []string
	for _, tls := range i.ingress.Spec.TLS {
		hosts = append(hosts, tls.Hosts...)
	}
	return hosts
}

func (i *ingressAdaptorV1Beta1) rules() []ingressRuleAdaptor {
	var rules []ingressRuleAdaptor
	for _, rule := range i.ingress.Spec.Rules {
		rules = append(rules, newIngressRuleAdaptorFromV1Beta1(rule))
	}
	return rules
}

type ingressRuleAdaptorV1Beta1 struct {
	rule v1beta1.IngressRule
}

func newIngressRuleAdaptorFromV1Beta1(rule v1beta1.IngressRule) ingressRuleAdaptor {
	return &ingressRuleAdaptorV1Beta1{rule: rule}
}

func (i *ingressRuleAdaptorV1Beta1) paths() []string {
	rv := i.rule.IngressRuleValue
	if rv.HTTP == nil {
		return nil
	}
	paths := make([]string, len(rv.HTTP.Paths))
	for n, p := range rv.HTTP.Paths {
		paths[n] = p.Path
	}
	return paths
}

func (i *ingressRuleAdaptorV1Beta1) host() string { return i.rule.Host }

@@ -20,7 +20,6 @@ import (

	"github.com/prometheus/common/model"
	v1 "k8s.io/api/networking/v1"
	"k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/prometheus/prometheus/discovery/targetgroup"

@@ -89,60 +88,6 @@ func makeIngress(tls TLSMode) *v1.Ingress {
	return ret
}

func makeIngressV1beta1(tls TLSMode) *v1beta1.Ingress {
	ret := &v1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "testingress",
			Namespace:   "default",
			Labels:      map[string]string{"test/label": "testvalue"},
			Annotations: map[string]string{"test/annotation": "testannotationvalue"},
		},
		Spec: v1beta1.IngressSpec{
			IngressClassName: classString("testclass"),
			TLS:              nil,
			Rules: []v1beta1.IngressRule{
				{
					Host: "example.com",
					IngressRuleValue: v1beta1.IngressRuleValue{
						HTTP: &v1beta1.HTTPIngressRuleValue{
							Paths: []v1beta1.HTTPIngressPath{
								{Path: "/"},
								{Path: "/foo"},
							},
						},
					},
				},
				{
					// No backend config, ignored
					Host: "nobackend.example.com",
					IngressRuleValue: v1beta1.IngressRuleValue{
						HTTP: &v1beta1.HTTPIngressRuleValue{},
					},
				},
				{
					Host: "test.example.com",
					IngressRuleValue: v1beta1.IngressRuleValue{
						HTTP: &v1beta1.HTTPIngressRuleValue{
							Paths: []v1beta1.HTTPIngressPath{{}},
						},
					},
				},
			},
		},
	}

	switch tls {
	case TLSYes:
		ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com", "test.example.com"}}}
	case TLSMixed:
		ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com"}}}
	case TLSWildcard:
		ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"*.example.com"}}}
	}

	return ret
}

func classString(v string) *string {
	return &v
}

@ -212,20 +157,6 @@ func TestIngressDiscoveryAdd(t *testing.T) {
|
|||
}.Run(t)
|
||||
}
|
||||
|
||||
func TestIngressDiscoveryAddV1beta1(t *testing.T) {
|
||||
n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0")
|
||||
|
||||
k8sDiscoveryTest{
|
||||
discovery: n,
|
||||
afterStart: func() {
|
||||
obj := makeIngressV1beta1(TLSNo)
|
||||
c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
|
||||
},
|
||||
expectedMaxItems: 1,
|
||||
expectedRes: expectedTargetGroups("default", TLSNo),
|
||||
}.Run(t)
|
||||
}
|
||||
|
||||
func TestIngressDiscoveryAddTLS(t *testing.T) {
|
||||
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}})
|
||||
|
||||
|
@ -240,20 +171,6 @@ func TestIngressDiscoveryAddTLS(t *testing.T) {
|
|||
}.Run(t)
|
||||
}
|
||||
|
||||
func TestIngressDiscoveryAddTLSV1beta1(t *testing.T) {
|
||||
n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0")
|
||||
|
||||
k8sDiscoveryTest{
|
||||
discovery: n,
|
||||
afterStart: func() {
|
||||
obj := makeIngressV1beta1(TLSYes)
|
||||
c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
|
||||
},
|
||||
expectedMaxItems: 1,
|
||||
expectedRes: expectedTargetGroups("default", TLSYes),
|
||||
}.Run(t)
|
||||
}
|
||||
|
||||
func TestIngressDiscoveryAddMixed(t *testing.T) {
|
||||
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}})
|
||||
|
||||
|
@ -268,20 +185,6 @@ func TestIngressDiscoveryAddMixed(t *testing.T) {
|
|||
}.Run(t)
|
||||
}
|
||||
|
||||
func TestIngressDiscoveryAddMixedV1beta1(t *testing.T) {
|
||||
n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0")
|
||||
|
||||
k8sDiscoveryTest{
|
||||
discovery: n,
|
||||
afterStart: func() {
|
||||
obj := makeIngressV1beta1(TLSMixed)
|
||||
c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
|
||||
},
|
||||
expectedMaxItems: 1,
|
||||
expectedRes: expectedTargetGroups("default", TLSMixed),
|
||||
}.Run(t)
|
||||
}
|
||||
|
||||
func TestIngressDiscoveryNamespaces(t *testing.T) {
|
||||
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})
|
||||
|
||||
|
@ -303,27 +206,6 @@ func TestIngressDiscoveryNamespaces(t *testing.T) {
|
|||
}.Run(t)
|
||||
}
|
||||
|
||||
func TestIngressDiscoveryNamespacesV1beta1(t *testing.T) {
|
||||
n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}, "v1.18.0")
|
||||
|
||||
expected := expectedTargetGroups("ns1", TLSNo)
|
||||
for k, v := range expectedTargetGroups("ns2", TLSNo) {
|
||||
expected[k] = v
|
||||
}
|
||||
k8sDiscoveryTest{
|
||||
discovery: n,
|
||||
afterStart: func() {
|
||||
for _, ns := range []string{"ns1", "ns2"} {
|
||||
obj := makeIngressV1beta1(TLSNo)
|
||||
obj.Namespace = ns
|
||||
c.NetworkingV1beta1().Ingresses(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
|
||||
}
|
||||
},
|
||||
expectedMaxItems: 2,
|
||||
expectedRes: expected,
|
||||
}.Run(t)
|
||||
}
|
||||
|
||||
func TestIngressDiscoveryOwnNamespace(t *testing.T) {
|
||||
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{IncludeOwnNamespace: true})
|
||||
|
||||
|
|
|
@ -25,8 +25,6 @@ import (
    "github.com/prometheus/prometheus/util/strutil"

    disv1beta1 "k8s.io/api/discovery/v1beta1"

    "github.com/go-kit/log"
    "github.com/go-kit/log/level"
    "github.com/prometheus/client_golang/prometheus"
@ -36,12 +34,10 @@ import (
    apiv1 "k8s.io/api/core/v1"
    disv1 "k8s.io/api/discovery/v1"
    networkv1 "k8s.io/api/networking/v1"
    "k8s.io/api/networking/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    utilversion "k8s.io/apimachinery/pkg/util/version"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
@ -401,55 +397,22 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
    switch d.role {
    case RoleEndpointSlice:
        // Check "networking.k8s.io/v1" availability with retries.
        // If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility
        var v1Supported bool
        if retryOnError(ctx, 10*time.Second,
            func() (err error) {
                v1Supported, err = checkDiscoveryV1Supported(d.client)
                if err != nil {
                    level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err)
                }
                return err
            },
        ) {
            d.Unlock()
            return
        }

        for _, namespace := range namespaces {
            var informer cache.SharedIndexInformer
            if v1Supported {
                e := d.client.DiscoveryV1().EndpointSlices(namespace)
                elw := &cache.ListWatch{
                    ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
                        options.FieldSelector = d.selectors.endpointslice.field
                        options.LabelSelector = d.selectors.endpointslice.label
                        return e.List(ctx, options)
                    },
                    WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
                        options.FieldSelector = d.selectors.endpointslice.field
                        options.LabelSelector = d.selectors.endpointslice.label
                        return e.Watch(ctx, options)
                    },
                }
                informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{})
            } else {
                e := d.client.DiscoveryV1beta1().EndpointSlices(namespace)
                elw := &cache.ListWatch{
                    ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
                        options.FieldSelector = d.selectors.endpointslice.field
                        options.LabelSelector = d.selectors.endpointslice.label
                        return e.List(ctx, options)
                    },
                    WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
                        options.FieldSelector = d.selectors.endpointslice.field
                        options.LabelSelector = d.selectors.endpointslice.label
                        return e.Watch(ctx, options)
                    },
                }
                informer = d.newEndpointSlicesByNodeInformer(elw, &disv1beta1.EndpointSlice{})
            e := d.client.DiscoveryV1().EndpointSlices(namespace)
            elw := &cache.ListWatch{
                ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
                    options.FieldSelector = d.selectors.endpointslice.field
                    options.LabelSelector = d.selectors.endpointslice.label
                    return e.List(ctx, options)
                },
                WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
                    options.FieldSelector = d.selectors.endpointslice.field
                    options.LabelSelector = d.selectors.endpointslice.label
                    return e.Watch(ctx, options)
                },
            }
            informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{})

            s := d.client.CoreV1().Services(namespace)
            slw := &cache.ListWatch{
@ -609,55 +572,22 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
            go svc.informer.Run(ctx.Done())
        }
    case RoleIngress:
        // Check "networking.k8s.io/v1" availability with retries.
        // If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility
        var v1Supported bool
        if retryOnError(ctx, 10*time.Second,
            func() (err error) {
                v1Supported, err = checkNetworkingV1Supported(d.client)
                if err != nil {
                    level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err)
                }
                return err
            },
        ) {
            d.Unlock()
            return
        }

        for _, namespace := range namespaces {
            var informer cache.SharedInformer
            if v1Supported {
                i := d.client.NetworkingV1().Ingresses(namespace)
                ilw := &cache.ListWatch{
                    ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
                        options.FieldSelector = d.selectors.ingress.field
                        options.LabelSelector = d.selectors.ingress.label
                        return i.List(ctx, options)
                    },
                    WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
                        options.FieldSelector = d.selectors.ingress.field
                        options.LabelSelector = d.selectors.ingress.label
                        return i.Watch(ctx, options)
                    },
                }
                informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled)
            } else {
                i := d.client.NetworkingV1beta1().Ingresses(namespace)
                ilw := &cache.ListWatch{
                    ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
                        options.FieldSelector = d.selectors.ingress.field
                        options.LabelSelector = d.selectors.ingress.label
                        return i.List(ctx, options)
                    },
                    WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
                        options.FieldSelector = d.selectors.ingress.field
                        options.LabelSelector = d.selectors.ingress.label
                        return i.Watch(ctx, options)
                    },
                }
                informer = d.mustNewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled)
            i := d.client.NetworkingV1().Ingresses(namespace)
            ilw := &cache.ListWatch{
                ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
                    options.FieldSelector = d.selectors.ingress.field
                    options.LabelSelector = d.selectors.ingress.label
                    return i.List(ctx, options)
                },
                WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
                    options.FieldSelector = d.selectors.ingress.field
                    options.LabelSelector = d.selectors.ingress.label
                    return i.Watch(ctx, options)
                },
            }
            informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled)
            ingress := NewIngress(
                log.With(d.logger, "role", "ingress"),
                informer,
@ -720,20 +650,6 @@ func retryOnError(ctx context.Context, interval time.Duration, f func() error) (
    }
}

func checkNetworkingV1Supported(client kubernetes.Interface) (bool, error) {
    k8sVer, err := client.Discovery().ServerVersion()
    if err != nil {
        return false, err
    }
    semVer, err := utilversion.ParseSemantic(k8sVer.String())
    if err != nil {
        return false, err
    }
    // networking.k8s.io/v1 is available since Kubernetes v1.19
    // https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md
    return semVer.Major() >= 1 && semVer.Minor() >= 19, nil
}

func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer {
    nlw := &cache.ListWatch{
        ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
@ -834,19 +750,6 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
                }
            }
        }
    case *disv1beta1.EndpointSlice:
        for _, target := range e.Endpoints {
            if target.TargetRef != nil {
                switch target.TargetRef.Kind {
                case "Pod":
                    if target.NodeName != nil {
                        nodes = append(nodes, *target.NodeName)
                    }
                case "Node":
                    nodes = append(nodes, target.TargetRef.Name)
                }
            }
        }
    default:
        return nil, fmt.Errorf("object is not an endpointslice")
    }

@ -882,21 +785,6 @@ func (d *Discovery) mustNewSharedIndexInformer(lw cache.ListerWatcher, exampleOb
    return informer
}

func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) {
    k8sVer, err := client.Discovery().ServerVersion()
    if err != nil {
        return false, err
    }
    semVer, err := utilversion.ParseSemantic(k8sVer.String())
    if err != nil {
        return false, err
    }
    // The discovery.k8s.io/v1beta1 API version of EndpointSlice will no longer be served in v1.25.
    // discovery.k8s.io/v1 is available since Kubernetes v1.21
    // https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-25
    return semVer.Major() >= 1 && semVer.Minor() >= 21, nil
}

func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, role Role) {
    labelSet[model.LabelName(metaLabelPrefix+string(role)+"_name")] = lv(objectMeta.Name)
@ -46,7 +46,7 @@ func TestMain(m *testing.M) {

// makeDiscovery creates a kubernetes.Discovery instance for testing.
func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime.Object) (*Discovery, kubernetes.Interface) {
    return makeDiscoveryWithVersion(role, nsDiscovery, "v1.22.0", objects...)
    return makeDiscoveryWithVersion(role, nsDiscovery, "v1.25.0", objects...)
}

// makeDiscoveryWithVersion creates a kubernetes.Discovery instance with the specified kubernetes version for testing.
@ -285,40 +285,6 @@ func TestRetryOnError(t *testing.T) {
    }
}

func TestCheckNetworkingV1Supported(t *testing.T) {
    tests := []struct {
        version       string
        wantSupported bool
        wantErr       bool
    }{
        {version: "v1.18.0", wantSupported: false, wantErr: false},
        {version: "v1.18.1", wantSupported: false, wantErr: false},
        // networking v1 is supported since Kubernetes v1.19
        {version: "v1.19.0", wantSupported: true, wantErr: false},
        {version: "v1.20.0-beta.2", wantSupported: true, wantErr: false},
        // error patterns
        {version: "", wantSupported: false, wantErr: true},
        {version: "<>", wantSupported: false, wantErr: true},
    }

    for _, tc := range tests {
        tc := tc
        t.Run(tc.version, func(t *testing.T) {
            clientset := fake.NewSimpleClientset()
            fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery)
            fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: tc.version}
            supported, err := checkNetworkingV1Supported(clientset)

            if tc.wantErr {
                require.Error(t, err)
            } else {
                require.NoError(t, err)
            }
            require.Equal(t, tc.wantSupported, supported)
        })
    }
}

func TestFailuresCountMetric(t *testing.T) {
    tests := []struct {
        role Role
@ -1,332 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package legacymanager

import (
    "context"
    "fmt"
    "reflect"
    "sync"
    "time"

    "github.com/go-kit/log"
    "github.com/go-kit/log/level"
    "github.com/prometheus/client_golang/prometheus"

    "github.com/prometheus/prometheus/discovery"
    "github.com/prometheus/prometheus/discovery/targetgroup"
)

type poolKey struct {
    setName  string
    provider string
}

// provider holds a Discoverer instance, its configuration and its subscribers.
type provider struct {
    name   string
    d      discovery.Discoverer
    subs   []string
    config interface{}
}

// NewManager is the Discovery Manager constructor.
func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]discovery.DiscovererMetrics, options ...func(*Manager)) *Manager {
    if logger == nil {
        logger = log.NewNopLogger()
    }
    mgr := &Manager{
        logger:         logger,
        syncCh:         make(chan map[string][]*targetgroup.Group),
        targets:        make(map[poolKey]map[string]*targetgroup.Group),
        discoverCancel: []context.CancelFunc{},
        ctx:            ctx,
        updatert:       5 * time.Second,
        triggerSend:    make(chan struct{}, 1),
        registerer:     registerer,
        sdMetrics:      sdMetrics,
    }
    for _, option := range options {
        option(mgr)
    }

    // Register the metrics.
    // We have to do this after setting all options, so that the name of the Manager is set.
    if metrics, err := discovery.NewManagerMetrics(registerer, mgr.name); err == nil {
        mgr.metrics = metrics
    } else {
        level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
        return nil
    }

    return mgr
}

// Name sets the name of the manager.
func Name(n string) func(*Manager) {
    return func(m *Manager) {
        m.mtx.Lock()
        defer m.mtx.Unlock()
        m.name = n
    }
}

// Manager maintains a set of discovery providers and sends each update to a map channel.
// Targets are grouped by the target set name.
type Manager struct {
    logger         log.Logger
    name           string
    mtx            sync.RWMutex
    ctx            context.Context
    discoverCancel []context.CancelFunc

    // Some Discoverers(eg. k8s) send only the updates for a given target group
    // so we use map[tg.Source]*targetgroup.Group to know which group to update.
    targets map[poolKey]map[string]*targetgroup.Group
    // providers keeps track of SD providers.
    providers []*provider
    // The sync channel sends the updates as a map where the key is the job value from the scrape config.
    syncCh chan map[string][]*targetgroup.Group

    // How long to wait before sending updates to the channel. The variable
    // should only be modified in unit tests.
    updatert time.Duration

    // The triggerSend channel signals to the manager that new updates have been received from providers.
    triggerSend chan struct{}

    // A registerer for all service discovery metrics.
    registerer prometheus.Registerer

    metrics   *discovery.Metrics
    sdMetrics map[string]discovery.DiscovererMetrics
}

// Run starts the background processing.
func (m *Manager) Run() error {
    go m.sender()
    <-m.ctx.Done()
    m.cancelDiscoverers()
    return m.ctx.Err()
}

// SyncCh returns a read only channel used by all the clients to receive target updates.
func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group {
    return m.syncCh
}

// ApplyConfig removes all running discovery providers and starts new ones using the provided config.
func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error {
    m.mtx.Lock()
    defer m.mtx.Unlock()

    for pk := range m.targets {
        if _, ok := cfg[pk.setName]; !ok {
            m.metrics.DiscoveredTargets.DeleteLabelValues(m.name, pk.setName)
        }
    }
    m.cancelDiscoverers()
    m.targets = make(map[poolKey]map[string]*targetgroup.Group)
    m.providers = nil
    m.discoverCancel = nil

    failedCount := 0
    for name, scfg := range cfg {
        failedCount += m.registerProviders(scfg, name)
        m.metrics.DiscoveredTargets.WithLabelValues(name).Set(0)
    }
    m.metrics.FailedConfigs.Set(float64(failedCount))

    for _, prov := range m.providers {
        m.startProvider(m.ctx, prov)
    }

    return nil
}
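Taken together, the pieces above form a small lifecycle: construct the manager, apply a configuration, consume `SyncCh`, and let `Run` block until the context ends. The sketch below wires them up; it is a hedged illustration using only the signatures shown in this file, and `runDiscovery` plus the pre-built `sdMetrics` map are assumptions, not code from this commit:

```go
// Hypothetical caller of the legacy Manager above; only calls defined in this
// file are used. The sdMetrics map is assumed to be prepared elsewhere.
func runDiscovery(ctx context.Context, cfgs map[string]discovery.Configs, sdMetrics map[string]discovery.DiscovererMetrics) error {
    m := NewManager(ctx, log.NewNopLogger(), prometheus.DefaultRegisterer, sdMetrics, Name("scrape"))
    if m == nil {
        // NewManager returns nil when its metrics cannot be registered.
        return fmt.Errorf("failed to create discovery manager")
    }
    if err := m.ApplyConfig(cfgs); err != nil {
        return err
    }
    go func() {
        for tgs := range m.SyncCh() {
            _ = tgs // hand each update map to the consumer, e.g. a scrape manager
        }
    }()
    return m.Run() // blocks until ctx is canceled
}
```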
// StartCustomProvider is used for sdtool. Only use this if you know what you're doing.
func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker discovery.Discoverer) {
    p := &provider{
        name: name,
        d:    worker,
        subs: []string{name},
    }
    m.providers = append(m.providers, p)
    m.startProvider(ctx, p)
}

func (m *Manager) startProvider(ctx context.Context, p *provider) {
    level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs))
    ctx, cancel := context.WithCancel(ctx)
    updates := make(chan []*targetgroup.Group)

    m.discoverCancel = append(m.discoverCancel, cancel)

    go p.d.Run(ctx, updates)
    go m.updater(ctx, p, updates)
}

func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) {
    for {
        select {
        case <-ctx.Done():
            return
        case tgs, ok := <-updates:
            m.metrics.ReceivedUpdates.Inc()
            if !ok {
                level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name)
                return
            }

            for _, s := range p.subs {
                m.updateGroup(poolKey{setName: s, provider: p.name}, tgs)
            }

            select {
            case m.triggerSend <- struct{}{}:
            default:
            }
        }
    }
}

func (m *Manager) sender() {
    ticker := time.NewTicker(m.updatert)
    defer ticker.Stop()

    for {
        select {
        case <-m.ctx.Done():
            return
        case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker.
            select {
            case <-m.triggerSend:
                m.metrics.SentUpdates.Inc()
                select {
                case m.syncCh <- m.allGroups():
                default:
                    m.metrics.DelayedUpdates.Inc()
                    level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle")
                    select {
                    case m.triggerSend <- struct{}{}:
                    default:
                    }
                }
            default:
            }
        }
    }
}
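The `triggerSend` plumbing in `updater` and `sender` is a coalesced-notification pattern: a one-slot buffered channel where redundant signals are dropped, so however many updates arrive between ticks, at most one send is ever pending. A self-contained, runnable sketch of just that pattern (illustrative, not code from this file):

```go
package main

import "fmt"

func main() {
    // Coalesced notification, as used by triggerSend above: a one-slot
    // buffered channel where duplicate signals are dropped.
    trigger := make(chan struct{}, 1)

    notify := func() {
        select {
        case trigger <- struct{}{}: // mark "work pending"
        default: // a signal is already pending; coalesce
        }
    }

    notify()
    notify() // coalesced with the first signal
    notify()

    pending := 0
    for {
        select {
        case <-trigger:
            pending++
        default:
            fmt.Println("pending signals:", pending) // prints 1, not 3
            return
        }
    }
}
```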
func (m *Manager) cancelDiscoverers() {
    for _, c := range m.discoverCancel {
        c()
    }
}

func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) {
    m.mtx.Lock()
    defer m.mtx.Unlock()

    if _, ok := m.targets[poolKey]; !ok {
        m.targets[poolKey] = make(map[string]*targetgroup.Group)
    }
    for _, tg := range tgs {
        if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics.
            m.targets[poolKey][tg.Source] = tg
        }
    }
}

func (m *Manager) allGroups() map[string][]*targetgroup.Group {
    m.mtx.RLock()
    defer m.mtx.RUnlock()

    tSets := map[string][]*targetgroup.Group{}
    n := map[string]int{}
    for pkey, tsets := range m.targets {
        for _, tg := range tsets {
            // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager'
            // to signal that it needs to stop all scrape loops for this target set.
            tSets[pkey.setName] = append(tSets[pkey.setName], tg)
            n[pkey.setName] += len(tg.Targets)
        }
    }
    for setName, v := range n {
        m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v))
    }
    return tSets
}

// registerProviders returns a number of failed SD config.
func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int {
    var (
        failed int
        added  bool
    )
    add := func(cfg discovery.Config) {
        for _, p := range m.providers {
            if reflect.DeepEqual(cfg, p.config) {
                p.subs = append(p.subs, setName)
                added = true
                return
            }
        }
        typ := cfg.Name()
        d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{
            Logger:  log.With(m.logger, "discovery", typ, "config", setName),
            Metrics: m.sdMetrics[typ],
        })
        if err != nil {
            level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName)
            failed++
            return
        }
        m.providers = append(m.providers, &provider{
            name:   fmt.Sprintf("%s/%d", typ, len(m.providers)),
            d:      d,
            config: cfg,
            subs:   []string{setName},
        })
        added = true
    }
    for _, cfg := range cfgs {
        add(cfg)
    }
    if !added {
        // Add an empty target group to force the refresh of the corresponding
        // scrape pool and to notify the receiver that this target set has no
        // current targets.
        // It can happen because the combined set of SD configurations is empty
        // or because we fail to instantiate all the SD configurations.
        add(discovery.StaticConfig{{}})
    }
    return failed
}

// StaticProvider holds a list of target groups that never change.
type StaticProvider struct {
    TargetGroups []*targetgroup.Group
}

// Run implements the Worker interface.
func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
    // We still have to consider that the consumer exits right away in which case
    // the context will be canceled.
    select {
    case ch <- sd.TargetGroups:
    case <-ctx.Done():
    }
    close(ch)
}

File diff suppressed because it is too large
@ -1,261 +0,0 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package legacymanager

import (
    "errors"
    "fmt"
    "reflect"
    "sort"
    "strconv"
    "strings"
    "sync"

    "gopkg.in/yaml.v2"

    "github.com/prometheus/prometheus/discovery"
    "github.com/prometheus/prometheus/discovery/targetgroup"
)

const (
    configFieldPrefix      = "AUTO_DISCOVERY_"
    staticConfigsKey       = "static_configs"
    staticConfigsFieldName = configFieldPrefix + staticConfigsKey
)

var (
    configNames      = make(map[string]discovery.Config)
    configFieldNames = make(map[reflect.Type]string)
    configFields     []reflect.StructField

    configTypesMu sync.Mutex
    configTypes   = make(map[reflect.Type]reflect.Type)

    emptyStructType = reflect.TypeOf(struct{}{})
    configsType     = reflect.TypeOf(discovery.Configs{})
)

// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling.
func RegisterConfig(config discovery.Config) {
    registerConfig(config.Name()+"_sd_configs", reflect.TypeOf(config), config)
}

func init() {
    // N.B.: static_configs is the only Config type implemented by default.
    // All other types are registered at init by their implementing packages.
    elemTyp := reflect.TypeOf(&targetgroup.Group{})
    registerConfig(staticConfigsKey, elemTyp, discovery.StaticConfig{})
}

func registerConfig(yamlKey string, elemType reflect.Type, config discovery.Config) {
    name := config.Name()
    if _, ok := configNames[name]; ok {
        panic(fmt.Sprintf("discovery: Config named %q is already registered", name))
    }
    configNames[name] = config

    fieldName := configFieldPrefix + yamlKey // Field must be exported.
    configFieldNames[elemType] = fieldName

    // Insert fields in sorted order.
    i := sort.Search(len(configFields), func(k int) bool {
        return fieldName < configFields[k].Name
    })
    configFields = append(configFields, reflect.StructField{}) // Add empty field at end.
    copy(configFields[i+1:], configFields[i:])                 // Shift fields to the right.
    configFields[i] = reflect.StructField{                     // Write new field in place.
        Name: fieldName,
        Type: reflect.SliceOf(elemType),
        Tag:  reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`),
    }
}

func getConfigType(out reflect.Type) reflect.Type {
    configTypesMu.Lock()
    defer configTypesMu.Unlock()
    if typ, ok := configTypes[out]; ok {
        return typ
    }
    // Initial exported fields map one-to-one.
    var fields []reflect.StructField
    for i, n := 0, out.NumField(); i < n; i++ {
        switch field := out.Field(i); {
        case field.PkgPath == "" && field.Type != configsType:
            fields = append(fields, field)
        default:
            fields = append(fields, reflect.StructField{
                Name:    "_" + field.Name, // Field must be unexported.
                PkgPath: out.PkgPath(),
                Type:    emptyStructType,
            })
        }
    }
    // Append extra config fields on the end.
    fields = append(fields, configFields...)
    typ := reflect.StructOf(fields)
    configTypes[out] = typ
    return typ
}

// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs
// that have a Configs field that should be inlined.
func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error {
    outVal := reflect.ValueOf(out)
    if outVal.Kind() != reflect.Ptr {
        return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
    }
    outVal = outVal.Elem()
    if outVal.Kind() != reflect.Struct {
        return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
    }
    outTyp := outVal.Type()

    cfgTyp := getConfigType(outTyp)
    cfgPtr := reflect.New(cfgTyp)
    cfgVal := cfgPtr.Elem()

    // Copy shared fields (defaults) to dynamic value.
    var configs *discovery.Configs
    for i, n := 0, outVal.NumField(); i < n; i++ {
        if outTyp.Field(i).Type == configsType {
            configs = outVal.Field(i).Addr().Interface().(*discovery.Configs)
            continue
        }
        if cfgTyp.Field(i).PkgPath != "" {
            continue // Field is unexported: ignore.
        }
        cfgVal.Field(i).Set(outVal.Field(i))
    }
    if configs == nil {
        return fmt.Errorf("discovery: Configs field not found in type: %T", out)
    }

    // Unmarshal into dynamic value.
    if err := unmarshal(cfgPtr.Interface()); err != nil {
        return replaceYAMLTypeError(err, cfgTyp, outTyp)
    }

    // Copy shared fields from dynamic value.
    for i, n := 0, outVal.NumField(); i < n; i++ {
        if cfgTyp.Field(i).PkgPath != "" {
            continue // Field is unexported: ignore.
        }
        outVal.Field(i).Set(cfgVal.Field(i))
    }

    var err error
    *configs, err = readConfigs(cfgVal, outVal.NumField())
    return err
}
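From the caller's side, this helper is typically wired into a struct's `UnmarshalYAML`. A minimal, hedged sketch of such a consumer follows; `exampleScrapeConfig` and its fields are illustrative inventions, and Prometheus's real config structs are larger:

```go
// Illustrative consumer of the inline-configs helpers; not from this diff.
type exampleScrapeConfig struct {
    JobName string `yaml:"job_name"`
    // Populated from the registered *_sd_configs keys and static_configs.
    ServiceDiscoveryConfigs discovery.Configs `yaml:"-"`
}

// UnmarshalYAML implements yaml.Unmarshaler by delegating to the helper above.
func (c *exampleScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
    return UnmarshalYAMLWithInlineConfigs(c, unmarshal)
}

// MarshalYAML implements yaml.Marshaler via the matching helper defined below.
// A pointer receiver is deliberate: the helper takes the address of the
// Configs field, so the value must be addressable.
func (c *exampleScrapeConfig) MarshalYAML() (interface{}, error) {
    return MarshalYAMLWithInlineConfigs(c)
}
```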
func readConfigs(structVal reflect.Value, startField int) (discovery.Configs, error) {
    var (
        configs discovery.Configs
        targets []*targetgroup.Group
    )
    for i, n := startField, structVal.NumField(); i < n; i++ {
        field := structVal.Field(i)
        if field.Kind() != reflect.Slice {
            panic("discovery: internal error: field is not a slice")
        }
        for k := 0; k < field.Len(); k++ {
            val := field.Index(k)
            if val.IsZero() || (val.Kind() == reflect.Ptr && val.Elem().IsZero()) {
                key := configFieldNames[field.Type().Elem()]
                key = strings.TrimPrefix(key, configFieldPrefix)
                return nil, fmt.Errorf("empty or null section in %s", key)
            }
            switch c := val.Interface().(type) {
            case *targetgroup.Group:
                // Add index to the static config target groups for unique identification
                // within scrape pool.
                c.Source = strconv.Itoa(len(targets))
                // Coalesce multiple static configs into a single static config.
                targets = append(targets, c)
            case discovery.Config:
                configs = append(configs, c)
            default:
                panic("discovery: internal error: slice element is not a Config")
            }
        }
    }
    if len(targets) > 0 {
        configs = append(configs, discovery.StaticConfig(targets))
    }
    return configs, nil
}

// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs
// that have a Configs field that should be inlined.
func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) {
    inVal := reflect.ValueOf(in)
    for inVal.Kind() == reflect.Ptr {
        inVal = inVal.Elem()
    }
    inTyp := inVal.Type()

    cfgTyp := getConfigType(inTyp)
    cfgPtr := reflect.New(cfgTyp)
    cfgVal := cfgPtr.Elem()

    // Copy shared fields to dynamic value.
    var configs *discovery.Configs
    for i, n := 0, inTyp.NumField(); i < n; i++ {
        if inTyp.Field(i).Type == configsType {
            configs = inVal.Field(i).Addr().Interface().(*discovery.Configs)
        }
        if cfgTyp.Field(i).PkgPath != "" {
            continue // Field is unexported: ignore.
        }
        cfgVal.Field(i).Set(inVal.Field(i))
    }
    if configs == nil {
        return nil, fmt.Errorf("discovery: Configs field not found in type: %T", in)
    }

    if err := writeConfigs(cfgVal, *configs); err != nil {
        return nil, err
    }

    return cfgPtr.Interface(), nil
}

func writeConfigs(structVal reflect.Value, configs discovery.Configs) error {
    targets := structVal.FieldByName(staticConfigsFieldName).Addr().Interface().(*[]*targetgroup.Group)
    for _, c := range configs {
        if sc, ok := c.(discovery.StaticConfig); ok {
            *targets = append(*targets, sc...)
            continue
        }
        fieldName, ok := configFieldNames[reflect.TypeOf(c)]
        if !ok {
            return fmt.Errorf("discovery: cannot marshal unregistered Config type: %T", c)
        }
        field := structVal.FieldByName(fieldName)
        field.Set(reflect.Append(field, reflect.ValueOf(c)))
    }
    return nil
}

func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
    var e *yaml.TypeError
    if errors.As(err, &e) {
        oldStr := oldTyp.String()
        newStr := newTyp.String()
        for i, s := range e.Errors {
            e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr)
        }
    }
    return err
}
@ -98,7 +98,7 @@ func (m *SDMock) SetupHandlers() {
            if len(query) == 2 {
                h := sha1.New()
                h.Write([]byte(query[1]))
                // Avoing long filenames for Windows.
                // Avoiding long filenames for Windows.
                f += "__" + base64.URLEncoding.EncodeToString(h.Sum(nil))[:10]
            }
        }
@ -224,7 +224,7 @@
        "Args": [
          "--config.file=/etc/prometheus/prometheus.yml",
          "--storage.tsdb.path=/prometheus",
          "--storage.tsdb.retention=24h"
          "--storage.tsdb.retention.time=24h"
        ],
        "Privileges": {
          "CredentialSpec": null,

2 discovery/moby/testdata/swarmprom/tasks.json vendored

@ -973,7 +973,7 @@
        "Args": [
          "--config.file=/etc/prometheus/prometheus.yml",
          "--storage.tsdb.path=/prometheus",
          "--storage.tsdb.retention=24h"
          "--storage.tsdb.retention.time=24h"
        ],
        "Privileges": {
          "CredentialSpec": null,
|
|
@ -41,10 +41,10 @@ const (
|
|||
uyuniMetaLabelPrefix = model.MetaLabelPrefix + "uyuni_"
|
||||
uyuniLabelMinionHostname = uyuniMetaLabelPrefix + "minion_hostname"
|
||||
uyuniLabelPrimaryFQDN = uyuniMetaLabelPrefix + "primary_fqdn"
|
||||
uyuniLablelSystemID = uyuniMetaLabelPrefix + "system_id"
|
||||
uyuniLablelGroups = uyuniMetaLabelPrefix + "groups"
|
||||
uyuniLablelEndpointName = uyuniMetaLabelPrefix + "endpoint_name"
|
||||
uyuniLablelExporter = uyuniMetaLabelPrefix + "exporter"
|
||||
uyuniLabelSystemID = uyuniMetaLabelPrefix + "system_id"
|
||||
uyuniLabelGroups = uyuniMetaLabelPrefix + "groups"
|
||||
uyuniLabelEndpointName = uyuniMetaLabelPrefix + "endpoint_name"
|
||||
uyuniLabelExporter = uyuniMetaLabelPrefix + "exporter"
|
||||
uyuniLabelProxyModule = uyuniMetaLabelPrefix + "proxy_module"
|
||||
uyuniLabelMetricsPath = uyuniMetaLabelPrefix + "metrics_path"
|
||||
uyuniLabelScheme = uyuniMetaLabelPrefix + "scheme"
|
||||
|
@ -270,10 +270,10 @@ func (d *Discovery) getEndpointLabels(
|
|||
model.AddressLabel: model.LabelValue(addr),
|
||||
uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname),
|
||||
uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN),
|
||||
uyuniLablelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)),
|
||||
uyuniLablelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)),
|
||||
uyuniLablelEndpointName: model.LabelValue(endpoint.EndpointName),
|
||||
uyuniLablelExporter: model.LabelValue(endpoint.ExporterName),
|
||||
uyuniLabelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)),
|
||||
uyuniLabelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)),
|
||||
uyuniLabelEndpointName: model.LabelValue(endpoint.EndpointName),
|
||||
uyuniLabelExporter: model.LabelValue(endpoint.ExporterName),
|
||||
uyuniLabelProxyModule: model.LabelValue(endpoint.Module),
|
||||
uyuniLabelMetricsPath: model.LabelValue(endpoint.Path),
|
||||
uyuniLabelScheme: model.LabelValue(scheme),
|
||||
|
|
|
@ -33,8 +33,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--web.page-title</code> | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` |
| <code class="text-nowrap">--web.cors.origin</code> | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com' | `.*` |
| <code class="text-nowrap">--storage.tsdb.path</code> | Base path for metrics storage. Use with server mode only. | `data/` |
| <code class="text-nowrap">--storage.tsdb.retention</code> | [DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use "storage.tsdb.retention.time" instead. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.retention.time</code> | How long to retain samples in storage. When this flag is set it overrides "storage.tsdb.retention". If neither this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.retention.time</code> | How long to retain samples in storage. If neither this flag nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.retention.size</code> | Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.no-lockfile</code> | Do not create lockfile in data directory. Use with server mode only. | `false` |
| <code class="text-nowrap">--storage.tsdb.head-chunks-write-queue-size</code> | Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental. Use with server mode only. | `0` |
@ -57,8 +56,8 @@ The Prometheus monitoring server
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| <code class="text-nowrap">--scrape.name-escaping-scheme</code> | Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots". | `values` |
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, auto-reload-config, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, no-default-scrape-port, old-ui, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--agent</code> | Run Prometheus in 'Agent mode'. | |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |
@ -126,9 +126,9 @@ global:
  [ keep_dropped_targets: <int> | default = 0 ]

  # Specifies the validation scheme for metric and label names. Either blank or
  # "legacy" for letters, numbers, colons, and underscores; or "utf8" for full
  # UTF-8 support.
  [ metric_name_validation_scheme <string> | default "legacy" ]
  # "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons,
  # and underscores.
  [ metric_name_validation_scheme <string> | default "utf8" ]

runtime:
  # Configure the Go garbage collector GOGC parameter
@ -485,10 +485,10 @@ metric_relabel_configs:
  # that will be kept in memory. 0 means no limit.
  [ keep_dropped_targets: <int> | default = 0 ]

  # Specifies the validation scheme for metric and label names. Either blank or
  # "legacy" for letters, numbers, colons, and underscores; or "utf8" for full
  # UTF-8 support.
  [ metric_name_validation_scheme <string> | default "legacy" ]
  # Specifies the validation scheme for metric and label names. Either blank or
  # "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, and
  # underscores.
  [ metric_name_validation_scheme <string> | default "utf8" ]

  # Limit on total number of positive and negative buckets allowed in a single
  # native histogram. The resolution of a histogram with more buckets will be
@ -1407,7 +1407,7 @@ authorization:
  # `credentials_file`.
  [ credentials: <secret> ]
  # Sets the credentials to the credentials read from the configured file.
  # It is mutuall exclusive with `credentials`.
  # It is mutually exclusive with `credentials`.
  [ credentials_file: <filename> ]

  # Optional OAuth 2.0 configuration, currently not supported by AWS.
@ -2298,6 +2298,8 @@ The `endpointslice` role discovers targets from existing endpointslices. For eac
address referenced in the endpointslice object one target is discovered. If the endpoint is backed by a pod, all
additional container ports of the pod, not bound to an endpoint port, are discovered as targets as well.

The role requires the `discovery.k8s.io/v1` API version (available since Kubernetes v1.21).

Available meta labels:

* `__meta_kubernetes_namespace`: The namespace of the endpoints object.
@ -2318,7 +2320,7 @@ Available meta labels:
* `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: Flag that shows if the referenced object has a kubernetes.io/hostname annotation.
* `__meta_kubernetes_endpointslice_endpoint_hostname`: Hostname of the referenced endpoint.
* `__meta_kubernetes_endpointslice_endpoint_node_name`: Name of the Node hosting the referenced endpoint.
* `__meta_kubernetes_endpointslice_endpoint_zone`: Zone the referenced endpoint exists in (only available when using the `discovery.k8s.io/v1` API group).
* `__meta_kubernetes_endpointslice_endpoint_zone`: Zone the referenced endpoint exists in.
* `__meta_kubernetes_endpointslice_port`: Port of the referenced endpoint.
* `__meta_kubernetes_endpointslice_port_name`: Named port of the referenced endpoint.
* `__meta_kubernetes_endpointslice_port_protocol`: Protocol of the referenced endpoint.
@ -2331,6 +2333,8 @@ The `ingress` role discovers a target for each path of each ingress.
This is generally useful for blackbox monitoring of an ingress.
The address will be set to the host specified in the ingress spec.

The role requires the `networking.k8s.io/v1` API version (available since Kubernetes v1.19).

Available meta labels:

* `__meta_kubernetes_namespace`: The namespace of the ingress object.
@ -2623,7 +2627,7 @@ authorization:
  # `credentials_file`.
  [ credentials: <secret> ]
  # Sets the credentials to the credentials read from the configured file.
  # It is mutuall exclusive with `credentials`.
  # It is mutually exclusive with `credentials`.
  [ credentials_file: <filename> ]

  # Optional OAuth 2.0 configuration, currently not supported by AWS.
|
|||
# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or azuread.
|
||||
# To use the default credentials from the Google Cloud SDK, use `google_iam: {}`.
|
||||
google_iam:
|
||||
# Service account key with monitoring write permessions.
|
||||
# Service account key with monitoring write permissions.
|
||||
credentials_file: <file_name>
|
||||
|
||||
# Configures the remote write request's TLS settings.
|
||||
|
|
|
@ -20,14 +20,6 @@ values according to the values of the current environment variables. References
to undefined variables are replaced by the empty string.
The `$` character can be escaped by using `$$`.

## Remote Write Receiver

`--enable-feature=remote-write-receiver`

The remote write receiver allows Prometheus to accept remote write requests from other Prometheus servers. More details can be found [here](storage.md#overview).

Activating the remote write receiver via a feature flag is deprecated. Use `--web.enable-remote-write-receiver` instead. This feature flag will be ignored in future versions of Prometheus.

## Exemplars storage

`--enable-feature=exemplar-storage`

@ -55,20 +47,6 @@ When enabled, for each instance scrape, Prometheus stores a sample in the follow
to find out how close they are to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. Note that `scrape_sample_limit` can be zero if there is no limit configured, which means that the query above can return `+Inf` for targets with no limit (as we divide by zero). If you want to query only for targets that do have a sample limit use this query: `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`.
- `scrape_body_size_bytes`. The uncompressed size of the most recent scrape response, if successful. Scrapes failing because `body_size_limit` is exceeded report `-1`, other scrape failures report `0`.

## New service discovery manager

`--enable-feature=new-service-discovery-manager`

When enabled, Prometheus uses a new service discovery manager that does not
restart unchanged discoveries upon reloading. This makes reloads faster and reduces
pressure on service discoveries' sources.

Users are encouraged to test the new service discovery manager and report any
issues upstream.

In future releases, this new service discovery manager will become the default and
this feature flag will be ignored.

## Prometheus agent

`--enable-feature=agent`

@ -264,14 +242,6 @@ When enabled, Prometheus will change the way in which the `__name__` label is re

This allows optionally preserving the `__name__` label via the `label_replace` and `label_join` functions, and helps prevent the "vector cannot contain metrics with the same labelset" error, which can happen when applying a regex-matcher to the `__name__` label.

## UTF-8 Name Support

`--enable-feature=utf8-names`

When enabled, changes the metric and label name validation scheme inside Prometheus to allow the full UTF-8 character set.
By itself, this flag does not enable the request of UTF-8 names via content negotiation.
Users will also have to set `metric_name_validation_scheme` in scrape configs to enable the feature either on the global config or on a per-scrape config basis.

## Auto Reload Config

`--enable-feature=auto-reload-config`
@ -203,12 +203,12 @@ Range vector literals work like instant vector literals, except that they
select a range of samples back from the current instant. Syntactically, a [time
duration](#time-durations) is appended in square brackets (`[]`) at the end of
a vector selector to specify how far back in time values should be fetched for
each resulting range vector element. The range is a closed interval,
i.e. samples with timestamps coinciding with either boundary of the range are
still included in the selection.
each resulting range vector element. The range is a left-open and right-closed interval,
i.e. samples with timestamps coinciding with the left boundary of the range are excluded from the selection,
while samples coinciding with the right boundary of the range are included in the selection.
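Restated as a condition (a paraphrase of the sentence above, not text from the docs): a sample with timestamp `t` is included in a range selector of duration `d` evaluated at time `T` if and only if `T - d < t <= T`.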
In this example, we select all the values we have recorded within the last 5
minutes for all time series that have the metric name `http_requests_total` and
In this example, we select all the values recorded less than 5m ago for all time series
that have the metric name `http_requests_total` and
a `job` label set to `prometheus`:

    http_requests_total{job="prometheus"}[5m]

@ -358,7 +358,7 @@ independently of the actual present time series data. This is mainly to support
cases like aggregation (`sum`, `avg`, and so on), where multiple aggregated
time series do not precisely align in time. Because of their independence,
Prometheus needs to assign a value at those timestamps for each relevant time
series. It does so by taking the newest sample before this timestamp within the lookback period.
series. It does so by taking the newest sample that is less than the lookback period ago.
The lookback period is 5 minutes by default.
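Equivalently (restating the rule above): the value used at evaluation time `T` is that of the sample with the greatest timestamp `t` satisfying `T - L < t <= T`, where `L` is the lookback period.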

If a target scrape or rule evaluation no longer returns a sample for a time
@ -87,10 +87,9 @@ or 31 days, whichever is smaller.
Prometheus has several flags that configure local storage. The most important are:

- `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`.
- `--storage.tsdb.retention.time`: How long to retain samples in storage. When this flag is
  set, it overrides `storage.tsdb.retention`. If neither this flag nor `storage.tsdb.retention`
  nor `storage.tsdb.retention.size` is set, the retention time defaults to `15d`.
  Supported units: y, w, d, h, m, s, ms.
- `--storage.tsdb.retention.time`: How long to retain samples in storage. If neither
  this flag nor `storage.tsdb.retention.size` is set, the retention time defaults to
  `15d`. Supported units: y, w, d, h, m, s, ms.
- `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain.
  The oldest data will be removed first. Defaults to `0` or disabled. Units supported:
  B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only
@ -98,7 +97,6 @@ Prometheus has several flags that configure local storage. The most important ar
  chunks are counted in the total size. So the minimum requirement for the disk is the
  peak space taken by the `wal` (the WAL and Checkpoint) and `chunks_head`
  (m-mapped Head chunks) directory combined (peaks every 2 hours).
- `--storage.tsdb.retention`: Deprecated in favor of `storage.tsdb.retention.time`.
- `--storage.tsdb.wal-compression`: Enables compression of the write-ahead log (WAL).
  Depending on your data, you can expect the WAL size to be halved with little extra
  cpu load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0.
@ -146,7 +144,7 @@ a buffer, ensuring that older entries will be removed before the allocated stora
for Prometheus becomes full.

At present, we recommend setting the retention size to, at most, 80-85% of your
allocated Prometheus disk space. This increases the likelihood that older entires
allocated Prometheus disk space. This increases the likelihood that older entries
will be removed prior to hitting any disk limitations.
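As a worked example (illustrative numbers, not from the docs): with 100GiB of disk allocated to Prometheus, this guidance corresponds to setting `--storage.tsdb.retention.size` to roughly 80GiB to 85GiB, leaving the remainder as headroom for the WAL and in-progress compactions.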

## Remote storage integrations
@ -1,4 +1,4 @@
# An example scrape configuration for running Prometheus with Ovhcloud.
# An example scrape configuration for running Prometheus with OVHcloud.
scrape_configs:
  - job_name: 'ovhcloud'
    ovhcloud_sd_configs:
6 go.mod

@ -64,7 +64,7 @@ require (
    github.com/vultr/govultr/v2 v2.17.2
    go.opentelemetry.io/collector/pdata v1.14.1
    go.opentelemetry.io/collector/semconv v0.108.1
    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0
    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
    go.opentelemetry.io/otel v1.29.0
    go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0
    go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0
@ -81,7 +81,7 @@ require (
    golang.org/x/text v0.18.0
    golang.org/x/time v0.6.0
    golang.org/x/tools v0.24.0
    google.golang.org/api v0.196.0
    google.golang.org/api v0.195.0
    google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed
    google.golang.org/grpc v1.66.0
    google.golang.org/protobuf v1.34.2
@ -140,7 +140,7 @@ require (
    github.com/google/go-querystring v1.1.0 // indirect
    github.com/google/gofuzz v1.2.0 // indirect
    github.com/google/s2a-go v0.1.8 // indirect
    github.com/googleapis/enterprise-certificate-proxy v0.3.3 // indirect
    github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
    github.com/googleapis/gax-go/v2 v2.13.0 // indirect
    github.com/gorilla/websocket v1.5.0 // indirect
    github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
go.sum
|
@ -328,8 +328,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
|
|||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.3 h1:QRje2j5GZimBzlbhGA2V2QlGNgL8G6e+wGo/+/2bWI0=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.3/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
|
||||
|
@ -736,8 +736,8 @@ go.opentelemetry.io/collector/pdata v1.14.1 h1:wXZjtQA7Vy5HFqco+yA95ENyMQU5heBB1
|
|||
go.opentelemetry.io/collector/pdata v1.14.1/go.mod h1:z1dTjwwtcoXxZx2/nkHysjxMeaxe9pEmYTEr4SMNIx8=
|
||||
go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4=
|
||||
go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
|
||||
go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
|
||||
go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E=
|
||||
|
@ -1056,8 +1056,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
|
|||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.196.0 h1:k/RafYqebaIJBO3+SMnfEGtFVlvp5vSgqTUF54UN/zg=
|
||||
google.golang.org/api v0.196.0/go.mod h1:g9IL21uGkYgvQ5BZg6BAtoGJQIm8r6EgaAbpNey5wBE=
|
||||
google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU=
|
||||
google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
|
|
@ -342,7 +342,7 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
|
|||
default:
|
||||
// All other cases shouldn't actually happen.
|
||||
// They are a direct collision of CounterReset and NotCounterReset.
|
||||
// Conservatively set the CounterResetHint to "unknown" and isse a warning.
|
||||
// Conservatively set the CounterResetHint to "unknown" and issue a warning.
|
||||
h.CounterResetHint = UnknownCounterReset
|
||||
// TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
|
||||
}
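
The collision rule described in the comment above is easy to state on its own. A minimal, simplified sketch (the helper name is hypothetical; in the real code this logic lives inline in `Add`), assuming the `CounterResetHint` constants from `model/histogram`:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// combineCounterResetHints is a hypothetical helper mirroring the rule in the
// hunk above: identical hints pass through unchanged, while a direct collision
// of CounterReset and NotCounterReset conservatively degrades to "unknown".
func combineCounterResetHints(a, b histogram.CounterResetHint) histogram.CounterResetHint {
	if a == b {
		return a
	}
	// All other cases shouldn't actually happen; see the comment in Add.
	return histogram.UnknownCounterReset
}

func main() {
	got := combineCounterResetHints(histogram.CounterReset, histogram.NotCounterReset)
	fmt.Println(got == histogram.UnknownCounterReset) // true
}
```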
|
||||
|
@ -658,7 +658,7 @@ func detectReset(currIt, prevIt *floatBucketIterator) bool {
|
|||
if !currIt.Next() {
|
||||
// Reached end of currIt early, therefore
|
||||
// previous histogram has a bucket that the
|
||||
// current one does not have. Unlass all
|
||||
// current one does not have. Unless all
|
||||
// remaining buckets in the previous histogram
|
||||
// are unpopulated, this is a reset.
|
||||
for {
|
||||
|
@ -891,7 +891,7 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() {
|
|||
// reconcileZeroBuckets finds a zero bucket large enough to include the zero
|
||||
// buckets of both histograms (the receiving histogram and the other histogram)
|
||||
// with a zero threshold that is not within a populated bucket in either
|
||||
// histogram. This method modifies the receiving histogram accourdingly, but
|
||||
// histogram. This method modifies the receiving histogram accordingly, but
|
||||
// leaves the other histogram as is. Instead, it returns the zero count the
|
||||
// other histogram would have if it were modified.
|
||||
func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
|
||||
|
|
|
@ -131,6 +131,54 @@ func TestFloatHistogramMul(t *testing.T) {
|
|||
NegativeBuckets: []float64{9, 3, 15, 18},
|
||||
},
|
||||
},
|
||||
{
|
||||
"negation",
|
||||
&FloatHistogram{
|
||||
ZeroThreshold: 0.01,
|
||||
ZeroCount: 11,
|
||||
Count: 30,
|
||||
Sum: 23,
|
||||
PositiveSpans: []Span{{-2, 2}, {1, 3}},
|
||||
PositiveBuckets: []float64{1, 0, 3, 4, 7},
|
||||
NegativeSpans: []Span{{3, 2}, {3, 2}},
|
||||
NegativeBuckets: []float64{3, 1, 5, 6},
|
||||
},
|
||||
-1,
|
||||
&FloatHistogram{
|
||||
ZeroThreshold: 0.01,
|
||||
ZeroCount: -11,
|
||||
Count: -30,
|
||||
Sum: -23,
|
||||
PositiveSpans: []Span{{-2, 2}, {1, 3}},
|
||||
PositiveBuckets: []float64{-1, 0, -3, -4, -7},
|
||||
NegativeSpans: []Span{{3, 2}, {3, 2}},
|
||||
NegativeBuckets: []float64{-3, -1, -5, -6},
|
||||
},
|
||||
},
|
||||
{
|
||||
"negative multiplier",
|
||||
&FloatHistogram{
|
||||
ZeroThreshold: 0.01,
|
||||
ZeroCount: 11,
|
||||
Count: 30,
|
||||
Sum: 23,
|
||||
PositiveSpans: []Span{{-2, 2}, {1, 3}},
|
||||
PositiveBuckets: []float64{1, 0, 3, 4, 7},
|
||||
NegativeSpans: []Span{{3, 2}, {3, 2}},
|
||||
NegativeBuckets: []float64{3, 1, 5, 6},
|
||||
},
|
||||
-2,
|
||||
&FloatHistogram{
|
||||
ZeroThreshold: 0.01,
|
||||
ZeroCount: -22,
|
||||
Count: -60,
|
||||
Sum: -46,
|
||||
PositiveSpans: []Span{{-2, 2}, {1, 3}},
|
||||
PositiveBuckets: []float64{-2, 0, -6, -8, -14},
|
||||
NegativeSpans: []Span{{3, 2}, {3, 2}},
|
||||
NegativeBuckets: []float64{-6, -2, -10, -12},
|
||||
},
|
||||
},
|
||||
{
|
||||
"no-op with custom buckets",
|
||||
&FloatHistogram{
|
||||
|
@ -409,6 +457,54 @@ func TestFloatHistogramDiv(t *testing.T) {
|
|||
NegativeBuckets: []float64{1.5, 0.5, 2.5, 3},
|
||||
},
|
||||
},
|
||||
{
|
||||
"negation",
|
||||
&FloatHistogram{
|
||||
ZeroThreshold: 0.01,
|
||||
ZeroCount: 5.5,
|
||||
Count: 3493.3,
|
||||
Sum: 2349209.324,
|
||||
PositiveSpans: []Span{{-2, 1}, {2, 3}},
|
||||
PositiveBuckets: []float64{1, 3.3, 4.2, 0.1},
|
||||
NegativeSpans: []Span{{3, 2}, {3, 2}},
|
||||
NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
|
||||
},
|
||||
-1,
|
||||
&FloatHistogram{
|
||||
ZeroThreshold: 0.01,
|
||||
ZeroCount: -5.5,
|
||||
Count: -3493.3,
|
||||
Sum: -2349209.324,
|
||||
PositiveSpans: []Span{{-2, 1}, {2, 3}},
|
||||
PositiveBuckets: []float64{-1, -3.3, -4.2, -0.1},
|
||||
NegativeSpans: []Span{{3, 2}, {3, 2}},
|
||||
NegativeBuckets: []float64{-3.1, -3, -1.234e5, -1000},
|
||||
},
|
||||
},
|
||||
{
|
||||
"negative half",
|
||||
&FloatHistogram{
|
||||
ZeroThreshold: 0.01,
|
||||
ZeroCount: 11,
|
||||
Count: 30,
|
||||
Sum: 23,
|
||||
PositiveSpans: []Span{{-2, 2}, {1, 3}},
|
||||
PositiveBuckets: []float64{1, 0, 3, 4, 7},
|
||||
NegativeSpans: []Span{{3, 2}, {3, 2}},
|
||||
NegativeBuckets: []float64{3, 1, 5, 6},
|
||||
},
|
||||
-2,
|
||||
&FloatHistogram{
|
||||
ZeroThreshold: 0.01,
|
||||
ZeroCount: -5.5,
|
||||
Count: -15,
|
||||
Sum: -11.5,
|
||||
PositiveSpans: []Span{{-2, 2}, {1, 3}},
|
||||
PositiveBuckets: []float64{-0.5, 0, -1.5, -2, -3.5},
|
||||
NegativeSpans: []Span{{3, 2}, {3, 2}},
|
||||
NegativeBuckets: []float64{-1.5, -0.5, -2.5, -3},
|
||||
},
|
||||
},
|
||||
{
|
||||
"no-op with custom buckets",
|
||||
&FloatHistogram{
|
||||
|
|
|
@ -140,7 +140,7 @@ testmetric{label="\"bar\""} 1`
|
|||
v: 8.3835e-05,
|
||||
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
|
||||
}, {
|
||||
// NOTE: Unlike OpenMetrics, Promparse allows spaces between label terms. This appears to be unintended and should probably be fixed.
|
||||
// NOTE: Unlike OpenMetrics, PromParser allows spaces between label terms. This appears to be unintended and should probably be fixed.
|
||||
m: `go_gc_duration_seconds { quantile = "2.0" a = "b" }`,
|
||||
v: 8.3835e-05,
|
||||
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "2.0", "a", "b"),
|
||||
|
|
|
@ -604,7 +604,7 @@ func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) {
|
|||
return totalLength, mf.Unmarshal(b[varIntLength:totalLength])
|
||||
}
|
||||
|
||||
// formatOpenMetricsFloat works like the usual Go string formatting of a fleat
|
||||
// formatOpenMetricsFloat works like the usual Go string formatting of a float
|
||||
// but appends ".0" if the resulting number would otherwise contain neither a
|
||||
// "." nor an "e".
|
||||
func formatOpenMetricsFloat(f float64) string {
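
The fixed comment fully specifies the behaviour, so it can be sketched independently. A hypothetical re-implementation (not the code from this file), shown here only to make the ".0"-suffix rule concrete:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// formatOpenMetricsFloatSketch mimics the documented behaviour: plain Go
// float formatting, plus a ".0" suffix when neither "." nor "e" appears.
func formatOpenMetricsFloatSketch(f float64) string {
	s := strconv.FormatFloat(f, 'g', -1, 64)
	if !strings.ContainsAny(s, ".e") {
		s += ".0"
	}
	return s
}

func main() {
	fmt.Println(formatOpenMetricsFloatSketch(42))     // "42.0"
	fmt.Println(formatOpenMetricsFloatSketch(4.2e10)) // "4.2e+10", unchanged
}
```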
|
||||
|
|
|
@ -743,7 +743,7 @@ func TestHangingNotifier(t *testing.T) {
|
|||
|
||||
// Initialize the discovery manager
|
||||
// This is relevant as the updates aren't sent continually in real life, but only each updatert.
|
||||
// The old implementation of TestHangingNotifier didn't take that into acount.
|
||||
// The old implementation of TestHangingNotifier didn't take that into account.
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
reg := prometheus.NewRegistry()
|
||||
|
|
|
@ -909,11 +909,17 @@ func getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path
|
|||
}
|
||||
|
||||
if evalRange == 0 {
|
||||
start -= durationMilliseconds(s.LookbackDelta)
|
||||
// Reduce the start by one fewer ms than the lookback delta
// because we want to exclude samples that are precisely the
// lookback delta before the eval time.
|
||||
start -= durationMilliseconds(s.LookbackDelta) - 1
|
||||
} else {
|
||||
// For all matrix queries we want to ensure that we have (end-start) + range selected
|
||||
// this way we have `range` data before the start time
|
||||
start -= durationMilliseconds(evalRange)
|
||||
// For all matrix queries we want to ensure that we have
|
||||
// (end-start) + range selected; this way we have `range` data
|
||||
// before the start time. We subtract one from the range to
|
||||
// exclude samples positioned directly at the lower boundary of
|
||||
// the range.
|
||||
start -= durationMilliseconds(evalRange) - 1
|
||||
}
|
||||
|
||||
offsetMilliseconds := durationMilliseconds(n.OriginalOffset)
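
To see why the extra `- 1` gives the intended half-open window, here is a minimal, self-contained sketch; the variable names and the example timestamps are ours, only the arithmetic mirrors the hunk above:

```go
package main

import (
	"fmt"
	"time"
)

func durationMilliseconds(d time.Duration) int64 { return int64(d / time.Millisecond) }

func main() {
	lookback := 5 * time.Minute
	evalTime := int64(10_000_000) // evaluation timestamp in ms

	// As in the hunk above: shrink the window by one ms so it is
	// effectively half-open, (evalTime-lookback, evalTime].
	start := evalTime - (durationMilliseconds(lookback) - 1)

	boundary := evalTime - durationMilliseconds(lookback)
	fmt.Println(boundary >= start)   // false: a sample exactly one lookback delta old is excluded
	fmt.Println(boundary+1 >= start) // true: one ms newer and it is selected
}
```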
|
||||
|
@ -1815,6 +1821,9 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
|
|||
for j := range mat[i].Floats {
|
||||
mat[i].Floats[j].F = -mat[i].Floats[j].F
|
||||
}
|
||||
for j := range mat[i].Histograms {
|
||||
mat[i].Histograms[j].H = mat[i].Histograms[j].H.Copy().Mul(-1)
|
||||
}
|
||||
}
|
||||
if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() {
|
||||
ev.errorf("vector cannot contain metrics with the same labelset")
|
||||
|
@ -2084,7 +2093,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co
|
|||
seriesIterators := make([]*storage.MemoizedSeriesIterator, len(vs.Series))
|
||||
for i, s := range vs.Series {
|
||||
it := s.Iterator(nil)
|
||||
seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta))
|
||||
seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1)
|
||||
}
|
||||
|
||||
return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
|
@ -2146,7 +2155,7 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no
|
|||
if valueType == chunkenc.ValNone || t > refTime {
|
||||
var ok bool
|
||||
t, v, h, ok = it.PeekPrev()
|
||||
if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) {
|
||||
if !ok || t <= refTime-durationMilliseconds(ev.lookbackDelta) {
|
||||
return 0, 0, nil, false
|
||||
}
|
||||
}
|
||||
|
@ -2280,20 +2289,20 @@ func (ev *evaluator) matrixIterSlice(
|
|||
mintFloats, mintHistograms := mint, mint
|
||||
|
||||
// First floats...
|
||||
if len(floats) > 0 && floats[len(floats)-1].T >= mint {
|
||||
if len(floats) > 0 && floats[len(floats)-1].T > mint {
|
||||
// There is an overlap between previous and current ranges, retain common
|
||||
// points. In most such cases:
|
||||
// (a) the overlap is significantly larger than the eval step; and/or
|
||||
// (b) the number of samples is relatively small.
|
||||
// so a linear search will be as fast as a binary search.
|
||||
var drop int
|
||||
for drop = 0; floats[drop].T < mint; drop++ {
|
||||
for drop = 0; floats[drop].T <= mint; drop++ {
|
||||
}
|
||||
ev.currentSamples -= drop
|
||||
copy(floats, floats[drop:])
|
||||
floats = floats[:len(floats)-drop]
|
||||
// Only append points with timestamps after the last timestamp we have.
|
||||
mintFloats = floats[len(floats)-1].T + 1
|
||||
mintFloats = floats[len(floats)-1].T
|
||||
} else {
|
||||
ev.currentSamples -= len(floats)
|
||||
if floats != nil {
|
||||
|
@ -2302,14 +2311,14 @@ func (ev *evaluator) matrixIterSlice(
|
|||
}
|
||||
|
||||
// ...then the same for histograms. TODO(beorn7): Use generics?
|
||||
if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint {
|
||||
if len(histograms) > 0 && histograms[len(histograms)-1].T > mint {
|
||||
// There is an overlap between previous and current ranges, retain common
|
||||
// points. In most such cases:
|
||||
// (a) the overlap is significantly larger than the eval step; and/or
|
||||
// (b) the number of samples is relatively small.
|
||||
// so a linear search will be as fast as a binary search.
|
||||
var drop int
|
||||
for drop = 0; histograms[drop].T < mint; drop++ {
|
||||
for drop = 0; histograms[drop].T <= mint; drop++ {
|
||||
}
|
||||
// Rotate the buffer around the drop index so that points before mint can be
|
||||
// reused to store new histograms.
|
||||
|
@ -2320,7 +2329,7 @@ func (ev *evaluator) matrixIterSlice(
|
|||
histograms = histograms[:len(histograms)-drop]
|
||||
ev.currentSamples -= totalHPointSize(histograms)
|
||||
// Only append points with timestamps after the last timestamp we have.
|
||||
mintHistograms = histograms[len(histograms)-1].T + 1
|
||||
mintHistograms = histograms[len(histograms)-1].T
|
||||
} else {
|
||||
ev.currentSamples -= totalHPointSize(histograms)
|
||||
if histograms != nil {
|
||||
|
@ -2344,7 +2353,7 @@ loop:
|
|||
case chunkenc.ValFloatHistogram, chunkenc.ValHistogram:
|
||||
t := buf.AtT()
|
||||
// Values in the buffer are guaranteed to be smaller than maxt.
|
||||
if t >= mintHistograms {
|
||||
if t > mintHistograms {
|
||||
if histograms == nil {
|
||||
histograms = getMatrixSelectorHPoints()
|
||||
}
|
||||
|
@ -2370,7 +2379,7 @@ loop:
|
|||
continue loop
|
||||
}
|
||||
// Values in the buffer are guaranteed to be smaller than maxt.
|
||||
if t >= mintFloats {
|
||||
if t > mintFloats {
|
||||
ev.currentSamples++
|
||||
if ev.currentSamples > ev.maxSamples {
|
||||
ev.error(ErrTooManySamples(env))
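
The same boundary convention runs through the `matrixIterSlice` hunks above: with `>` and `<=` in place of `>=` and `<`, a point sitting exactly at `mint` is dropped from the reused buffer and never re-appended. A standalone sketch of that trimming rule, with our own stand-in types rather than the engine's:

```go
package main

import "fmt"

// fpoint stands in for the engine's FPoint; only T and F matter here.
type fpoint struct {
	T int64
	F float64
}

// trimBefore drops every sample with T <= mint, mirroring the new loop
// condition above, so the retained window is the half-open (mint, maxt].
// The bounds check is ours; the engine's guard makes it unnecessary there.
func trimBefore(points []fpoint, mint int64) []fpoint {
	var drop int
	for drop = 0; drop < len(points) && points[drop].T <= mint; drop++ {
	}
	copy(points, points[drop:])
	return points[:len(points)-drop]
}

func main() {
	buf := []fpoint{{T: 100, F: 1}, {T: 200, F: 2}, {T: 300, F: 3}}
	fmt.Println(trimBefore(buf, 200)) // [{300 3}]: the point at exactly mint is dropped too
}
```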
|
||||
|
|
|
@ -321,271 +321,271 @@ func TestSelectHintsSetCorrectly(t *testing.T) {
|
|||
{
|
||||
query: "foo", start: 10000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 5000, End: 10000},
|
||||
{Start: 5001, End: 10000},
|
||||
},
|
||||
}, {
|
||||
query: "foo @ 15", start: 10000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 10000, End: 15000},
|
||||
{Start: 10001, End: 15000},
|
||||
},
|
||||
}, {
|
||||
query: "foo @ 1", start: 10000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: -4000, End: 1000},
|
||||
{Start: -3999, End: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "foo[2m]", start: 200000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 80000, End: 200000, Range: 120000},
|
||||
{Start: 80001, End: 200000, Range: 120000},
|
||||
},
|
||||
}, {
|
||||
query: "foo[2m] @ 180", start: 200000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 60000, End: 180000, Range: 120000},
|
||||
{Start: 60001, End: 180000, Range: 120000},
|
||||
},
|
||||
}, {
|
||||
query: "foo[2m] @ 300", start: 200000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 180000, End: 300000, Range: 120000},
|
||||
{Start: 180001, End: 300000, Range: 120000},
|
||||
},
|
||||
}, {
|
||||
query: "foo[2m] @ 60", start: 200000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: -60000, End: 60000, Range: 120000},
|
||||
{Start: -59999, End: 60000, Range: 120000},
|
||||
},
|
||||
}, {
|
||||
query: "foo[2m] offset 2m", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 60000, End: 180000, Range: 120000},
|
||||
{Start: 60001, End: 180000, Range: 120000},
|
||||
},
|
||||
}, {
|
||||
query: "foo[2m] @ 200 offset 2m", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: -40000, End: 80000, Range: 120000},
|
||||
{Start: -39999, End: 80000, Range: 120000},
|
||||
},
|
||||
}, {
|
||||
query: "foo[2m:1s]", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 175000, End: 300000, Step: 1000},
|
||||
{Start: 175001, End: 300000, Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time(foo[2m:1s])", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 175000, End: 300000, Func: "count_over_time", Step: 1000},
|
||||
{Start: 175001, End: 300000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time(foo[2m:1s] @ 300)", start: 200000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 175000, End: 300000, Func: "count_over_time", Step: 1000},
|
||||
{Start: 175001, End: 300000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time(foo[2m:1s] @ 200)", start: 200000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 75000, End: 200000, Func: "count_over_time", Step: 1000},
|
||||
{Start: 75001, End: 200000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time(foo[2m:1s] @ 100)", start: 200000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: -25000, End: 100000, Func: "count_over_time", Step: 1000},
|
||||
{Start: -24999, End: 100000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 165000, End: 290000, Func: "count_over_time", Step: 1000},
|
||||
{Start: 165001, End: 290000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 155000, End: 280000, Func: "count_over_time", Step: 1000},
|
||||
{Start: 155001, End: 280000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
// When the @ is on the vector selector, the enclosing subquery parameters
|
||||
// don't affect the hint ranges.
|
||||
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
|
||||
{Start: 185001, End: 190000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
// When the @ is on the vector selector, the enclosing subquery parameters
|
||||
// don't affect the hint ranges.
|
||||
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
|
||||
{Start: 185001, End: 190000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: -45000, End: 80000, Func: "count_over_time", Step: 1000},
|
||||
{Start: -44999, End: 80000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "foo", start: 10000, end: 20000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 5000, End: 20000, Step: 1000},
|
||||
{Start: 5001, End: 20000, Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "foo @ 15", start: 10000, end: 20000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 10000, End: 15000, Step: 1000},
|
||||
{Start: 10001, End: 15000, Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "foo @ 1", start: 10000, end: 20000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: -4000, End: 1000, Step: 1000},
|
||||
{Start: -3999, End: 1000, Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "rate(foo[2m] @ 180)", start: 200000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 60000, End: 180000, Range: 120000, Func: "rate", Step: 1000},
|
||||
{Start: 60001, End: 180000, Range: 120000, Func: "rate", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "rate(foo[2m] @ 300)", start: 200000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 180000, End: 300000, Range: 120000, Func: "rate", Step: 1000},
|
||||
{Start: 180001, End: 300000, Range: 120000, Func: "rate", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "rate(foo[2m] @ 60)", start: 200000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: -60000, End: 60000, Range: 120000, Func: "rate", Step: 1000},
|
||||
{Start: -59999, End: 60000, Range: 120000, Func: "rate", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "rate(foo[2m])", start: 200000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 80000, End: 500000, Range: 120000, Func: "rate", Step: 1000},
|
||||
{Start: 80001, End: 500000, Range: 120000, Func: "rate", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "rate(foo[2m] offset 2m)", start: 300000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 60000, End: 380000, Range: 120000, Func: "rate", Step: 1000},
|
||||
{Start: 60001, End: 380000, Range: 120000, Func: "rate", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "rate(foo[2m:1s])", start: 300000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 175000, End: 500000, Func: "rate", Step: 1000},
|
||||
{Start: 175001, End: 500000, Func: "rate", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time(foo[2m:1s])", start: 300000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 175000, End: 500000, Func: "count_over_time", Step: 1000},
|
||||
{Start: 175001, End: 500000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 165000, End: 490000, Func: "count_over_time", Step: 1000},
|
||||
{Start: 165001, End: 490000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 175000, End: 300000, Func: "count_over_time", Step: 1000},
|
||||
{Start: 175001, End: 300000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 75000, End: 200000, Func: "count_over_time", Step: 1000},
|
||||
{Start: 75001, End: 200000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: -25000, End: 100000, Func: "count_over_time", Step: 1000},
|
||||
{Start: -24999, End: 100000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 155000, End: 480000, Func: "count_over_time", Step: 1000},
|
||||
{Start: 155001, End: 480000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
// When the @ is on the vector selector, the enclosing subquery parameters
|
||||
// don't affect the hint ranges.
|
||||
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
|
||||
{Start: 185001, End: 190000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
// When the @ is on the vector selector, the enclosing subquery parameters
|
||||
// don't affect the hint ranges.
|
||||
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
|
||||
{Start: 185001, End: 190000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: -45000, End: 80000, Func: "count_over_time", Step: 1000},
|
||||
{Start: -44999, End: 80000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "sum by (dim1) (foo)", start: 10000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 5000, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}},
|
||||
{Start: 5001, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}},
|
||||
},
|
||||
}, {
|
||||
query: "sum without (dim1) (foo)", start: 10000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 5000, End: 10000, Func: "sum", Grouping: []string{"dim1"}},
|
||||
{Start: 5001, End: 10000, Func: "sum", Grouping: []string{"dim1"}},
|
||||
},
|
||||
}, {
|
||||
query: "sum by (dim1) (avg_over_time(foo[1s]))", start: 10000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 9000, End: 10000, Func: "avg_over_time", Range: 1000},
|
||||
{Start: 9001, End: 10000, Func: "avg_over_time", Range: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "sum by (dim1) (max by (dim2) (foo))", start: 10000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 5000, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}},
|
||||
{Start: 5001, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}},
|
||||
},
|
||||
}, {
|
||||
query: "(max by (dim1) (foo))[5s:1s]", start: 10000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}, Step: 1000},
|
||||
{Start: 1, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}, Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 95000, End: 120000, Func: "sum", By: true, Step: 5000},
|
||||
{Start: 95000, End: 120000, Func: "max", By: true, Step: 5000},
|
||||
{Start: 95001, End: 120000, Func: "sum", By: true, Step: 5000},
|
||||
{Start: 95001, End: 120000, Func: "max", By: true, Step: 5000},
|
||||
},
|
||||
}, {
|
||||
query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 45000, End: 50000, Step: 1000},
|
||||
{Start: 245000, End: 250000, Step: 1000},
|
||||
{Start: 895000, End: 900000, Step: 1000},
|
||||
{Start: 45001, End: 50000, Step: 1000},
|
||||
{Start: 245001, End: 250000, Step: 1000},
|
||||
{Start: 895001, End: 900000, Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "foo @ 50 + bar + baz @ 900", start: 100000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 45000, End: 50000, Step: 1000},
|
||||
{Start: 95000, End: 500000, Step: 1000},
|
||||
{Start: 895000, End: 900000, Step: 1000},
|
||||
{Start: 45001, End: 50000, Step: 1000},
|
||||
{Start: 95001, End: 500000, Step: 1000},
|
||||
{Start: 895001, End: 900000, Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "rate(foo[2s] @ 50) + bar @ 250 + baz @ 900", start: 100000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 48000, End: 50000, Step: 1000, Func: "rate", Range: 2000},
|
||||
{Start: 245000, End: 250000, Step: 1000},
|
||||
{Start: 895000, End: 900000, Step: 1000},
|
||||
{Start: 48001, End: 50000, Step: 1000, Func: "rate", Range: 2000},
|
||||
{Start: 245001, End: 250000, Step: 1000},
|
||||
{Start: 895001, End: 900000, Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "rate(foo[2s:1s] @ 50) + bar + baz", start: 100000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 43000, End: 50000, Step: 1000, Func: "rate"},
|
||||
{Start: 95000, End: 500000, Step: 1000},
|
||||
{Start: 95000, End: 500000, Step: 1000},
|
||||
{Start: 43001, End: 50000, Step: 1000, Func: "rate"},
|
||||
{Start: 95001, End: 500000, Step: 1000},
|
||||
{Start: 95001, End: 500000, Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "rate(foo[2s:1s] @ 50) + bar + rate(baz[2m:1s] @ 900 offset 2m) ", start: 100000, end: 500000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 43000, End: 50000, Step: 1000, Func: "rate"},
|
||||
{Start: 95000, End: 500000, Step: 1000},
|
||||
{Start: 655000, End: 780000, Step: 1000, Func: "rate"},
|
||||
{Start: 43001, End: 50000, Step: 1000, Func: "rate"},
|
||||
{Start: 95001, End: 500000, Step: 1000},
|
||||
{Start: 655001, End: 780000, Step: 1000, Func: "rate"},
|
||||
},
|
||||
}, { // Hints are based on the innermost subquery timestamp.
|
||||
query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time", Step: 25000},
|
||||
{Start: -149999, End: 50000, Range: 100000, Func: "sum_over_time", Step: 25000},
|
||||
},
|
||||
}, { // Hints are based on the innermost subquery timestamp.
|
||||
query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time", Step: 25000},
|
||||
{Start: 2800001, End: 3000000, Range: 100000, Func: "sum_over_time", Step: 25000},
|
||||
},
|
||||
},
|
||||
} {
|
||||
|
@ -935,22 +935,20 @@ load 10s
|
|||
},
|
||||
},
|
||||
{
|
||||
Query: "max_over_time(metricWith1SampleEvery10Seconds[59s])[20s:5s]",
|
||||
Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]",
|
||||
Start: time.Unix(201, 0),
|
||||
PeakSamples: 10,
|
||||
TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 20/5 (using 59s so we always return 6 samples
|
||||
// as if we run a query on 00 looking back 60 seconds we will return 7 samples;
|
||||
// see next test).
|
||||
TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 4
|
||||
TotalSamplesPerStep: stats.TotalSamplesPerStep{
|
||||
201000: 24,
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]",
|
||||
Query: "max_over_time(metricWith1SampleEvery10Seconds[61s])[20s:5s]",
|
||||
Start: time.Unix(201, 0),
|
||||
PeakSamples: 11,
|
||||
TotalSamples: 26, // (1 sample / 10 seconds * 60 seconds) * 4 + 2 as
|
||||
// max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples.
|
||||
// max_over_time(metricWith1SampleEvery10Seconds[61s]) @ 190 and 200 will return 7 samples.
|
||||
TotalSamplesPerStep: stats.TotalSamplesPerStep{
|
||||
201000: 26,
|
||||
},
|
||||
|
@ -959,10 +957,9 @@ load 10s
|
|||
Query: "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]",
|
||||
Start: time.Unix(201, 0),
|
||||
PeakSamples: 78,
|
||||
TotalSamples: 338, // (1 histogram (size 13 HPoint) / 10 seconds * 60 seconds) * 4 + 2 * 13 as
|
||||
// max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples.
|
||||
TotalSamples: 312, // (1 histogram (size 13) / 10 seconds * 60 seconds) * 4
|
||||
TotalSamplesPerStep: stats.TotalSamplesPerStep{
|
||||
201000: 338,
|
||||
201000: 312,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -1427,23 +1424,23 @@ load 10s
|
|||
},
|
||||
{
|
||||
// The peak samples in memory is during the first evaluation:
|
||||
// - Subquery takes 22 samples, 11 for each bigmetric,
|
||||
// - Result is calculated per series where the series samples is buffered, hence 11 more here.
|
||||
// - Subquery takes 22 samples, 11 for each bigmetric, but samples on the left bound won't be evaluated.
|
||||
// - Result is calculated per series where the series samples are buffered, hence 10 more here.
|
||||
// - The result of two series is added before the last series buffer is discarded, so 2 more here.
|
||||
// Hence at peak it is 22 (subquery) + 11 (buffer of a series) + 2 (result from 2 series).
|
||||
// Hence at peak it is 22 (subquery) + 10 (buffer of a series) + 2 (result from 2 series).
|
||||
// The subquery samples and the buffer is discarded before duplicating.
|
||||
Query: `rate(bigmetric[10s:1s] @ 10)`,
|
||||
MaxSamples: 35,
|
||||
MaxSamples: 34,
|
||||
Start: time.Unix(0, 0),
|
||||
End: time.Unix(10, 0),
|
||||
Interval: 5 * time.Second,
|
||||
},
|
||||
{
|
||||
// Here the reasoning is same as above. But LHS and RHS are done one after another.
|
||||
// So while one of them takes 35 samples at peak, we need to hold the 2 sample
|
||||
// So while one of them takes 34 samples at peak, we need to hold the 2 sample
|
||||
// result of the other till then.
|
||||
Query: `rate(bigmetric[10s:1s] @ 10) + rate(bigmetric[10s:1s] @ 30)`,
|
||||
MaxSamples: 37,
|
||||
MaxSamples: 36,
|
||||
Start: time.Unix(0, 0),
|
||||
End: time.Unix(10, 0),
|
||||
Interval: 5 * time.Second,
|
||||
|
@ -1452,25 +1449,25 @@ load 10s
|
|||
// promql.Sample as above but with only 1 part as step invariant.
|
||||
// Here the peak is caused by the non-step invariant part as it touches more time range.
|
||||
// Hence at peak it is 2*21 (subquery from 0s to 20s)
|
||||
// + 11 (buffer of a series per evaluation)
|
||||
// + 10 (buffer of a series per evaluation)
|
||||
// + 6 (result from 2 series at 3 eval times).
|
||||
Query: `rate(bigmetric[10s:1s]) + rate(bigmetric[10s:1s] @ 30)`,
|
||||
MaxSamples: 59,
|
||||
MaxSamples: 58,
|
||||
Start: time.Unix(10, 0),
|
||||
End: time.Unix(20, 0),
|
||||
Interval: 5 * time.Second,
|
||||
},
|
||||
{
|
||||
// Nested subquery.
|
||||
// We saw that innermost rate takes 35 samples which is still the peak
|
||||
// We saw that the innermost rate takes 34 samples, which is still the peak
|
||||
// since the other two subqueries just duplicate the result.
|
||||
Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`,
|
||||
MaxSamples: 35,
|
||||
MaxSamples: 34,
|
||||
Start: time.Unix(10, 0),
|
||||
},
|
||||
{
|
||||
// Nested subquery.
|
||||
// Now the outmost subquery produces more samples than inner most rate.
|
||||
// Now the outermost subquery produces more samples than the innermost rate.
|
||||
Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[17s:1s] @ 2000`,
|
||||
MaxSamples: 36,
|
||||
Start: time.Unix(10, 0),
|
||||
|
@ -1579,11 +1576,11 @@ load 1ms
|
|||
start: 10,
|
||||
result: promql.Matrix{
|
||||
promql.Series{
|
||||
Floats: []promql.FPoint{{F: 28, T: 280000}, {F: 29, T: 290000}, {F: 30, T: 300000}},
|
||||
Floats: []promql.FPoint{{F: 29, T: 290000}, {F: 30, T: 300000}},
|
||||
Metric: lbls1,
|
||||
},
|
||||
promql.Series{
|
||||
Floats: []promql.FPoint{{F: 56, T: 280000}, {F: 58, T: 290000}, {F: 60, T: 300000}},
|
||||
Floats: []promql.FPoint{{F: 58, T: 290000}, {F: 60, T: 300000}},
|
||||
Metric: lbls2,
|
||||
},
|
||||
},
|
||||
|
@ -1592,7 +1589,7 @@ load 1ms
|
|||
start: 100,
|
||||
result: promql.Matrix{
|
||||
promql.Series{
|
||||
Floats: []promql.FPoint{{F: 3, T: -2000}, {F: 2, T: -1000}, {F: 1, T: 0}},
|
||||
Floats: []promql.FPoint{{F: 2, T: -1000}, {F: 1, T: 0}},
|
||||
Metric: lblsneg,
|
||||
},
|
||||
},
|
||||
|
@ -1601,7 +1598,7 @@ load 1ms
|
|||
start: 100,
|
||||
result: promql.Matrix{
|
||||
promql.Series{
|
||||
Floats: []promql.FPoint{{F: 504, T: -503000}, {F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}},
|
||||
Floats: []promql.FPoint{{F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}},
|
||||
Metric: lblsneg,
|
||||
},
|
||||
},
|
||||
|
@ -1610,7 +1607,7 @@ load 1ms
|
|||
start: 100,
|
||||
result: promql.Matrix{
|
||||
promql.Series{
|
||||
Floats: []promql.FPoint{{F: 2342, T: 2342}, {F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}},
|
||||
Floats: []promql.FPoint{{F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}},
|
||||
Metric: lblsms,
|
||||
},
|
||||
},
|
||||
|
@ -3060,7 +3057,7 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
|
|||
ts time.Time
|
||||
}{
|
||||
"matches series with points in range": {
|
||||
expr: "some_metric[1m]",
|
||||
expr: "some_metric[2m]",
|
||||
ts: baseT.Add(2 * time.Minute),
|
||||
expected: promql.Matrix{
|
||||
{
|
||||
|
@ -3096,7 +3093,6 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
|
|||
{
|
||||
Metric: labels.FromStrings("__name__", "some_metric_with_stale_marker"),
|
||||
Floats: []promql.FPoint{
|
||||
{T: timestamp.FromTime(baseT), F: 0},
|
||||
{T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1},
|
||||
{T: timestamp.FromTime(baseT.Add(3 * time.Minute)), F: 3},
|
||||
},
|
||||
|
@ -3118,6 +3114,217 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
|
||||
// TODO(codesome): Integrate histograms into the PromQL testing framework
|
||||
// and write more tests there.
|
||||
cases := []struct {
|
||||
histograms []histogram.Histogram
|
||||
expected histogram.FloatHistogram
|
||||
expectedAvg histogram.FloatHistogram
|
||||
}{
|
||||
{
|
||||
histograms: []histogram.Histogram{
|
||||
{
|
||||
CounterResetHint: histogram.GaugeType,
|
||||
Schema: 0,
|
||||
Count: 25,
|
||||
Sum: 1234.5,
|
||||
ZeroThreshold: 0.001,
|
||||
ZeroCount: 4,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 1, Length: 2},
|
||||
},
|
||||
PositiveBuckets: []int64{1, 1, -1, 0},
|
||||
NegativeSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 2, Length: 2},
|
||||
},
|
||||
NegativeBuckets: []int64{2, 2, -3, 8},
|
||||
},
|
||||
{
|
||||
CounterResetHint: histogram.GaugeType,
|
||||
Schema: 0,
|
||||
Count: 41,
|
||||
Sum: 2345.6,
|
||||
ZeroThreshold: 0.001,
|
||||
ZeroCount: 5,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 4},
|
||||
{Offset: 0, Length: 0},
|
||||
{Offset: 0, Length: 3},
|
||||
},
|
||||
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
|
||||
NegativeSpans: []histogram.Span{
|
||||
{Offset: 1, Length: 4},
|
||||
{Offset: 2, Length: 0},
|
||||
{Offset: 2, Length: 3},
|
||||
},
|
||||
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
|
||||
},
|
||||
{
|
||||
CounterResetHint: histogram.GaugeType,
|
||||
Schema: 0,
|
||||
Count: 41,
|
||||
Sum: 1111.1,
|
||||
ZeroThreshold: 0.001,
|
||||
ZeroCount: 5,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 4},
|
||||
{Offset: 0, Length: 0},
|
||||
{Offset: 0, Length: 3},
|
||||
},
|
||||
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
|
||||
NegativeSpans: []histogram.Span{
|
||||
{Offset: 1, Length: 4},
|
||||
{Offset: 2, Length: 0},
|
||||
{Offset: 2, Length: 3},
|
||||
},
|
||||
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
|
||||
},
|
||||
{
|
||||
CounterResetHint: histogram.GaugeType,
|
||||
Schema: 1, // Everything is 0 just to make the count 4 so avg has nicer numbers.
|
||||
},
|
||||
},
|
||||
expected: histogram.FloatHistogram{
|
||||
CounterResetHint: histogram.GaugeType,
|
||||
Schema: 0,
|
||||
ZeroThreshold: 0.001,
|
||||
ZeroCount: 14,
|
||||
Count: 107,
|
||||
Sum: 4691.2,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 7},
|
||||
},
|
||||
PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2},
|
||||
NegativeSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 6},
|
||||
{Offset: 3, Length: 3},
|
||||
},
|
||||
NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4},
|
||||
},
|
||||
expectedAvg: histogram.FloatHistogram{
|
||||
CounterResetHint: histogram.GaugeType,
|
||||
Schema: 0,
|
||||
ZeroThreshold: 0.001,
|
||||
ZeroCount: 3.5,
|
||||
Count: 26.75,
|
||||
Sum: 1172.8,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 7},
|
||||
},
|
||||
PositiveBuckets: []float64{0.75, 2, 0.5, 1.25, 0.75, 0.5, 0.5},
|
||||
NegativeSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 6},
|
||||
{Offset: 3, Length: 3},
|
||||
},
|
||||
NegativeBuckets: []float64{0.5, 1.5, 2, 1, 3.75, 2.25, 2.5, 2.5, 1},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
idx0 := int64(0)
|
||||
for _, c := range cases {
|
||||
for _, floatHisto := range []bool{true, false} {
|
||||
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
|
||||
storage := teststorage.New(t)
|
||||
t.Cleanup(func() { storage.Close() })
|
||||
|
||||
seriesName := "sparse_histogram_series"
|
||||
seriesNameOverTime := "sparse_histogram_series_over_time"
|
||||
|
||||
engine := newTestEngine(t)
|
||||
|
||||
ts := idx0 * int64(10*time.Minute/time.Millisecond)
|
||||
app := storage.Appender(context.Background())
|
||||
_, err := app.Append(0, labels.FromStrings("__name__", "float_series", "idx", "0"), ts, 42)
|
||||
require.NoError(t, err)
|
||||
for idx1, h := range c.histograms {
|
||||
lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1))
|
||||
// Since we mutate h later, we need to create a copy here.
|
||||
var err error
|
||||
if floatHisto {
|
||||
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil))
|
||||
} else {
|
||||
_, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
lbls = labels.FromStrings("__name__", seriesNameOverTime)
|
||||
newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond)
|
||||
// Since we mutate h later, we need to create a copy here.
|
||||
if floatHisto {
|
||||
_, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat(nil))
|
||||
} else {
|
||||
_, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
|
||||
queryAndCheck := func(queryString string, ts int64, exp promql.Vector) {
|
||||
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
|
||||
require.NoError(t, err)
|
||||
|
||||
res := qry.Exec(context.Background())
|
||||
require.NoError(t, res.Err)
|
||||
require.Empty(t, res.Warnings)
|
||||
|
||||
vector, err := res.Vector()
|
||||
require.NoError(t, err)
|
||||
|
||||
testutil.RequireEqual(t, exp, vector)
|
||||
}
|
||||
queryAndCheckAnnotations := func(queryString string, ts int64, expWarnings annotations.Annotations) {
|
||||
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
|
||||
require.NoError(t, err)
|
||||
|
||||
res := qry.Exec(context.Background())
|
||||
require.NoError(t, res.Err)
|
||||
require.Equal(t, expWarnings, res.Warnings)
|
||||
}
|
||||
|
||||
// sum().
|
||||
queryString := fmt.Sprintf("sum(%s)", seriesName)
|
||||
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
|
||||
|
||||
queryString = `sum({idx="0"})`
|
||||
var annos annotations.Annotations
|
||||
annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(posrange.PositionRange{Start: 4, End: 13}))
|
||||
queryAndCheckAnnotations(queryString, ts, annos)
|
||||
|
||||
// + operator.
|
||||
queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName)
|
||||
for idx := 1; idx < len(c.histograms); idx++ {
|
||||
queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx)
|
||||
}
|
||||
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
|
||||
|
||||
// count().
|
||||
queryString = fmt.Sprintf("count(%s)", seriesName)
|
||||
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}})
|
||||
|
||||
// avg().
|
||||
queryString = fmt.Sprintf("avg(%s)", seriesName)
|
||||
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})
|
||||
|
||||
offset := int64(len(c.histograms) - 1)
|
||||
newTs := ts + offset*int64(time.Minute/time.Millisecond)
|
||||
|
||||
// sum_over_time().
|
||||
queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1)
|
||||
queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels(), DropName: true}})
|
||||
|
||||
// avg_over_time().
|
||||
queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1)
|
||||
queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels(), DropName: true}})
|
||||
})
|
||||
idx0++
|
||||
}
|
||||
}
|
||||
}
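
Since the test above drives `sum()` and `avg()` through the engine, it may help to see the underlying relationship, avg = sum scaled by 1/n, expressed directly against the `FloatHistogram` API exercised throughout this diff (`Copy`, `Add`, `Mul`). A rough sketch, assuming the inputs are compatible enough for `Add` to succeed; the helper and the example values are ours:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// averageHistograms sketches what avg() does for native histograms: add all
// inputs with FloatHistogram.Add, then scale by 1/n with Mul. Error handling
// is minimal; Add can fail, e.g. on incompatible custom bucket layouts.
func averageHistograms(hs []*histogram.FloatHistogram) (*histogram.FloatHistogram, error) {
	sum := hs[0].Copy()
	for _, h := range hs[1:] {
		var err error
		if sum, err = sum.Add(h); err != nil {
			return nil, err
		}
	}
	return sum.Mul(1 / float64(len(hs))), nil
}

func main() {
	a := &histogram.FloatHistogram{Count: 30, Sum: 23}
	b := &histogram.FloatHistogram{Count: 10, Sum: 17}
	avg, err := averageHistograms([]*histogram.FloatHistogram{a, b})
	fmt.Println(avg.Count, avg.Sum, err) // 20 20 <nil>
}
```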
|
||||
|
||||
func TestNativeHistogram_SubOperator(t *testing.T) {
|
||||
// TODO(codesome): Integrate histograms into the PromQL testing framework
|
||||
// and write more tests there.
|
||||
|
@ -3370,43 +3577,43 @@ metric 0 1 2
|
|||
}{
|
||||
{
|
||||
name: "default lookback delta",
|
||||
ts: lastDatapointTs.Add(defaultLookbackDelta),
|
||||
ts: lastDatapointTs.Add(defaultLookbackDelta - time.Millisecond),
|
||||
expectSamples: true,
|
||||
},
|
||||
{
|
||||
name: "outside default lookback delta",
|
||||
ts: lastDatapointTs.Add(defaultLookbackDelta + time.Millisecond),
|
||||
ts: lastDatapointTs.Add(defaultLookbackDelta),
|
||||
expectSamples: false,
|
||||
},
|
||||
{
|
||||
name: "custom engine lookback delta",
|
||||
ts: lastDatapointTs.Add(10 * time.Minute),
|
||||
ts: lastDatapointTs.Add(10*time.Minute - time.Millisecond),
|
||||
engineLookback: 10 * time.Minute,
|
||||
expectSamples: true,
|
||||
},
|
||||
{
|
||||
name: "outside custom engine lookback delta",
|
||||
ts: lastDatapointTs.Add(10*time.Minute + time.Millisecond),
|
||||
ts: lastDatapointTs.Add(10 * time.Minute),
|
||||
engineLookback: 10 * time.Minute,
|
||||
expectSamples: false,
|
||||
},
|
||||
{
|
||||
name: "custom query lookback delta",
|
||||
ts: lastDatapointTs.Add(20 * time.Minute),
|
||||
ts: lastDatapointTs.Add(20*time.Minute - time.Millisecond),
|
||||
engineLookback: 10 * time.Minute,
|
||||
queryLookback: 20 * time.Minute,
|
||||
expectSamples: true,
|
||||
},
|
||||
{
|
||||
name: "outside custom query lookback delta",
|
||||
ts: lastDatapointTs.Add(20*time.Minute + time.Millisecond),
|
||||
ts: lastDatapointTs.Add(20 * time.Minute),
|
||||
engineLookback: 10 * time.Minute,
|
||||
queryLookback: 20 * time.Minute,
|
||||
expectSamples: false,
|
||||
},
|
||||
{
|
||||
name: "negative custom query lookback delta",
|
||||
ts: lastDatapointTs.Add(20 * time.Minute),
|
||||
ts: lastDatapointTs.Add(20*time.Minute - time.Millisecond),
|
||||
engineLookback: -10 * time.Minute,
|
||||
queryLookback: 20 * time.Minute,
|
||||
expectSamples: true,
|
||||
|
@ -3473,18 +3680,18 @@ histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum:
|
|||
}
|
||||
}
|
||||
|
||||
qry, err := engine.NewRangeQuery(context.Background(), storage, nil, "increase(histogram[60s])", time.Unix(0, 0), time.Unix(0, 0).Add(1*time.Minute), time.Minute)
|
||||
qry, err := engine.NewRangeQuery(context.Background(), storage, nil, "increase(histogram[90s])", time.Unix(0, 0), time.Unix(0, 0).Add(60*time.Second), time.Minute)
|
||||
require.NoError(t, err)
|
||||
verify(t, qry, []histogram.FloatHistogram{
|
||||
{
|
||||
Count: 2,
|
||||
Sum: 2, // Increase from 4 to 6 is 2.
|
||||
Count: 3,
|
||||
Sum: 3, // Increase from 4 to 6 is 2. Interpolation adds 1.
|
||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, // Two buckets changed between the first and second histogram.
|
||||
PositiveBuckets: []float64{1, 1}, // Increase from 2 to 3 is 1 in both buckets.
|
||||
PositiveBuckets: []float64{1.5, 1.5}, // Increase from 2 to 3 is 1 in both buckets. Interpolation adds 0.5.
|
||||
},
|
||||
})
|
||||
|
||||
qry, err = engine.NewInstantQuery(context.Background(), storage, nil, "histogram[60s]", time.Unix(0, 0).Add(2*time.Minute))
|
||||
qry, err = engine.NewInstantQuery(context.Background(), storage, nil, "histogram[61s]", time.Unix(0, 0).Add(2*time.Minute))
|
||||
require.NoError(t, err)
|
||||
verify(t, qry, []histogram.FloatHistogram{
|
||||
{
|
||||
|
|
|
@ -818,12 +818,12 @@ histogram_desc_item
|
|||
$$ = yylex.(*parser).newMap()
|
||||
$$["sum"] = $3
|
||||
}
|
||||
| COUNT_DESC COLON number
|
||||
| COUNT_DESC COLON signed_or_unsigned_number
|
||||
{
|
||||
$$ = yylex.(*parser).newMap()
|
||||
$$["count"] = $3
|
||||
}
|
||||
| ZERO_BUCKET_DESC COLON number
|
||||
| ZERO_BUCKET_DESC COLON signed_or_unsigned_number
|
||||
{
|
||||
$$ = yylex.(*parser).newMap()
|
||||
$$["z_bucket"] = $3
|
||||
|
@ -875,11 +875,11 @@ bucket_set : LEFT_BRACKET bucket_set_list SPACE RIGHT_BRACKET
|
|||
}
|
||||
;
|
||||
|
||||
bucket_set_list : bucket_set_list SPACE number
|
||||
bucket_set_list : bucket_set_list SPACE signed_or_unsigned_number
|
||||
{
|
||||
$$ = append($1, $3)
|
||||
}
|
||||
| number
|
||||
| signed_or_unsigned_number
|
||||
{
|
||||
$$ = []float64{$1}
|
||||
}
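
The switch from `number` to `signed_or_unsigned_number` in these grammar rules means histogram literals in PromQL test scripts can carry negative values throughout, not just in bucket offsets. A hypothetical illustration (the series name and values are made up, using the `{{sum:... count:... buckets:[...]}}` load notation that appears elsewhere in this diff):

```go
// With signed_or_unsigned_number accepted for count, sum, the zero bucket and
// bucket lists, a test-script histogram literal like this one now parses,
// e.g. the kind of data produced by multiplying a histogram by -1 as in the
// new Mul/Div tests above. Hypothetical example, not taken from the test suite.
const negativeHistogramLoad = `
load 1m
    neg_histogram {{count:-30 sum:-23 buckets:[-1 -3 -4]}}
`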
|
||||
|
|
|
@ -410,55 +410,55 @@ const yyPrivate = 57344
|
|||
const yyLast = 799
|
||||
|
||||
var yyAct = [...]int16{
|
||||
155, 334, 332, 276, 339, 152, 226, 39, 192, 44,
|
||||
291, 290, 156, 118, 82, 178, 229, 107, 106, 346,
|
||||
347, 348, 349, 109, 108, 198, 239, 199, 133, 110,
|
||||
105, 60, 245, 121, 6, 329, 325, 111, 328, 228,
|
||||
200, 201, 160, 119, 304, 267, 293, 128, 260, 160,
|
||||
151, 261, 159, 302, 358, 311, 122, 55, 89, 159,
|
||||
196, 241, 242, 259, 113, 243, 114, 54, 98, 99,
|
||||
302, 112, 101, 256, 104, 88, 230, 232, 234, 235,
|
||||
152, 334, 332, 155, 339, 226, 39, 192, 276, 44,
|
||||
291, 290, 118, 82, 178, 229, 107, 106, 346, 347,
|
||||
348, 349, 109, 108, 198, 239, 199, 156, 110, 105,
|
||||
6, 245, 200, 201, 133, 325, 111, 329, 228, 60,
|
||||
357, 293, 328, 304, 267, 160, 266, 128, 55, 151,
|
||||
302, 311, 302, 196, 340, 159, 55, 89, 54, 356,
|
||||
241, 242, 355, 113, 243, 114, 54, 98, 99, 265,
|
||||
112, 101, 256, 104, 88, 230, 232, 234, 235, 236,
|
||||
244, 246, 249, 250, 251, 252, 253, 257, 258, 105,
|
||||
333, 231, 233, 237, 238, 240, 247, 248, 103, 115,
|
||||
109, 254, 255, 324, 150, 218, 110, 264, 111, 270,
|
||||
77, 35, 7, 149, 188, 163, 322, 321, 173, 320,
|
||||
167, 170, 323, 165, 271, 166, 2, 3, 4, 5,
|
||||
263, 101, 194, 104, 180, 184, 197, 187, 186, 319,
|
||||
272, 202, 203, 204, 205, 206, 207, 208, 209, 210,
|
||||
211, 212, 213, 214, 215, 216, 195, 299, 103, 318,
|
||||
217, 36, 298, 1, 190, 219, 220, 317, 160, 160,
|
||||
316, 193, 160, 154, 182, 196, 229, 297, 159, 159,
|
||||
160, 358, 159, 268, 181, 183, 239, 260, 296, 262,
|
||||
159, 315, 245, 129, 314, 55, 225, 313, 161, 228,
|
||||
161, 161, 259, 312, 161, 54, 86, 295, 310, 288,
|
||||
289, 8, 161, 292, 162, 37, 162, 162, 49, 269,
|
||||
162, 241, 242, 309, 179, 243, 180, 127, 162, 126,
|
||||
308, 223, 294, 256, 48, 222, 230, 232, 234, 235,
|
||||
236, 244, 246, 249, 250, 251, 252, 253, 257, 258,
|
||||
160, 115, 231, 233, 237, 238, 240, 247, 248, 103,
|
||||
159, 109, 254, 255, 324, 150, 357, 110, 333, 218,
|
||||
111, 340, 310, 149, 77, 163, 7, 105, 35, 173,
|
||||
167, 170, 161, 323, 165, 356, 166, 309, 355, 194,
|
||||
2, 3, 4, 5, 308, 322, 184, 197, 162, 186,
|
||||
321, 195, 202, 203, 204, 205, 206, 207, 208, 209,
|
||||
210, 211, 212, 213, 214, 215, 216, 229, 129, 101,
|
||||
217, 104, 219, 220, 190, 266, 270, 239, 160, 121,
|
||||
268, 193, 264, 245, 55, 196, 154, 225, 159, 119,
|
||||
228, 271, 188, 160, 54, 161, 103, 117, 265, 84,
|
||||
262, 299, 122, 159, 320, 263, 298, 272, 10, 83,
|
||||
161, 162, 241, 242, 269, 187, 243, 185, 79, 288,
|
||||
289, 297, 319, 292, 256, 161, 162, 230, 232, 234,
|
||||
235, 236, 244, 246, 249, 250, 251, 252, 253, 257,
|
||||
	// ... regenerated goyacc parser tables (generated_parser.y.go); the
	// numeric entries of yyAct, yyPact, yyPgo, yyR1, yyChk, and yyDef
	// changed mechanically with the grammar update and are omitted here ...

@ -493,51 +493,51 @@ var yyAct = [...]int16{
}

var yyPact = [...]int16{
}

var yyPgo = [...]int16{
}

var yyR1 = [...]int8{

@ -630,9 +630,9 @@ var yyChk = [...]int16{
}

var yyDef = [...]int16{
@ -610,6 +610,9 @@ func lexBuckets(l *Lexer) stateFn {
	case isSpace(r):
		l.emit(SPACE)
		return lexSpace
	case r == '-':
		l.emit(SUB)
		return lexNumber
	case isDigit(r):
		l.backup()
		return lexNumber
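For readers unfamiliar with the state-function lexer style used in lexBuckets above, here is a minimal, self-contained Go sketch of the same rune-dispatch idea. The type, token names, and helpers are illustrative stand-ins, not Prometheus's actual lexer API; the point is only how a leading '-' is emitted as a SUB token before handing off to the number state.

package main

import "fmt"

// token and lexer are illustrative stand-ins for the real lexer machinery.
type token struct{ kind, text string }

type lexer struct {
	input string
	pos   int
	out   []token
}

func (l *lexer) next() byte            { c := l.input[l.pos]; l.pos++; return c }
func (l *lexer) backup()               { l.pos-- }
func (l *lexer) emit(k, s string)      { l.out = append(l.out, token{k, s}) }

// lexBucketValue dispatches on one character, mirroring the case structure
// of the diff above: space, minus sign, or digit.
func lexBucketValue(l *lexer) {
	switch c := l.next(); {
	case c == ' ':
		l.emit("SPACE", " ")
	case c == '-':
		l.emit("SUB", "-") // sign token; the digits follow as a NUMBER
		lexNumber(l)       // the lexer's next state, as in `return lexNumber`
	case c >= '0' && c <= '9':
		l.backup()
		lexNumber(l)
	}
}

func lexNumber(l *lexer) {
	start := l.pos
	for l.pos < len(l.input) && l.input[l.pos] >= '0' && l.input[l.pos] <= '9' {
		l.pos++
	}
	l.emit("NUMBER", l.input[start:l.pos])
}

func main() {
	l := &lexer{input: "-42"}
	lexBucketValue(l)
	fmt.Println(l.out) // [{SUB -} {NUMBER 42}]
}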
@ -4084,17 +4084,17 @@ func TestParseHistogramSeries(t *testing.T) {
		},
		{
			name:  "all properties used",
			input: `{} {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}}`,
			input: `{} {{schema:1 sum:0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:3 n_buckets:[4.1 5] n_offset:5 counter_reset_hint:gauge}}`,
			expected: []histogram.FloatHistogram{{
				Schema:           1,
				Sum:              -0.3,
				Sum:              0.3,
				Count:            3.1,
				ZeroCount:        7.1,
				ZeroThreshold:    0.05,
				PositiveBuckets:  []float64{5.1, 10, 7},
				PositiveSpans:    []histogram.Span{{Offset: -3, Length: 3}},
				PositiveSpans:    []histogram.Span{{Offset: 3, Length: 3}},
				NegativeBuckets:  []float64{4.1, 5},
				NegativeSpans:    []histogram.Span{{Offset: -5, Length: 2}},
				NegativeSpans:    []histogram.Span{{Offset: 5, Length: 2}},
				CounterResetHint: histogram.GaugeType,
			}},
		},

@ -4114,6 +4114,22 @@ func TestParseHistogramSeries(t *testing.T) {
				CounterResetHint: histogram.GaugeType,
			}},
		},
		{
			name:  "all properties used, with negative values where supported",
			input: `{} {{schema:1 sum:-0.3 count:-3.1 z_bucket:-7.1 z_bucket_w:0.05 buckets:[-5.1 -10 -7] offset:-3 n_buckets:[-4.1 -5] n_offset:-5 counter_reset_hint:gauge}}`,
			expected: []histogram.FloatHistogram{{
				Schema:           1,
				Sum:              -0.3,
				Count:            -3.1,
				ZeroCount:        -7.1,
				ZeroThreshold:    0.05,
				PositiveBuckets:  []float64{-5.1, -10, -7},
				PositiveSpans:    []histogram.Span{{Offset: -3, Length: 3}},
				NegativeBuckets:  []float64{-4.1, -5},
				NegativeSpans:    []histogram.Span{{Offset: -5, Length: 2}},
				CounterResetHint: histogram.GaugeType,
			}},
		},
		{
			name:  "static series",
			input: `{} {{buckets:[5 10 7] schema:1}}x2`,
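The series-literal syntax exercised by this test maps field by field onto the expected struct: schema, sum, count, z_bucket, z_bucket_w, buckets, offset, n_buckets and n_offset become Schema, Sum, Count, ZeroCount, ZeroThreshold, PositiveBuckets, the positive span offset, NegativeBuckets and the negative span offset. A self-contained sketch of that correspondence follows; the local Span and FloatHistogram types merely mirror the fields the test uses and are not the real model/histogram types.

package main

import "fmt"

// Span and FloatHistogram mirror the fields used in the test table above,
// reproduced locally so this sketch compiles on its own.
type Span struct {
	Offset int32
	Length uint32
}

type FloatHistogram struct {
	Schema          int32
	Sum, Count      float64
	ZeroCount       float64
	ZeroThreshold   float64
	PositiveBuckets []float64
	PositiveSpans   []Span
	NegativeBuckets []float64
	NegativeSpans   []Span
}

func main() {
	// {{schema:1 sum:0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05
	//   buckets:[5.1 10 7] offset:3 n_buckets:[4.1 5] n_offset:5}}
	h := FloatHistogram{
		Schema: 1, Sum: 0.3, Count: 3.1,
		ZeroCount: 7.1, ZeroThreshold: 0.05,
		PositiveBuckets: []float64{5.1, 10, 7},
		PositiveSpans:   []Span{{Offset: 3, Length: 3}},
		NegativeBuckets: []float64{4.1, 5},
		NegativeSpans:   []Span{{Offset: 5, Length: 2}},
	}
	fmt.Printf("%+v\n", h)
}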
@ -237,7 +237,7 @@ eval instant at 5m sum by (group) (http_requests)
load 5m
testmetric {{}}

eval instant at 5m testmetric
eval instant at 0m testmetric
`,
			expectedError: `error in eval testmetric (line 5): unexpected metric {__name__="testmetric"} in result, has value {count:0, sum:0}`,
		},
12
promql/promqltest/testdata/aggregators.test
vendored
@ -250,7 +250,7 @@ clear
load 5m
  http_requests{job="api-server", instance="0", group="production"} 0+10x10
  http_requests{job="api-server", instance="1", group="production"} 0+20x10
  http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
  http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
  http_requests{job="api-server", instance="0", group="canary"} 0+30x10
  http_requests{job="api-server", instance="1", group="canary"} 0+40x10
  http_requests{job="app-server", instance="0", group="production"} 0+50x10

@ -337,32 +337,32 @@ load 5m
  version{job="app-server", instance="0", group="canary"} 7
  version{job="app-server", instance="1", group="canary"} 7

eval instant at 5m count_values("version", version)
eval instant at 1m count_values("version", version)
  {version="6"} 5
  {version="7"} 2
  {version="8"} 2

eval instant at 5m count_values(((("version"))), version)
eval instant at 1m count_values(((("version"))), version)
  {version="6"} 5
  {version="7"} 2
  {version="8"} 2

eval instant at 5m count_values without (instance)("version", version)
eval instant at 1m count_values without (instance)("version", version)
  {job="api-server", group="production", version="6"} 3
  {job="api-server", group="canary", version="8"} 2
  {job="app-server", group="production", version="6"} 2
  {job="app-server", group="canary", version="7"} 2

# Overwrite label with output. Don't do this.
eval instant at 5m count_values without (instance)("job", version)
eval instant at 1m count_values without (instance)("job", version)
  {job="6", group="production"} 5
  {job="8", group="canary"} 2
  {job="7", group="canary"} 2

# Overwrite label with output. Don't do this.
eval instant at 5m count_values by (job, group)("job", version)
eval instant at 1m count_values by (job, group)("job", version)
  {job="6", group="production"} 5
  {job="8", group="canary"} 2
  {job="7", group="canary"} 2
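As a rough illustration of what count_values computes (ignoring the by/without label partitioning the tests above also exercise), the aggregation boils down to counting series per stringified sample value. A hedged Go sketch, not the engine's code:

package main

import "fmt"

// countValues groups current sample values by their string form and counts
// how many series carry each value.
func countValues(samples map[string]float64) map[string]int {
	counts := map[string]int{}
	for _, v := range samples {
		counts[fmt.Sprint(v)]++
	}
	return counts
}

func main() {
	// Mirroring the load block above: five series at 6, two at 8, two at 7.
	samples := map[string]float64{
		"api-prod-0": 6, "api-prod-1": 6, "api-prod-2": 6,
		"app-prod-0": 6, "app-prod-1": 6,
		"api-canary-0": 8, "api-canary-1": 8,
		"app-canary-0": 7, "app-canary-1": 7,
	}
	fmt.Println(countValues(samples)) // map[6:5 7:2 8:2]
}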
64
promql/promqltest/testdata/at_modifier.test
vendored
@ -121,45 +121,43 @@ eval instant at 25s sum_over_time(metric{job="1"}[100:1] offset 20 @ 100)

# Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries.
# Innermost sum=1+2+...+10=55.
# With [100s:25s] subquery, it's 55*5.
# With [100s:25s] subquery, it's 55*4.
eval instant at 100s sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50)
  {job="1"} 275
  {job="1"} 220

# Nested subqueries with different timestamps on both.

# Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries.
# Sum of innermost subquery is 275 as above. The outer subquery repeats it 4 times.
# Sum of innermost subquery is 220 as above. The outer subquery repeats it 3 times.
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50)[3s:1s] @ 3000)
  {job="1"} 1100
  {job="1"} 660

# Testing the inner subquery timestamp since vector selector does not have @.

# Inner sum for subquery [100s:25s] @ 50 are
# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=4+5=9.
# This sum of 11 is repeated 4 times by outer subquery.
# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=5.
# This sum of 7 is repeated 3 times by outer subquery.
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 50)[3s:1s] @ 200)
  {job="1"} 44
  {job="1"} 21

# Inner sum for subquery [100s:25s] @ 200 are
# at 100=9+10, at 125=12, at 150=14+15, at 175=17, at 200=19+20.
# This sum of 116 is repeated 4 times by outer subquery.
# at 125=12, at 150=15, at 175=17, at 200=20.
# This sum of 64 is repeated 3 times by outer subquery.
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 200)[3s:1s] @ 50)
  {job="1"} 464
  {job="1"} 192

# Nested subqueries with timestamp only on outer subquery.
# Outermost subquery:
# at 900=783
#   inner subquery: at 870=87+86+85, at 880=88+87+86, at 890=89+88+87
# at 925=537
#   inner subquery: at 895=89+88, at 905=90+89, at 915=90+91
# at 950=828
#   inner subquery: at 920=92+91+90, at 930=93+92+91, at 940=94+93+92
# at 975=567
#   inner subquery: at 945=94+93, at 955=95+94, at 965=96+95
# at 1000=873
#   inner subquery: at 970=97+96+95, at 980=98+97+96, at 990=99+98+97
# at 925=360
#   inner subquery: at 905=90+89, at 915=91+90
# at 950=372
#   inner subquery: at 930=93+92, at 940=94+93
# at 975=380
#   inner subquery: at 955=95+94, at 965=96+95
# at 1000=392
#   inner subquery: at 980=98+97, at 990=99+98
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[20s])[20s:10s] offset 10s)[100s:25s] @ 1000)
  {job="1"} 3588
  {job="1"} 1504

# minute is counted on the value of the sample.
eval instant at 10s minute(metric @ 1500)

@ -182,32 +180,32 @@ eval instant at 15m timestamp(timestamp(metric{job="1"} @ 10))

# minute is counted on the value of the sample.
eval instant at 0s sum_over_time(minute(metric @ 1500)[100s:10s])
  {job="1"} 22
  {job="2"} 55
  {job="1"} 20
  {job="2"} 50

# If nothing passed, minute() takes eval time.
# Here the eval time is determined by the subquery.
# [50m:1m] at 6000, i.e. 100m, is 50m to 100m.
# sum=50+51+52+...+59+0+1+2+...+40.
# sum=51+52+...+59+0+1+2+...+40.
eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000)
  {} 1315

# sum=46+47+...+59+0+1+2+...+35.
eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m)
  {} 1365

# sum=45+46+47+...+59+0+1+2+...+35.
eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m)
  {} 1410

# time() is the eval time which is determined by subquery here.
# 2900+2901+...+3000 = (3000*3001 - 2899*2900)/2.
# 2901+...+3000 = (3000*3001 - 2900*2901)/2.
eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000)
  {} 297950
  {} 295050

# 2300+2301+...+2400 = (2400*2401 - 2299*2300)/2.
# 2301+...+2400 = (2400*2401 - 2300*2301)/2.
eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000 offset 600s)
  {} 237350
  {} 235050

# timestamp() takes the time of the sample and not the evaluation time.
eval instant at 0s sum_over_time(timestamp(metric{job="1"} @ 10)[100s:10s] @ 3000)
  {job="1"} 110
  {job="1"} 100

# The result of inner timestamp() will have the timestamp as the
# eval time, hence entire expression is not step invariant and depends on eval time.
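The closed forms quoted in those comments follow the arithmetic-series identity a + (a+1) + ... + b = (b(b+1) - (a-1)a)/2 (note the corrected comments above divide sums of consecutive integers at the new, shifted endpoints). A quick Go check of the two corrected sums:

package main

import "fmt"

// rangeSum evaluates a + (a+1) + ... + b via the closed form
// (b*(b+1) - (a-1)*a) / 2 used in the test comments.
func rangeSum(a, b int) int { return (b*(b+1) - (a-1)*a) / 2 }

func main() {
	fmt.Println(rangeSum(2901, 3000)) // 295050, matching the @ 3000 result
	fmt.Println(rangeSum(2301, 2400)) // 235050, matching the offset 600s result
}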
154
promql/promqltest/testdata/functions.test
vendored
@ -6,11 +6,13 @@ load 5m

# Tests for resets().
eval instant at 50m resets(http_requests[5m])

eval instant at 50m resets(http_requests[10m])
  {path="/foo"} 0
  {path="/bar"} 0
  {path="/biz"} 0

eval instant at 50m resets(http_requests[300])
eval instant at 50m resets(http_requests[600])
  {path="/foo"} 0
  {path="/bar"} 0
  {path="/biz"} 0

@ -21,6 +23,11 @@ eval instant at 50m resets(http_requests[20m])
  {path="/biz"} 0

eval instant at 50m resets(http_requests[30m])
  {path="/foo"} 1
  {path="/bar"} 0
  {path="/biz"} 0

eval instant at 50m resets(http_requests[32m])
  {path="/foo"} 2
  {path="/bar"} 1
  {path="/biz"} 0
@ -34,28 +41,30 @@ eval instant at 50m resets(nonexistent_metric[50m])

# Tests for changes().
eval instant at 50m changes(http_requests[5m])

eval instant at 50m changes(http_requests[6m])
  {path="/foo"} 0
  {path="/bar"} 0
  {path="/biz"} 0

eval instant at 50m changes(http_requests[20m])
  {path="/foo"} 3
  {path="/bar"} 3
  {path="/foo"} 2
  {path="/bar"} 2
  {path="/biz"} 0

eval instant at 50m changes(http_requests[30m])
  {path="/foo"} 4
  {path="/bar"} 5
  {path="/biz"} 1
  {path="/foo"} 3
  {path="/bar"} 4
  {path="/biz"} 0

eval instant at 50m changes(http_requests[50m])
  {path="/foo"} 8
  {path="/bar"} 9
  {path="/foo"} 7
  {path="/bar"} 8
  {path="/biz"} 1

eval instant at 50m changes((http_requests[50m]))
  {path="/foo"} 8
  {path="/bar"} 9
  {path="/foo"} 7
  {path="/bar"} 8
  {path="/biz"} 1

eval instant at 50m changes(nonexistent_metric[50m])
@ -66,7 +75,7 @@ load 5m
  x{a="b"} NaN NaN NaN
  x{a="c"} 0 NaN 0

eval instant at 15m changes(x[15m])
eval instant at 15m changes(x[20m])
  {a="b"} 0
  {a="c"} 2
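A sketch of the counting semantics these changes()/resets() tests rely on: a NaN followed by another NaN is not a change, a NaN-to-number transition is, and NaN never registers as a counter reset because comparisons involving NaN are false. Names and structure below are illustrative, not the actual implementation.

package main

import (
	"fmt"
	"math"
)

// changes counts transitions where the value actually changed. Note that in
// Go, NaN != NaN is true, so the explicit both-NaN exclusion is needed.
func changes(vs []float64) int {
	n := 0
	for i := 1; i < len(vs); i++ {
		prev, cur := vs[i-1], vs[i]
		if cur != prev && !(math.IsNaN(prev) && math.IsNaN(cur)) {
			n++
		}
	}
	return n
}

// resets counts drops in a counter; any comparison with NaN is false, so
// NaN samples never count as a reset.
func resets(vs []float64) int {
	n := 0
	for i := 1; i < len(vs); i++ {
		if vs[i] < vs[i-1] {
			n++
		}
	}
	return n
}

func main() {
	nan := math.NaN()
	fmt.Println(changes([]float64{nan, nan, nan}))   // 0, like x{a="b"} above
	fmt.Println(changes([]float64{0, nan, 0}))       // 2, like x{a="c"} above
	fmt.Println(resets([]float64{0, 10, 20, 0, 10})) // 1
}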
@ -75,14 +84,14 @@ clear
# Tests for increase().
load 5m
  http_requests{path="/foo"} 0+10x10
  http_requests{path="/bar"} 0+10x5 0+10x5
  http_requests{path="/bar"} 0+18x5 0+18x5
  http_requests{path="/dings"} 10+10x10
  http_requests{path="/bumms"} 1+10x10

# Tests for increase().
eval instant at 50m increase(http_requests[50m])
  {path="/foo"} 100
  {path="/bar"} 90
  {path="/bar"} 160
  {path="/dings"} 100
  {path="/bumms"} 100

@ -95,7 +104,7 @@ eval instant at 50m increase(http_requests[50m])
# value, and therefore the extrapolation happens only by 30s.
eval instant at 50m increase(http_requests[100m])
  {path="/foo"} 100
  {path="/bar"} 90
  {path="/bar"} 162
  {path="/dings"} 105
  {path="/bumms"} 101
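The extrapolation that comment refers to can be sketched as follows: take the reset-corrected delta over the sampled interval, then scale it toward the window boundaries, capping the extension on each side at half the average sample spacing. This is a simplified, hedged rendition; Prometheus's real extrapolatedRate has further cases (for example, limiting extrapolation so counters are not extended below zero).

package main

import (
	"fmt"
	"math"
)

// extrapolatedIncrease assumes at least two samples, with timestamps ts (in
// seconds) inside the window [start, end].
func extrapolatedIncrease(ts, vs []float64, start, end float64) float64 {
	n := len(vs)
	delta := 0.0
	for i := 1; i < n; i++ {
		d := vs[i] - vs[i-1]
		if d < 0 { // counter reset: the counter restarted from (near) zero
			d = vs[i]
		}
		delta += d
	}
	sampled := ts[n-1] - ts[0]
	avgStep := sampled / float64(n-1)
	head := math.Min(ts[0]-start, avgStep/2) // extension toward window start
	tail := math.Min(end-ts[n-1], avgStep/2) // extension toward window end
	return delta * (sampled + head + tail) / sampled
}

func main() {
	// http_requests{path="/foo"} 0+10x10 at 5m spacing, window [0s, 3000s]:
	// the samples cover the window exactly, so no extrapolation is needed.
	ts := make([]float64, 11)
	vs := make([]float64, 11)
	for i := range ts {
		ts[i] = float64(i) * 300
		vs[i] = float64(i) * 10
	}
	fmt.Println(extrapolatedIncrease(ts, vs, 0, 3000)) // 100, as in the test
}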
@ -115,15 +124,17 @@ clear

# Tests for rate().
load 5m
  testcounter_reset_middle 0+10x4 0+10x5
  testcounter_reset_middle 0+27x4 0+27x5
  testcounter_reset_end 0+10x9 0 10

# Counter resets in the middle of the range are handled correctly by rate().
eval instant at 50m rate(testcounter_reset_middle[50m])
  {} 0.03
  {} 0.08

# Counter resets at end of range are ignored by rate().
eval instant at 50m rate(testcounter_reset_end[5m])

eval instant at 50m rate(testcounter_reset_end[6m])
  {} 0

clear
@ -242,24 +253,24 @@ eval instant at 50m deriv(testcounter_reset_middle[100m])
# intercept at t=3000: 38.63636363636364
# intercept at t=3000+3600: 76.81818181818181
eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600)
  {} 76.81818181818181
  {} 70

eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h)
  {} 76.81818181818181
  {} 70

# intercept at t = 3000+3600 = 6600
eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
  {} 76.81818181818181

eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 1h)
eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 1h)
  {} 76.81818181818181

# intercept at t = 600+3600 = 4200
eval instant at 10m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
eval instant at 10m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
  {} 51.36363636363637

# intercept at t = 4200+3600 = 7800
eval instant at 70m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
eval instant at 70m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
  {} 89.54545454545455

# With http_requests, there is a sample value exactly at the end of
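predict_linear fits a least-squares line through the samples in the window and extrapolates it d seconds past the evaluation time. The sketch below reproduces the intercepts quoted in the comments above (38.636... at t=3000, hence 76.818... at t=3000+3600) for a counter sampled every 5m with values 0+10x4 0+10x5; it is an illustration of the idea, not the engine's code.

package main

import "fmt"

func predictLinear(ts, vs []float64, evalT, d float64) float64 {
	n := float64(len(ts))
	var sumT, sumV float64
	for i := range ts {
		sumT += ts[i]
		sumV += vs[i]
	}
	meanT, meanV := sumT/n, sumV/n
	var num, den float64
	for i := range ts {
		num += (ts[i] - meanT) * (vs[i] - meanV)
		den += (ts[i] - meanT) * (ts[i] - meanT)
	}
	slope := num / den
	// Value of the fitted line at evalT, then extrapolated by d seconds.
	return meanV + slope*(evalT-meanT) + slope*d
}

func main() {
	// 0+10x4 0+10x5 at 5m spacing: t = 0s..3000s, reset after the 5th sample.
	vs := []float64{0, 10, 20, 30, 40, 0, 10, 20, 30, 40, 50}
	ts := make([]float64, len(vs))
	for i := range ts {
		ts[i] = float64(i) * 300
	}
	fmt.Println(predictLinear(ts, vs, 3000, 3600)) // ≈ 76.81818181818181
}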
@ -467,7 +478,7 @@ load 5m
  http_requests{job="api-server", instance="1", group="production"} 0+20x10
  http_requests{job="api-server", instance="0", group="canary"} 0+30x10
  http_requests{job="api-server", instance="1", group="canary"} 0+40x10
  http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
  http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
  http_requests{job="app-server", instance="0", group="production"} 0+50x10
  http_requests{job="app-server", instance="1", group="production"} 0+60x10
  http_requests{job="app-server", instance="0", group="canary"} 0+70x10

@ -502,7 +513,7 @@ load 5m
  http_requests{job="api-server", instance="1", group="production"} 0+20x10
  http_requests{job="api-server", instance="0", group="canary"} 0+30x10
  http_requests{job="api-server", instance="1", group="canary"} 0+40x10
  http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
  http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
  http_requests{job="app-server", instance="0", group="production"} 0+50x10
  http_requests{job="app-server", instance="1", group="production"} 0+60x10
  http_requests{job="app-server", instance="0", group="canary"} 0+70x10
@ -688,10 +699,10 @@ load 10s
  metric9 -9.988465674311579e+307 -9.988465674311579e+307 -9.988465674311579e+307
  metric10 -9.988465674311579e+307 9.988465674311579e+307

eval instant at 1m avg_over_time(metric[1m])
eval instant at 55s avg_over_time(metric[1m])
  {} 3

eval instant at 1m sum_over_time(metric[1m])/count_over_time(metric[1m])
eval instant at 55s sum_over_time(metric[1m])/count_over_time(metric[1m])
  {} 3

eval instant at 1m avg_over_time(metric2[1m])

@ -758,8 +769,8 @@ eval instant at 1m avg_over_time(metric8[1m])
  {} 9.988465674311579e+307

# This overflows float64.
eval instant at 1m sum_over_time(metric8[1m])/count_over_time(metric8[1m])
  {} Inf
eval instant at 1m sum_over_time(metric8[2m])/count_over_time(metric8[2m])
  {} +Inf

eval instant at 1m avg_over_time(metric9[1m])
  {} -9.988465674311579e+307

@ -768,10 +779,16 @@ eval instant at 1m avg_over_time(metric9[1m])
eval instant at 1m sum_over_time(metric9[1m])/count_over_time(metric9[1m])
  {} -Inf

eval instant at 1m avg_over_time(metric10[1m])
eval instant at 45s avg_over_time(metric10[1m])
  {} 0

eval instant at 1m sum_over_time(metric10[1m])/count_over_time(metric10[1m])
eval instant at 1m avg_over_time(metric10[2m])
  {} 0

eval instant at 45s sum_over_time(metric10[1m])/count_over_time(metric10[1m])
  {} 0

eval instant at 1m sum_over_time(metric10[2m])/count_over_time(metric10[2m])
  {} 0

# Test if very big intermediate values cause loss of detail.
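The metric8/metric10 tests distinguish avg_over_time, which keeps a running mean, from the naive sum/count quotient, whose intermediate sum overflows float64. A minimal sketch of the difference (not Prometheus's exact code):

package main

import "fmt"

func main() {
	// Two samples near the float64 maximum, as in metric8 above.
	samples := []float64{9.988465674311579e+307, 9.988465674311579e+307}

	sum := 0.0
	for _, v := range samples {
		sum += v // the intermediate sum overflows to +Inf
	}
	fmt.Println(sum / float64(len(samples))) // +Inf

	mean := 0.0
	for i, v := range samples {
		mean += (v - mean) / float64(i+1) // incremental update stays finite
	}
	fmt.Println(mean) // 9.988465674311579e+307
}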
@ -779,10 +796,10 @@ clear
load 10s
  metric 1 1e100 1 -1e100

eval instant at 1m sum_over_time(metric[1m])
eval instant at 1m sum_over_time(metric[2m])
  {} 2

eval instant at 1m avg_over_time(metric[1m])
eval instant at 1m avg_over_time(metric[2m])
  {} 0.5

# Tests for stddev_over_time and stdvar_over_time.
@ -790,13 +807,13 @@ clear
load 10s
  metric 0 8 8 2 3

eval instant at 1m stdvar_over_time(metric[1m])
eval instant at 1m stdvar_over_time(metric[2m])
  {} 10.56

eval instant at 1m stddev_over_time(metric[1m])
eval instant at 1m stddev_over_time(metric[2m])
  {} 3.249615

eval instant at 1m stddev_over_time((metric[1m]))
eval instant at 1m stddev_over_time((metric[2m]))
  {} 3.249615

# Tests for stddev_over_time and stdvar_over_time #4927.
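stdvar_over_time reports the population variance of the samples in the window, and stddev_over_time its square root. Welford's online algorithm, sketched below, reproduces the 10.56 and 3.249615... expected above for the load block 0 8 8 2 3:

package main

import (
	"fmt"
	"math"
)

// popVariance computes population variance via Welford's online algorithm.
func popVariance(vs []float64) float64 {
	var mean, m2 float64
	for i, v := range vs {
		d := v - mean
		mean += d / float64(i+1)
		m2 += d * (v - mean)
	}
	return m2 / float64(len(vs))
}

func main() {
	vs := []float64{0, 8, 8, 2, 3}
	fmt.Println(popVariance(vs))            // 10.56
	fmt.Println(math.Sqrt(popVariance(vs))) // 3.2496153618543844...
}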
@ -826,42 +843,42 @@ load 10s
  data{test="three samples"} 0 1 2
  data{test="uneven samples"} 0 1 4

eval instant at 1m quantile_over_time(0, data[1m])
eval instant at 1m quantile_over_time(0, data[2m])
  {test="two samples"} 0
  {test="three samples"} 0
  {test="uneven samples"} 0

eval instant at 1m quantile_over_time(0.5, data[1m])
eval instant at 1m quantile_over_time(0.5, data[2m])
  {test="two samples"} 0.5
  {test="three samples"} 1
  {test="uneven samples"} 1

eval instant at 1m quantile_over_time(0.75, data[1m])
eval instant at 1m quantile_over_time(0.75, data[2m])
  {test="two samples"} 0.75
  {test="three samples"} 1.5
  {test="uneven samples"} 2.5

eval instant at 1m quantile_over_time(0.8, data[1m])
eval instant at 1m quantile_over_time(0.8, data[2m])
  {test="two samples"} 0.8
  {test="three samples"} 1.6
  {test="uneven samples"} 2.8

eval instant at 1m quantile_over_time(1, data[1m])
eval instant at 1m quantile_over_time(1, data[2m])
  {test="two samples"} 1
  {test="three samples"} 2
  {test="uneven samples"} 4

eval_warn instant at 1m quantile_over_time(-1, data[1m])
eval_warn instant at 1m quantile_over_time(-1, data[2m])
  {test="two samples"} -Inf
  {test="three samples"} -Inf
  {test="uneven samples"} -Inf

eval_warn instant at 1m quantile_over_time(2, data[1m])
eval_warn instant at 1m quantile_over_time(2, data[2m])
  {test="two samples"} +Inf
  {test="three samples"} +Inf
  {test="uneven samples"} +Inf

eval_warn instant at 1m (quantile_over_time(2, (data[1m])))
eval_warn instant at 1m (quantile_over_time(2, (data[2m])))
  {test="two samples"} +Inf
  {test="three samples"} +Inf
  {test="uneven samples"} +Inf
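quantile_over_time interpolates linearly between adjacent sorted samples. The sketch below reproduces the "uneven samples" (0 1 4) rows above; φ outside [0, 1] yields ±Inf with a warning in the real engine, which this illustration omits.

package main

import (
	"fmt"
	"sort"
)

// quantile returns the φ-quantile with linear interpolation between the two
// nearest sorted samples; φ is assumed to be within [0, 1] here.
func quantile(phi float64, vs []float64) float64 {
	s := append([]float64(nil), vs...)
	sort.Float64s(s)
	rank := phi * float64(len(s)-1)
	lo := int(rank)
	if lo+1 >= len(s) {
		return s[len(s)-1]
	}
	frac := rank - float64(lo)
	return s[lo] + frac*(s[lo+1]-s[lo])
}

func main() {
	uneven := []float64{0, 1, 4}
	fmt.Println(quantile(0.5, uneven))  // 1
	fmt.Println(quantile(0.75, uneven)) // 2.5
	fmt.Println(quantile(0.8, uneven))  // ≈ 2.8
}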
@ -969,21 +986,21 @@ load 10s
  data{type="some_nan3"} NaN 0 1
  data{type="only_nan"} NaN NaN NaN

eval instant at 1m min_over_time(data[1m])
eval instant at 1m min_over_time(data[2m])
  {type="numbers"} 0
  {type="some_nan"} 0
  {type="some_nan2"} 1
  {type="some_nan3"} 0
  {type="only_nan"} NaN

eval instant at 1m max_over_time(data[1m])
eval instant at 1m max_over_time(data[2m])
  {type="numbers"} 3
  {type="some_nan"} 2
  {type="some_nan2"} 2
  {type="some_nan3"} 1
  {type="only_nan"} NaN

eval instant at 1m last_over_time(data[1m])
eval instant at 1m last_over_time(data[2m])
  data{type="numbers"} 3
  data{type="some_nan"} NaN
  data{type="some_nan2"} 1
@ -1076,13 +1093,19 @@ eval instant at 1m absent_over_time(httpd_log_lines_total[30s])
  {} 1

eval instant at 15m absent_over_time(http_requests[5m])

eval instant at 16m absent_over_time(http_requests[5m])
  {} 1

eval instant at 15m absent_over_time(http_requests[10m])

eval instant at 16m absent_over_time(http_requests[6m])
  {} 1

eval instant at 16m absent_over_time(http_requests[16m])

eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m])
  {} 1

eval instant at 16m absent_over_time(httpd_handshake_failures_total[2m])

eval instant at 16m absent_over_time({instance="127.0.0.1"}[5m])
@ -1138,17 +1161,18 @@ eval instant at 0m present_over_time(httpd_log_lines_total[30s])
eval instant at 1m present_over_time(httpd_log_lines_total[30s])

eval instant at 15m present_over_time(http_requests[5m])

eval instant at 15m present_over_time(http_requests[10m])
  {instance="127.0.0.1", job="httpd", path="/bar"} 1
  {instance="127.0.0.1", job="httpd", path="/foo"} 1

eval instant at 16m present_over_time(http_requests[5m])

eval instant at 16m present_over_time(http_requests[6m])

eval instant at 16m present_over_time(http_requests[16m])
  {instance="127.0.0.1", job="httpd", path="/bar"} 1
  {instance="127.0.0.1", job="httpd", path="/foo"} 1

eval instant at 16m present_over_time(httpd_handshake_failures_total[1m])
  {instance="127.0.0.1", job="node"} 1

eval instant at 16m present_over_time({instance="127.0.0.1"}[5m])
  {instance="127.0.0.1",job="node"} 1
@ -1169,59 +1193,59 @@ load 5m
  exp_root_log{l="x"} 10
  exp_root_log{l="y"} 20

eval instant at 5m exp(exp_root_log)
eval instant at 1m exp(exp_root_log)
  {l="x"} 22026.465794806718
  {l="y"} 485165195.4097903

eval instant at 5m exp(exp_root_log - 10)
eval instant at 1m exp(exp_root_log - 10)
  {l="y"} 22026.465794806718
  {l="x"} 1

eval instant at 5m exp(exp_root_log - 20)
eval instant at 1m exp(exp_root_log - 20)
  {l="x"} 4.5399929762484854e-05
  {l="y"} 1

eval instant at 5m ln(exp_root_log)
eval instant at 1m ln(exp_root_log)
  {l="x"} 2.302585092994046
  {l="y"} 2.995732273553991

eval instant at 5m ln(exp_root_log - 10)
eval instant at 1m ln(exp_root_log - 10)
  {l="y"} 2.302585092994046
  {l="x"} -Inf

eval instant at 5m ln(exp_root_log - 20)
eval instant at 1m ln(exp_root_log - 20)
  {l="y"} -Inf
  {l="x"} NaN

eval instant at 5m exp(ln(exp_root_log))
eval instant at 1m exp(ln(exp_root_log))
  {l="y"} 20
  {l="x"} 10

eval instant at 5m sqrt(exp_root_log)
eval instant at 1m sqrt(exp_root_log)
  {l="x"} 3.1622776601683795
  {l="y"} 4.47213595499958

eval instant at 5m log2(exp_root_log)
eval instant at 1m log2(exp_root_log)
  {l="x"} 3.3219280948873626
  {l="y"} 4.321928094887363

eval instant at 5m log2(exp_root_log - 10)
eval instant at 1m log2(exp_root_log - 10)
  {l="y"} 3.3219280948873626
  {l="x"} -Inf

eval instant at 5m log2(exp_root_log - 20)
eval instant at 1m log2(exp_root_log - 20)
  {l="x"} NaN
  {l="y"} -Inf

eval instant at 5m log10(exp_root_log)
eval instant at 1m log10(exp_root_log)
  {l="x"} 1
  {l="y"} 1.301029995663981

eval instant at 5m log10(exp_root_log - 10)
eval instant at 1m log10(exp_root_log - 10)
  {l="y"} 1
  {l="x"} -Inf

eval instant at 5m log10(exp_root_log - 20)
eval instant at 1m log10(exp_root_log - 20)
  {l="x"} NaN
  {l="y"} -Inf
82
promql/promqltest/testdata/histograms.test
vendored
@ -108,8 +108,8 @@ eval instant at 50m histogram_stdvar(testhistogram3)
eval instant at 50m histogram_fraction(0, 0.2, testhistogram3)
  {start="positive"} 0.6363636363636364
  {start="negative"} 0

eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m]))
eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[10m]))
  {start="positive"} 0.6363636363636364
  {start="negative"} 0
@ -118,8 +118,8 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m]))

eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count
  {start="positive"} 0.6363636363636364

eval instant at 50m rate(testhistogram3_bucket{le=".2"}[5m]) / ignoring(le) rate(testhistogram3_count[5m])
eval instant at 50m rate(testhistogram3_bucket{le=".2"}[10m]) / ignoring(le) rate(testhistogram3_count[10m])
  {start="positive"} 0.6363636363636364

# Test histogram_quantile, native and classic.
@ -241,28 +241,27 @@ eval instant at 50m histogram_quantile(0.8, testhistogram_bucket)
  {start="negative"} 0.3

# More realistic with rates.

eval instant at 50m histogram_quantile(0.2, rate(testhistogram[5m]))
eval instant at 50m histogram_quantile(0.2, rate(testhistogram[10m]))
  {start="positive"} 0.048
  {start="negative"} -0.2

eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m]))
eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[10m]))
  {start="positive"} 0.048
  {start="negative"} -0.2

eval instant at 50m histogram_quantile(0.5, rate(testhistogram[5m]))
eval instant at 50m histogram_quantile(0.5, rate(testhistogram[10m]))
  {start="positive"} 0.15
  {start="negative"} -0.15

eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m]))
eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[10m]))
  {start="positive"} 0.15
  {start="negative"} -0.15

eval instant at 50m histogram_quantile(0.8, rate(testhistogram[5m]))
eval instant at 50m histogram_quantile(0.8, rate(testhistogram[10m]))
  {start="positive"} 0.72
  {start="negative"} 0.3

eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m]))
eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[10m]))
  {start="positive"} 0.72
  {start="negative"} 0.3
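For classic histograms, histogram_quantile locates the bucket in which the target rank falls and interpolates linearly inside it. A simplified Go sketch with illustrative bucket values follows; the real implementation also handles the +Inf bucket, non-monotonic cumulative counts, and a special case for the lowest bucket.

package main

import "fmt"

// bucket carries an upper bound (le) and a cumulative count, as in the
// classic _bucket series above. Values here are illustrative.
type bucket struct {
	upper float64
	count float64
}

func histogramQuantile(q float64, buckets []bucket) float64 {
	total := buckets[len(buckets)-1].count
	rank := q * total
	prevUpper, prevCount := 0.0, 0.0
	for _, b := range buckets {
		if b.count >= rank {
			// Interpolate linearly within (prevUpper, b.upper].
			return prevUpper + (b.upper-prevUpper)*(rank-prevCount)/(b.count-prevCount)
		}
		prevUpper, prevCount = b.upper, b.count
	}
	return buckets[len(buckets)-1].upper
}

func main() {
	bs := []bucket{{0.1, 50}, {0.2, 70}, {1, 110}, {10, 120}}
	// rank 60 of 120 falls halfway into the (0.1, 0.2] bucket.
	fmt.Println(histogramQuantile(0.5, bs)) // 0.15
}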
@ -307,115 +306,112 @@ eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m]))
# Aggregated histogram: Everything in one. Note how native histograms
# don't require aggregation by le.

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])))
  {} 0.075

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le))
  {} 0.075

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])))
  {} 0.1277777777777778

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le))
  {} 0.1277777777777778

# Aggregated histogram: Everything in one. Now with avg, which does not change anything.

eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[5m])))
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[10m])))
  {} 0.075

eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[10m])) by (le))
  {} 0.075

eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[5m])))
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[10m])))
  {} 0.12777777777777778

eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[10m])) by (le))
  {} 0.12777777777777778

# Aggregated histogram: By instance.

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (instance))
  {instance="ins1"} 0.075
  {instance="ins2"} 0.075

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
  {instance="ins1"} 0.075
  {instance="ins2"} 0.075

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (instance))
  {instance="ins1"} 0.1333333333
  {instance="ins2"} 0.125

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
  {instance="ins1"} 0.1333333333
  {instance="ins2"} 0.125

# Aggregated histogram: By job.

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job))
  {job="job1"} 0.1
  {job="job2"} 0.0642857142857143

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
  {job="job1"} 0.1
  {job="job2"} 0.0642857142857143

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job))
  {job="job1"} 0.14
  {job="job2"} 0.1125

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
  {job="job1"} 0.14
  {job="job2"} 0.1125

# Aggregated histogram: By job and instance.

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job, instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job, instance))
  {instance="ins1", job="job1"} 0.11
  {instance="ins2", job="job1"} 0.09
  {instance="ins1", job="job2"} 0.06
  {instance="ins2", job="job2"} 0.0675

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
  {instance="ins1", job="job1"} 0.11
  {instance="ins2", job="job1"} 0.09
  {instance="ins1", job="job2"} 0.06
  {instance="ins2", job="job2"} 0.0675

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job, instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job, instance))
  {instance="ins1", job="job1"} 0.15
  {instance="ins2", job="job1"} 0.1333333333333333
  {instance="ins1", job="job2"} 0.1
  {instance="ins2", job="job2"} 0.1166666666666667

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
  {instance="ins1", job="job1"} 0.15
  {instance="ins2", job="job1"} 0.1333333333333333
  {instance="ins1", job="job2"} 0.1
  {instance="ins2", job="job2"} 0.1166666666666667

# The unaggregated histogram for comparison. Same result as the previous one.

eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[5m]))
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[10m]))
  {instance="ins1", job="job1"} 0.11
  {instance="ins2", job="job1"} 0.09
  {instance="ins1", job="job2"} 0.06
  {instance="ins2", job="job2"} 0.0675

eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m]))
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m]))
  {instance="ins1", job="job1"} 0.11
  {instance="ins2", job="job1"} 0.09
  {instance="ins1", job="job2"} 0.06
  {instance="ins2", job="job2"} 0.0675

eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[5m]))
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[10m]))
  {instance="ins1", job="job1"} 0.15
  {instance="ins2", job="job1"} 0.13333333333333333
  {instance="ins1", job="job2"} 0.1
  {instance="ins2", job="job2"} 0.11666666666666667

eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m]))
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m]))
  {instance="ins1", job="job1"} 0.15
  {instance="ins2", job="job1"} 0.13333333333333333
  {instance="ins1", job="job2"} 0.1
@ -448,19 +444,19 @@ eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket)
  {} 979.75

# Buckets with different representations of the same upper bound.
eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m]))
eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[10m]))
  {instance="ins1", job="job1"} 0.15
  {instance="ins2", job="job1"} NaN

eval instant at 50m histogram_quantile(0.5, rate(mixed[5m]))
eval instant at 50m histogram_quantile(0.5, rate(mixed[10m]))
  {instance="ins1", job="job1"} 0.2
  {instance="ins2", job="job1"} NaN

eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m]))
eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[10m]))
  {instance="ins1", job="job1"} 0.2
  {instance="ins2", job="job1"} NaN

eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m]))
eval instant at 50m histogram_quantile(1, rate(mixed_bucket[10m]))
  {instance="ins1", job="job1"} 0.2
  {instance="ins2", job="job1"} NaN
@ -469,7 +465,7 @@ load_with_nhcb 5m
  empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10
  empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10

eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m]))
eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[10m]))
  {instance="ins1", job="job1"} NaN

# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set.
@ -4,81 +4,81 @@ load 5m
  another_metric{env="1"} 60 120 180

# Does not drop __name__ for vector selector
eval instant at 15m metric{env="1"}
eval instant at 10m metric{env="1"}
  metric{env="1"} 120

# Drops __name__ for unary operators
eval instant at 15m -metric
eval instant at 10m -metric
  {env="1"} -120

# Drops __name__ for binary operators
eval instant at 15m metric + another_metric
eval instant at 10m metric + another_metric
  {env="1"} 300

# Does not drop __name__ for binary comparison operators
eval instant at 15m metric <= another_metric
eval instant at 10m metric <= another_metric
  metric{env="1"} 120

# Drops __name__ for binary comparison operators with "bool" modifier
eval instant at 15m metric <= bool another_metric
eval instant at 10m metric <= bool another_metric
  {env="1"} 1

# Drops __name__ for vector-scalar operations
eval instant at 15m metric * 2
eval instant at 10m metric * 2
  {env="1"} 240

# Drops __name__ for instant-vector functions
eval instant at 15m clamp(metric, 0, 100)
eval instant at 10m clamp(metric, 0, 100)
  {env="1"} 100

# Drops __name__ for range-vector functions
eval instant at 15m rate(metric{env="1"}[10m])
eval instant at 10m rate(metric{env="1"}[10m])
  {env="1"} 0.2

# Does not drop __name__ for last_over_time function
eval instant at 15m last_over_time(metric{env="1"}[10m])
eval instant at 10m last_over_time(metric{env="1"}[10m])
  metric{env="1"} 120

# Drops name for other _over_time functions
eval instant at 15m max_over_time(metric{env="1"}[10m])
eval instant at 10m max_over_time(metric{env="1"}[10m])
  {env="1"} 120

# Allows relabeling (to-be-dropped) __name__ via label_replace
eval instant at 15m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)")
eval instant at 10m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)")
  {my_name="rate_metric", env="1"} 0.2
  {my_name="rate_another_metric", env="1"} 0.2

# Allows preserving __name__ via label_replace
eval instant at 15m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)")
eval instant at 10m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)")
  rate_metric{env="1"} 0.2
  rate_another_metric{env="1"} 0.2

# Allows relabeling (to-be-dropped) __name__ via label_join
eval instant at 15m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__")
eval instant at 10m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__")
  {my_name="metric", env="1"} 0.2
  {my_name="another_metric", env="1"} 0.2

# Allows preserving __name__ via label_join
eval instant at 15m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env")
eval instant at 10m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env")
  metric_1{env="1"} 0.2
  another_metric_1{env="1"} 0.2

# Does not drop metric names for aggregation operators
eval instant at 15m sum by (__name__, env) (metric{env="1"})
eval instant at 10m sum by (__name__, env) (metric{env="1"})
  metric{env="1"} 120

# Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label)
# This is an accidental side effect of delayed __name__ label dropping
eval_fail instant at 15m sum by (__name__) (rate({env="1"}[10m]))
eval_fail instant at 10m sum by (__name__) (rate({env="1"}[10m]))

# Aggregation operators aggregate metrics with same labelset and to-be-dropped names
# This is an accidental side effect of delayed __name__ label dropping
eval instant at 15m sum(rate({env="1"}[10m])) by (env)
eval instant at 10m sum(rate({env="1"}[10m])) by (env)
  {env="1"} 0.4

# Aggregation operators propagate __name__ label dropping information
eval instant at 15m topk(10, sum by (__name__, env) (metric{env="1"}))
eval instant at 10m topk(10, sum by (__name__, env) (metric{env="1"}))
  metric{env="1"} 120

eval instant at 15m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m])))
eval instant at 10m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m])))
  {env="1"} 0.2
142
promql/promqltest/testdata/native_histograms.test
vendored
@ -2,55 +2,55 @@
load 5m
  empty_histogram {{}}

eval instant at 5m empty_histogram
eval instant at 1m empty_histogram
  {__name__="empty_histogram"} {{}}

eval instant at 5m histogram_count(empty_histogram)
eval instant at 1m histogram_count(empty_histogram)
  {} 0

eval instant at 5m histogram_sum(empty_histogram)
eval instant at 1m histogram_sum(empty_histogram)
  {} 0

eval instant at 5m histogram_avg(empty_histogram)
eval instant at 1m histogram_avg(empty_histogram)
  {} NaN

eval instant at 5m histogram_fraction(-Inf, +Inf, empty_histogram)
eval instant at 1m histogram_fraction(-Inf, +Inf, empty_histogram)
  {} NaN

eval instant at 5m histogram_fraction(0, 8, empty_histogram)
eval instant at 1m histogram_fraction(0, 8, empty_histogram)
  {} NaN

clear

# buckets:[1 2 1] means 1 observation in the 1st bucket, 2 observations in the 2nd and 1 observation in the 3rd (total 4).
load 5m
  single_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}

# histogram_count extracts the count property from the histogram.
eval instant at 5m histogram_count(single_histogram)
eval instant at 1m histogram_count(single_histogram)
  {} 4

# histogram_sum extracts the sum property from the histogram.
eval instant at 5m histogram_sum(single_histogram)
eval instant at 1m histogram_sum(single_histogram)
  {} 5

# histogram_avg calculates the average from sum and count properties.
eval instant at 5m histogram_avg(single_histogram)
eval instant at 1m histogram_avg(single_histogram)
  {} 1.25

# We expect half of the values to fall in the range 1 < x <= 2.
eval instant at 5m histogram_fraction(1, 2, single_histogram)
eval instant at 1m histogram_fraction(1, 2, single_histogram)
  {} 0.5

# We expect all values to fall in the range 0 < x <= 8.
eval instant at 5m histogram_fraction(0, 8, single_histogram)
eval instant at 1m histogram_fraction(0, 8, single_histogram)
  {} 1

# Median is 1.5 due to linear estimation of the midpoint of the middle bucket, whose values are within range 1 < x <= 2.
eval instant at 5m histogram_quantile(0.5, single_histogram)
eval instant at 1m histogram_quantile(0.5, single_histogram)
  {} 1.5

clear

# Repeat the same histogram 10 times.
load 5m
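The single_histogram expectations follow directly from the bucket layout described in the comments: one observation in the first bucket, two in the middle bucket (1, 2], and one in the third. A small Go check of the quoted results, using only the numbers stated above:

package main

import "fmt"

func main() {
	// single_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}
	sum, count := 5.0, 4.0
	fmt.Println(sum / count) // 1.25 = histogram_avg

	// histogram_fraction(1, 2, ...): the (1, 2] bucket holds 2 of 4 observations.
	fmt.Println(2.0 / 4.0) // 0.5

	// histogram_quantile(0.5, ...): rank 2 of 4 lies in (1, 2]; with one
	// observation below the bucket, the linear estimate is its midpoint.
	lower, upper := 1.0, 2.0
	posInBucket := (2.0 - 1.0) / 2.0 // (rank - below) / inBucket
	fmt.Println(lower + (upper-lower)*posInBucket) // 1.5
}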
@ -88,7 +88,7 @@ eval instant at 50m histogram_fraction(1, 2, multi_histogram)
eval instant at 50m histogram_quantile(0.5, multi_histogram)
  {} 1.5

clear

# Accumulate the histogram addition for 10 iterations, offset is a bucket position where offset:0 is always the bucket
# with an upper limit of 1 and offset:1 is the bucket which follows to the right. Negative offsets represent bucket
@ -133,14 +133,14 @@ eval instant at 50m histogram_quantile(0.5, incr_histogram)
  {} 1.5

# Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum.
eval instant at 50m rate(incr_histogram[5m])
  {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}
eval instant at 50m rate(incr_histogram[10m])
  {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}

# Calculate the 50th percentile of observations over the last 10m.
eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m]))
  {} 1.5

clear

# Schema represents the histogram resolution, different schema have compatible bucket boundaries, e.g.:
# 0: 1 2 4 8 16 32 64 (higher resolution)
@ -166,77 +166,77 @@ eval instant at 5m histogram_avg(low_res_histogram)
eval instant at 5m histogram_fraction(1, 4, low_res_histogram)
  {} 1

clear

# z_bucket:1 means there is one observation in the zero bucket and z_bucket_w:0.5 means the zero bucket has the range
# 0 < x <= 0.5. Sum and count are expected to represent all observations in the histogram, including those in the zero bucket.
load 5m
  single_zero_histogram {{schema:0 z_bucket:1 z_bucket_w:0.5 sum:0.25 count:1}}

eval instant at 5m histogram_count(single_zero_histogram)
eval instant at 1m histogram_count(single_zero_histogram)
  {} 1

eval instant at 5m histogram_sum(single_zero_histogram)
eval instant at 1m histogram_sum(single_zero_histogram)
  {} 0.25

eval instant at 5m histogram_avg(single_zero_histogram)
eval instant at 1m histogram_avg(single_zero_histogram)
  {} 0.25

# When only the zero bucket is populated, or there are negative buckets, the distribution is assumed to be equally
# distributed around zero; i.e. that there are an equal number of positive and negative observations. Therefore the
# entire distribution must lie within the full range of the zero bucket, in this case: -0.5 < x <= +0.5.
eval instant at 5m histogram_fraction(-0.5, 0.5, single_zero_histogram)
eval instant at 1m histogram_fraction(-0.5, 0.5, single_zero_histogram)
  {} 1

# Half of the observations are estimated to be zero, as this is the midpoint between -0.5 and +0.5.
eval instant at 5m histogram_quantile(0.5, single_zero_histogram)
eval instant at 1m histogram_quantile(0.5, single_zero_histogram)
  {} 0

clear

# Let's turn single_histogram upside-down.
load 5m
  negative_histogram {{schema:0 sum:-5 count:4 n_buckets:[1 2 1]}}

eval instant at 5m histogram_count(negative_histogram)
eval instant at 1m histogram_count(negative_histogram)
  {} 4

eval instant at 5m histogram_sum(negative_histogram)
eval instant at 1m histogram_sum(negative_histogram)
  {} -5

eval instant at 5m histogram_avg(negative_histogram)
eval instant at 1m histogram_avg(negative_histogram)
  {} -1.25

# We expect half of the values to fall in the range -2 < x <= -1.
eval instant at 5m histogram_fraction(-2, -1, negative_histogram)
eval instant at 1m histogram_fraction(-2, -1, negative_histogram)
  {} 0.5

eval instant at 5m histogram_quantile(0.5, negative_histogram)
eval instant at 1m histogram_quantile(0.5, negative_histogram)
  {} -1.5

clear

# Two histogram samples.
load 5m
  two_samples_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}} {{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}}

# We expect to see the newest sample.
eval instant at 10m histogram_count(two_samples_histogram)
eval instant at 5m histogram_count(two_samples_histogram)
  {} 4

eval instant at 10m histogram_sum(two_samples_histogram)
eval instant at 5m histogram_sum(two_samples_histogram)
  {} -4

eval instant at 10m histogram_avg(two_samples_histogram)
eval instant at 5m histogram_avg(two_samples_histogram)
  {} -1

eval instant at 10m histogram_fraction(-2, -1, two_samples_histogram)
eval instant at 5m histogram_fraction(-2, -1, two_samples_histogram)
  {} 0.5

eval instant at 10m histogram_quantile(0.5, two_samples_histogram)
eval instant at 5m histogram_quantile(0.5, two_samples_histogram)
  {} -1.5

clear

# Add two histograms with negated data.
load 5m
@ -259,6 +259,8 @@ eval instant at 5m histogram_fraction(0, 4, balanced_histogram)
eval instant at 5m histogram_quantile(0.5, balanced_histogram)
  {} 0.5

clear

# Add histogram to test sum(last_over_time) regression
load 5m
  incr_sum_histogram{number="1"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:1 count:1 buckets:[1]}}x10

@ -270,6 +272,8 @@ eval instant at 50m histogram_sum(sum(incr_sum_histogram))
eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m])))
  {} 30

clear

# Apply rate function to histogram.
load 15s
  histogram_rate {{schema:1 count:12 sum:18.4 z_bucket:2 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[1 2 0 1 1]}}+{{schema:1 count:9 sum:18.4 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 0 1 1] n_buckets:[1 1 0 1 1]}}x100

@ -280,6 +284,8 @@ eval instant at 5m rate(histogram_rate[45s])
eval range from 5m to 5m30s step 30s rate(histogram_rate[45s])
  {} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}x1

clear

# Apply count and sum function to histogram.
load 10m
  histogram_count_sum_2 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1

@ -290,6 +296,8 @@ eval instant at 10m histogram_count(histogram_count_sum_2)
eval instant at 10m histogram_sum(histogram_count_sum_2)
  {} 100

clear

# Apply stddev and stdvar function to histogram with {1, 2, 3, 4} (low res).
load 10m
  histogram_stddev_stdvar_1 {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}x1

@ -300,6 +308,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_1)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_1)
  {} 1.163807968526718

clear

# Apply stddev and stdvar function to histogram with {1, 1, 1, 1} (high res).
load 10m
  histogram_stddev_stdvar_2 {{schema:8 count:10 sum:10 buckets:[1 2 3 4]}}x1

@ -310,6 +320,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_2)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_2)
  {} 2.3971123370139447e-05

clear

# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9}.
load 10m
  histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1

@ -320,6 +332,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_3)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3)
  {} 1844.4651144196398

clear

# Apply stddev and stdvar function to histogram with {-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3}.
load 10m
  histogram_stddev_stdvar_4 {{schema:0 count:10 sum:-112946 z_bucket:0 n_buckets:[0 0 1 1 1 0 1 1 0 0 3 0 0 0 1 0 0 1]}}x1

@ -330,6 +344,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_4)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_4)
  {} 759352122.1939945

clear

# Apply stddev and stdvar function to histogram with {-10x10}.
load 10m
  histogram_stddev_stdvar_5 {{schema:0 count:10 sum:-100 z_bucket:0 n_buckets:[0 0 0 0 10]}}x1

@ -340,6 +356,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_5)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_5)
  {} 1.725830020304794

clear

# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, NaN}.
load 10m
  histogram_stddev_stdvar_6 {{schema:3 count:7 sum:NaN z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1

@ -350,6 +368,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_6)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_6)
  {} NaN

clear

# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, Inf}.
load 10m
  histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
|
||||
|
@ -360,6 +380,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_7)
|
|||
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7)
|
||||
{} Inf
|
||||
|
||||
clear
|
||||
|
||||
# Apply quantile function to histogram with all positive buckets with zero bucket.
|
||||
load 10m
|
||||
histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1
|
||||
|
@ -391,6 +413,8 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_1)
|
|||
eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_1)
|
||||
{} -Inf
|
||||
|
||||
clear
|
||||
|
||||
# Apply quantile function to histogram with all negative buckets with zero bucket.
|
||||
load 10m
|
||||
histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1
|
||||
|
@ -419,6 +443,8 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_2)
|
|||
eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_2)
|
||||
{} -Inf
|
||||
|
||||
clear
|
||||
|
||||
# Apply quantile function to histogram with both positive and negative buckets with zero bucket.
|
||||
load 10m
|
||||
histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
|
||||
|
@ -462,6 +488,8 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_3)
|
|||
eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_3)
|
||||
{} -Inf
|
||||
|
||||
clear
|
||||
|
||||
# Apply fraction function to empty histogram.
|
||||
load 10m
|
||||
histogram_fraction_1 {{}}x1
|
||||
|
@ -469,6 +497,8 @@ load 10m
|
|||
eval instant at 10m histogram_fraction(3.1415, 42, histogram_fraction_1)
|
||||
{} NaN
|
||||
|
||||
clear
|
||||
|
||||
# Apply fraction function to histogram with positive and zero buckets.
|
||||
load 10m
|
||||
histogram_fraction_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1
|
||||
|
@ -633,6 +663,8 @@ eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_3)
|
|||
eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_3)
|
||||
{} 1
|
||||
|
||||
clear
|
||||
|
||||
# Apply fraction function to histogram with both positive, negative and zero buckets.
|
||||
load 10m
|
||||
histogram_fraction_4 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
|
||||
|
@ -720,27 +752,39 @@ eval instant at 10m histogram_sum(scalar(histogram_fraction(-Inf, +Inf, sum(hist
|
|||
|
||||
# Apply multiplication and division operator to histogram.
|
||||
load 10m
|
||||
histogram_mul_div {{schema:0 count:21 sum:33 z_bucket:3 z_bucket_w:0.001 buckets:[3 3 3] n_buckets:[3 3 3]}}x1
|
||||
histogram_mul_div {{schema:0 count:30 sum:33 z_bucket:3 z_bucket_w:0.001 buckets:[3 3 3] n_buckets:[6 6 6]}}x1
|
||||
float_series_3 3+0x1
|
||||
float_series_0 0+0x1
|
||||
|
||||
eval instant at 10m histogram_mul_div*3
|
||||
{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
|
||||
{} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}
|
||||
|
||||
eval instant at 10m histogram_mul_div*-1
|
||||
{} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}}
|
||||
|
||||
eval instant at 10m -histogram_mul_div
|
||||
{} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}}
|
||||
|
||||
eval instant at 10m histogram_mul_div*-3
|
||||
{} {{schema:0 count:-90 sum:-99 z_bucket:-9 z_bucket_w:0.001 buckets:[-9 -9 -9] n_buckets:[-18 -18 -18]}}
|
||||
|
||||
eval instant at 10m 3*histogram_mul_div
|
||||
{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
|
||||
{} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}
|
||||
|
||||
eval instant at 10m histogram_mul_div*float_series_3
|
||||
{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
|
||||
{} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}
|
||||
|
||||
eval instant at 10m float_series_3*histogram_mul_div
|
||||
{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
|
||||
{} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}
|
||||
|
||||
eval instant at 10m histogram_mul_div/3
|
||||
{} {{schema:0 count:7 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[1 1 1]}}
|
||||
{} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}}
|
||||
|
||||
eval instant at 10m histogram_mul_div/-3
|
||||
{} {{schema:0 count:-10 sum:-11 z_bucket:-1 z_bucket_w:0.001 buckets:[-1 -1 -1] n_buckets:[-2 -2 -2]}}
|
||||
|
||||
eval instant at 10m histogram_mul_div/float_series_3
|
||||
{} {{schema:0 count:7 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[1 1 1]}}
|
||||
{} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}}
|
||||
|
||||
eval instant at 10m histogram_mul_div*0
|
||||
{} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}
|
||||
|
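A quick check of the arithmetic above: scalar multiplication and division scale every histogram component linearly, so with the new fixture (count 30, n_buckets [6 6 6]) multiplying by 3 gives count 90, sum 99 and n_buckets [18 18 18], while dividing by 3 gives count 10, sum 11 and n_buckets [2 2 2].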
@@ -802,7 +846,7 @@ load 30s
some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}}

# Test the case where we only have two points for rate
eval_warn instant at 30s rate(some_metric[30s])
eval_warn instant at 30s rate(some_metric[1m])
{} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}

# Test the case where we have more than two points for rate

@@ -824,11 +868,11 @@ eval_warn instant at 1m30s rate(some_metric[1m])
# Should produce no results.

# Start with custom, end with exponential.
eval_warn instant at 1m rate(some_metric[30s])
eval_warn instant at 1m rate(some_metric[1m])
# Should produce no results.

# Start with exponential, end with custom.
eval_warn instant at 30s rate(some_metric[30s])
eval_warn instant at 30s rate(some_metric[1m])
# Should produce no results.

clear

@@ -963,8 +1007,8 @@ clear
load 1m
histogram_sum_over_time {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:1 count:0}}

eval instant at 3m sum_over_time(histogram_sum_over_time[3m:1m])
eval instant at 3m sum_over_time(histogram_sum_over_time[4m:1m])
{} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}

eval instant at 3m avg_over_time(histogram_sum_over_time[3m:1m])
eval instant at 3m avg_over_time(histogram_sum_over_time[4m:1m])
{} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}}
68
promql/promqltest/testdata/operators.test
vendored
@@ -113,7 +113,7 @@ eval instant at 50m http_requests{job="api-server", group="canary"}
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="canary", instance="1", job="api-server"} 400

eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[5m]) * 5 * 60
eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[10m]) * 5 * 60
{group="canary", instance="0", job="api-server"} 330
{group="canary", instance="1", job="api-server"} 440

@@ -308,65 +308,65 @@ load 5m
threshold{instance="abc",job="node",target="a@b.com"} 0

# Copy machine role to node variable.
eval instant at 5m node_role * on (instance) group_right (role) node_var
eval instant at 1m node_role * on (instance) group_right (role) node_var
{instance="abc",job="node",role="prometheus"} 2

eval instant at 5m node_var * on (instance) group_left (role) node_role
eval instant at 1m node_var * on (instance) group_left (role) node_role
{instance="abc",job="node",role="prometheus"} 2

eval instant at 5m node_var * ignoring (role) group_left (role) node_role
eval instant at 1m node_var * ignoring (role) group_left (role) node_role
{instance="abc",job="node",role="prometheus"} 2

eval instant at 5m node_role * ignoring (role) group_right (role) node_var
eval instant at 1m node_role * ignoring (role) group_right (role) node_var
{instance="abc",job="node",role="prometheus"} 2

# Copy machine role to node variable with instrumentation labels.
eval instant at 5m node_cpu * ignoring (role, mode) group_left (role) node_role
eval instant at 1m node_cpu * ignoring (role, mode) group_left (role) node_role
{instance="abc",job="node",mode="idle",role="prometheus"} 3
{instance="abc",job="node",mode="user",role="prometheus"} 1

eval instant at 5m node_cpu * on (instance) group_left (role) node_role
eval instant at 1m node_cpu * on (instance) group_left (role) node_role
{instance="abc",job="node",mode="idle",role="prometheus"} 3
{instance="abc",job="node",mode="user",role="prometheus"} 1


# Ratio of total.
eval instant at 5m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu)
eval instant at 1m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu)
{instance="abc",job="node",mode="idle"} .75
{instance="abc",job="node",mode="user"} .25
{instance="def",job="node",mode="idle"} .80
{instance="def",job="node",mode="user"} .20

eval instant at 5m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)
eval instant at 1m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)
{job="node",mode="idle"} 0.7857142857142857
{job="node",mode="user"} 0.21428571428571427

eval instant at 5m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu))
eval instant at 1m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu))
{} 1.0


eval instant at 5m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu)
eval instant at 1m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu)
{instance="abc",job="node",mode="idle"} .75
{instance="abc",job="node",mode="user"} .25
{instance="def",job="node",mode="idle"} .80
{instance="def",job="node",mode="user"} .20

eval instant at 5m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu)
eval instant at 1m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu)
{instance="abc",job="node",mode="idle"} .75
{instance="abc",job="node",mode="user"} .25
{instance="def",job="node",mode="idle"} .80
{instance="def",job="node",mode="user"} .20

eval instant at 5m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)
eval instant at 1m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)
{job="node",mode="idle"} 0.7857142857142857
{job="node",mode="user"} 0.21428571428571427

eval instant at 5m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu))
eval instant at 1m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu))
{} 1.0


# Copy over label from metric with no matching labels, without having to list cross-job target labels ('job' here).
eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0
eval instant at 1m node_cpu + on(dummy) group_left(foo) random*0
{instance="abc",job="node",mode="idle",foo="bar"} 3
{instance="abc",job="node",mode="user",foo="bar"} 1
{instance="def",job="node",mode="idle",foo="bar"} 8

@@ -374,12 +374,12 @@ eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0

# Use threshold from metric, and copy over target.
eval instant at 5m node_cpu > on(job, instance) group_left(target) threshold
eval instant at 1m node_cpu > on(job, instance) group_left(target) threshold
node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3
node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1

# Use threshold from metric, and a default (1) if it's not present.
eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1))
eval instant at 1m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1))
node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3
node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1
node_cpu{instance="def",job="node",mode="idle"} 8

@@ -387,37 +387,37 @@ eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or

# Check that binops drop the metric name.
eval instant at 5m node_cpu + 2
eval instant at 1m node_cpu + 2
{instance="abc",job="node",mode="idle"} 5
{instance="abc",job="node",mode="user"} 3
{instance="def",job="node",mode="idle"} 10
{instance="def",job="node",mode="user"} 4

eval instant at 5m node_cpu - 2
eval instant at 1m node_cpu - 2
{instance="abc",job="node",mode="idle"} 1
{instance="abc",job="node",mode="user"} -1
{instance="def",job="node",mode="idle"} 6
{instance="def",job="node",mode="user"} 0

eval instant at 5m node_cpu / 2
eval instant at 1m node_cpu / 2
{instance="abc",job="node",mode="idle"} 1.5
{instance="abc",job="node",mode="user"} 0.5
{instance="def",job="node",mode="idle"} 4
{instance="def",job="node",mode="user"} 1

eval instant at 5m node_cpu * 2
eval instant at 1m node_cpu * 2
{instance="abc",job="node",mode="idle"} 6
{instance="abc",job="node",mode="user"} 2
{instance="def",job="node",mode="idle"} 16
{instance="def",job="node",mode="user"} 4

eval instant at 5m node_cpu ^ 2
eval instant at 1m node_cpu ^ 2
{instance="abc",job="node",mode="idle"} 9
{instance="abc",job="node",mode="user"} 1
{instance="def",job="node",mode="idle"} 64
{instance="def",job="node",mode="user"} 4

eval instant at 5m node_cpu % 2
eval instant at 1m node_cpu % 2
{instance="abc",job="node",mode="idle"} 1
{instance="abc",job="node",mode="user"} 1
{instance="def",job="node",mode="idle"} 0

@@ -432,14 +432,14 @@ load 5m
metricB{baz="meh"} 4

# On with no labels, for metrics with no common labels.
eval instant at 5m random + on() metricA
eval instant at 1m random + on() metricA
{} 5

# Ignoring with no labels is the same as no ignoring.
eval instant at 5m metricA + ignoring() metricB
eval instant at 1m metricA + ignoring() metricB
{baz="meh"} 7

eval instant at 5m metricA + metricB
eval instant at 1m metricA + metricB
{baz="meh"} 7

clear

@@ -457,16 +457,16 @@ load 5m
test_total{instance="localhost"} 50
test_smaller{instance="localhost"} 10

eval instant at 5m test_total > bool test_smaller
eval instant at 1m test_total > bool test_smaller
{instance="localhost"} 1

eval instant at 5m test_total > test_smaller
eval instant at 1m test_total > test_smaller
test_total{instance="localhost"} 50

eval instant at 5m test_total < bool test_smaller
eval instant at 1m test_total < bool test_smaller
{instance="localhost"} 0

eval instant at 5m test_total < test_smaller
eval instant at 1m test_total < test_smaller

clear


@@ -476,14 +476,14 @@ load 5m
trigx{} 20
trigNaN{} NaN

eval instant at 5m trigy atan2 trigx
eval instant at 1m trigy atan2 trigx
{} 0.4636476090008061

eval instant at 5m trigy atan2 trigNaN
eval instant at 1m trigy atan2 trigNaN
{} NaN

eval instant at 5m 10 atan2 20
eval instant at 1m 10 atan2 20
0.4636476090008061

eval instant at 5m 10 atan2 NaN
eval instant at 1m 10 atan2 NaN
NaN
14
promql/promqltest/testdata/range_queries.test
vendored
@@ -1,18 +1,18 @@
# sum_over_time with all values
load 30s
load 15s
bar 0 1 10 100 1000

eval range from 0 to 2m step 1m sum_over_time(bar[30s])
eval range from 0 to 1m step 30s sum_over_time(bar[30s])
{} 0 11 1100

clear
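A quick check of the new expectations: with load 15s the samples sit at 0s, 15s, 30s, 45s and 60s, and the [30s] range excludes the sample lying exactly 30s in the past, so the three evaluation points at 0s, 30s and 60s yield 0, then 1 + 10 = 11, then 100 + 1000 = 1100, matching {} 0 11 1100.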
# sum_over_time with trailing values
load 30s
load 15s
bar 0 1 10 100 1000 0 0 0 0

eval range from 0 to 2m step 1m sum_over_time(bar[30s])
{} 0 11 1100
{} 0 1100 0

clear


@@ -21,15 +21,15 @@ load 30s
bar 0 1 10 100 1000 10000 100000 1000000 10000000

eval range from 0 to 4m step 1m sum_over_time(bar[30s])
{} 0 11 1100 110000 11000000
{} 0 10 1000 100000 10000000

clear

# sum_over_time with all values random
load 30s
load 15s
bar 5 17 42 2 7 905 51

eval range from 0 to 3m step 1m sum_over_time(bar[30s])
eval range from 0 to 90s step 30s sum_over_time(bar[30s])
{} 5 59 9 956

clear
12
promql/promqltest/testdata/staleness.test
vendored
@@ -14,10 +14,10 @@ eval instant at 40s metric
{__name__="metric"} 2

# It goes stale 5 minutes after the last sample.
eval instant at 330s metric
eval instant at 329s metric
{__name__="metric"} 2

eval instant at 331s metric
eval instant at 330s metric


# Range vector ignores stale sample.

@@ -30,9 +30,13 @@ eval instant at 10s count_over_time(metric[1s])
eval instant at 20s count_over_time(metric[1s])

eval instant at 20s count_over_time(metric[10s])

eval instant at 20s count_over_time(metric[20s])
{} 1

eval instant at 20s count_over_time(metric[10])

eval instant at 20s count_over_time(metric[20])
{} 1


@@ -48,7 +52,7 @@ eval instant at 0s metric
eval instant at 150s metric
{__name__="metric"} 0

eval instant at 300s metric
eval instant at 299s metric
{__name__="metric"} 0

eval instant at 301s metric
eval instant at 300s metric
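Note the boundary tightening in these staleness expectations: a series is now considered stale exactly 5 minutes after its last sample, with the lookback window exclusive at that edge, so the sample is still returned at 299s (and 329s) but no longer at precisely 300s (and 330s).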
40
promql/promqltest/testdata/subquery.test
vendored
@@ -10,18 +10,18 @@ eval instant at 10s sum_over_time(metric[50s:5s])

# Every evaluation yields the last value, i.e. 2
eval instant at 5m sum_over_time(metric[50s:10s])
{} 12
{} 10

# Series becomes stale at 5m10s (5m after last sample)
# Hence subquery gets a single sample at 6m-50s=5m10s.
eval instant at 6m sum_over_time(metric[50s:10s])
# Hence subquery gets a single sample at 5m10s.
eval instant at 5m59s sum_over_time(metric[60s:10s])
{} 2

eval instant at 10s rate(metric[20s:10s])
{} 0.1

eval instant at 20s rate(metric[20s:5s])
{} 0.05
{} 0.06666666666666667

clear


@@ -49,16 +49,16 @@ load 10s
metric3 0+3x1000

eval instant at 1000s sum_over_time(metric1[30s:10s])
{} 394
{} 297

# This is (394*2 - 100), because other than the last 100 at 1000s,
# This is (97 + 98*2 + 99*2 + 100), because other than 97@975s and 100@1000s,
# everything else is repeated with the 5s step.
eval instant at 1000s sum_over_time(metric1[30s:5s])
{} 688
{} 591

# Offset is aligned with the step.
# Offset is aligned with the step, so this is from [98@980s, 99@990s, 100@1000s].
eval instant at 1010s sum_over_time(metric1[30s:10s] offset 10s)
{} 394
{} 297

# Same result for different offsets due to step alignment.
eval instant at 1010s sum_over_time(metric1[30s:10s] offset 9s)

@@ -93,16 +93,16 @@ eval instant at 1010s sum_over_time((metric1)[30:10] offset 3)

# Nested subqueries
eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s])
{} 0.4
{} 0.30000000000000004

eval instant at 1000s rate(sum_over_time(metric2[30s:10s])[50s:10s])
{} 0.8
{} 0.6000000000000001

eval instant at 1000s rate(sum_over_time(metric3[30s:10s])[50s:10s])
{} 1.2
{} 0.9

eval instant at 1000s rate(sum_over_time((metric1+metric2+metric3)[30s:10s])[30s:10s])
{} 2.4
{} 1.8

clear


@@ -115,16 +115,20 @@ load 7s
eval instant at 80s rate(metric[1m])
{} 2.517857143

# No extrapolation, [2@20, 144@80]: (144 - 2) / 60
eval instant at 80s rate(metric[1m:10s])
{} 2.366666667
# Extrapolated to range start for counter, [2@20, 144@80]: (144 - 2) / (80 - 20)
eval instant at 80s rate(metric[1m500ms:10s])
{} 2.3666666666666667

# Extrapolated to zero value for counter, [2@20, 144@80]: (144 - 0) / 61
eval instant at 80s rate(metric[1m1s:10s])
{} 2.360655737704918
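Checking the arithmetic in the two new comments: with samples 2@20s and 144@80s, extrapolating only to the 60-second span between the points gives (144 - 2) / (80 - 20) = 2.3666666666666667, while the 61-second range lets the counter be extrapolated back to a zero value, giving (144 - 0) / 61 = 2.360655737704918, exactly the two expected results.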
# Only one value between 10s and 20s, 2@14
eval instant at 20s min_over_time(metric[10s])
{} 2

# min(1@10, 2@20)
eval instant at 20s min_over_time(metric[10s:10s])
# min(2@20)
eval instant at 20s min_over_time(metric[15s:10s])
{} 1

eval instant at 20m min_over_time(rate(metric[5m])[20m:1m])
36
promql/promqltest/testdata/trig_functions.test
vendored
@@ -5,92 +5,92 @@ load 5m
trig{l="y"} 20
trig{l="NaN"} NaN

eval instant at 5m sin(trig)
eval instant at 1m sin(trig)
{l="x"} -0.5440211108893699
{l="y"} 0.9129452507276277
{l="NaN"} NaN

eval instant at 5m cos(trig)
eval instant at 1m cos(trig)
{l="x"} -0.8390715290764524
{l="y"} 0.40808206181339196
{l="NaN"} NaN

eval instant at 5m tan(trig)
eval instant at 1m tan(trig)
{l="x"} 0.6483608274590867
{l="y"} 2.2371609442247427
{l="NaN"} NaN

eval instant at 5m asin(trig - 10.1)
eval instant at 1m asin(trig - 10.1)
{l="x"} -0.10016742116155944
{l="y"} NaN
{l="NaN"} NaN

eval instant at 5m acos(trig - 10.1)
eval instant at 1m acos(trig - 10.1)
{l="x"} 1.670963747956456
{l="y"} NaN
{l="NaN"} NaN

eval instant at 5m atan(trig)
eval instant at 1m atan(trig)
{l="x"} 1.4711276743037345
{l="y"} 1.5208379310729538
{l="NaN"} NaN

eval instant at 5m sinh(trig)
eval instant at 1m sinh(trig)
{l="x"} 11013.232920103324
{l="y"} 2.4258259770489514e+08
{l="NaN"} NaN

eval instant at 5m cosh(trig)
eval instant at 1m cosh(trig)
{l="x"} 11013.232920103324
{l="y"} 2.4258259770489514e+08
{l="NaN"} NaN

eval instant at 5m tanh(trig)
eval instant at 1m tanh(trig)
{l="x"} 0.9999999958776927
{l="y"} 1
{l="NaN"} NaN

eval instant at 5m asinh(trig)
eval instant at 1m asinh(trig)
{l="x"} 2.99822295029797
{l="y"} 3.6895038689889055
{l="NaN"} NaN

eval instant at 5m acosh(trig)
eval instant at 1m acosh(trig)
{l="x"} 2.993222846126381
{l="y"} 3.6882538673612966
{l="NaN"} NaN

eval instant at 5m atanh(trig - 10.1)
eval instant at 1m atanh(trig - 10.1)
{l="x"} -0.10033534773107522
{l="y"} NaN
{l="NaN"} NaN

eval instant at 5m rad(trig)
eval instant at 1m rad(trig)
{l="x"} 0.17453292519943295
{l="y"} 0.3490658503988659
{l="NaN"} NaN

eval instant at 5m rad(trig - 10)
eval instant at 1m rad(trig - 10)
{l="x"} 0
{l="y"} 0.17453292519943295
{l="NaN"} NaN

eval instant at 5m rad(trig - 20)
eval instant at 1m rad(trig - 20)
{l="x"} -0.17453292519943295
{l="y"} 0
{l="NaN"} NaN

eval instant at 5m deg(trig)
eval instant at 1m deg(trig)
{l="x"} 572.9577951308232
{l="y"} 1145.9155902616465
{l="NaN"} NaN

eval instant at 5m deg(trig - 10)
eval instant at 1m deg(trig - 10)
{l="x"} 0
{l="y"} 572.9577951308232
{l="NaN"} NaN

eval instant at 5m deg(trig - 20)
eval instant at 1m deg(trig - 20)
{l="x"} -572.9577951308232
{l="y"} 0
{l="NaN"} NaN
@@ -31,7 +31,7 @@ import (
)

type ActiveQueryTracker struct {
	mmapedFile []byte
	mmappedFile []byte
	getNextIndex chan int
	logger log.Logger
	closer io.Closer

@@ -87,24 +87,24 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) {
	}
}

type mmapedFile struct {
type mmappedFile struct {
	f io.Closer
	m mmap.MMap
}

func (f *mmapedFile) Close() error {
func (f *mmappedFile) Close() error {
	err := f.m.Unmap()
	if err != nil {
		err = fmt.Errorf("mmapedFile: unmapping: %w", err)
		err = fmt.Errorf("mmappedFile: unmapping: %w", err)
	}
	if fErr := f.f.Close(); fErr != nil {
		return errors.Join(fmt.Errorf("close mmapedFile.f: %w", fErr), err)
		return errors.Join(fmt.Errorf("close mmappedFile.f: %w", fErr), err)
	}

	return err
}

func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) {
func getMMappedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) {
	file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666)
	if err != nil {
		absPath, pathErr := filepath.Abs(filename)

@@ -129,7 +129,7 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io
		return nil, nil, err
	}

	return fileAsBytes, &mmapedFile{f: file, m: fileAsBytes}, err
	return fileAsBytes, &mmappedFile{f: file, m: fileAsBytes}, err
}

func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker {

@@ -141,14 +141,14 @@ func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger lo
	filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxConcurrent*entrySize
	logUnfinishedQueries(filename, filesize, logger)

	fileAsBytes, closer, err := getMMapedFile(filename, filesize, logger)
	fileAsBytes, closer, err := getMMappedFile(filename, filesize, logger)
	if err != nil {
		panic("Unable to create mmap-ed active query log")
	}

	copy(fileAsBytes, "[")
	activeQueryTracker := ActiveQueryTracker{
		mmapedFile: fileAsBytes,
		mmappedFile: fileAsBytes,
		closer: closer,
		getNextIndex: make(chan int, maxConcurrent),
		logger: logger,

@@ -206,14 +206,14 @@ func (tracker ActiveQueryTracker) GetMaxConcurrent() int {
}

func (tracker ActiveQueryTracker) Delete(insertIndex int) {
	copy(tracker.mmapedFile[insertIndex:], strings.Repeat("\x00", entrySize))
	copy(tracker.mmappedFile[insertIndex:], strings.Repeat("\x00", entrySize))
	tracker.getNextIndex <- insertIndex
}

func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int, error) {
	select {
	case i := <-tracker.getNextIndex:
		fileBytes := tracker.mmapedFile
		fileBytes := tracker.mmappedFile
		entry := newJSONEntry(query, tracker.logger)
		start, end := i, i+entrySize


@@ -26,7 +26,7 @@ import (
func TestQueryLogging(t *testing.T) {
	fileAsBytes := make([]byte, 4096)
	queryLogger := ActiveQueryTracker{
		mmapedFile: fileAsBytes,
		mmappedFile: fileAsBytes,
		logger: nil,
		getNextIndex: make(chan int, 4),
	}

@@ -70,7 +70,7 @@ func TestQueryLogging(t *testing.T) {
func TestIndexReuse(t *testing.T) {
	queryBytes := make([]byte, 1+3*entrySize)
	queryLogger := ActiveQueryTracker{
		mmapedFile: queryBytes,
		mmappedFile: queryBytes,
		logger: nil,
		getNextIndex: make(chan int, 3),
	}

@@ -106,10 +106,10 @@ func TestIndexReuse(t *testing.T) {

func TestMMapFile(t *testing.T) {
	dir := t.TempDir()
	fpath := filepath.Join(dir, "mmapedFile")
	fpath := filepath.Join(dir, "mmappedFile")
	const data = "ab"

	fileAsBytes, closer, err := getMMapedFile(fpath, 2, nil)
	fileAsBytes, closer, err := getMMappedFile(fpath, 2, nil)
	require.NoError(t, err)
	copy(fileAsBytes, data)
	require.NoError(t, closer.Close())


@@ -526,7 +526,7 @@ func (ssi *storageSeriesIterator) Next() chunkenc.ValueType {
		ssi.currH = p.H
		return chunkenc.ValFloatHistogram
	default:
		panic("storageSeriesIterater.Next failed to pick value type")
		panic("storageSeriesIterator.Next failed to pick value type")
	}
}
@@ -190,7 +190,7 @@ func matchesMatcherSets(matcherSets [][]*labels.Matcher, lbls labels.Labels) boo
	return ok
}

// Queryable returns the group's querable.
// Queryable returns the group's queryable.
func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable }

// Context returns the group's context.
@@ -45,6 +45,11 @@ import (
	"github.com/prometheus/prometheus/util/testutil"
)

func init() {
	// This can be removed when the default validation scheme in common is updated.
	model.NameValidationScheme = model.UTF8Validation
}

func TestPopulateLabels(t *testing.T) {
	cases := []struct {
		in labels.Labels

@@ -1181,7 +1186,7 @@ scrape_configs:
}

// TestOnlyStaleTargetsAreDropped makes sure that when a job has multiple providers, when one of them should no
// longer discover targets, only the stale targets of that provier are dropped.
// longer discover targets, only the stale targets of that provider are dropped.
func TestOnlyStaleTargetsAreDropped(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
@@ -329,9 +329,9 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
		mrc = sp.config.MetricRelabelConfigs
	)

	validationScheme := model.LegacyValidation
	if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig {
		validationScheme = model.UTF8Validation
	validationScheme := model.UTF8Validation
	if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig {
		validationScheme = model.LegacyValidation
	}

	sp.targetMtx.Lock()

@@ -485,9 +485,9 @@ func (sp *scrapePool) sync(targets []*Target) {
		scrapeClassicHistograms = sp.config.ScrapeClassicHistograms
	)

	validationScheme := model.LegacyValidation
	if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig {
		validationScheme = model.UTF8Validation
	validationScheme := model.UTF8Validation
	if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig {
		validationScheme = model.LegacyValidation
	}

	sp.targetMtx.Lock()

@@ -524,6 +524,7 @@ func (sp *scrapePool) sync(targets []*Target) {
		interval: interval,
		timeout: timeout,
		scrapeClassicHistograms: scrapeClassicHistograms,
		validationScheme: validationScheme,
	})
	if err != nil {
		l.setForcedError(err)
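Both hunks invert the default here: a scrape pool now assumes UTF-8 metric name validation and falls back to legacy validation only when the scrape config requests it explicitly, which is why the test configuration further down has to set metric_name_validation_scheme: legacy to keep its old behavior.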
@@ -34,6 +34,7 @@ import (
	"github.com/go-kit/log"
	"github.com/gogo/protobuf/proto"
	"github.com/google/go-cmp/cmp"
	"github.com/grafana/regexp"
	"github.com/prometheus/client_golang/prometheus"
	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
	dto "github.com/prometheus/client_model/go"

@@ -1134,6 +1135,7 @@ func TestScrapeLoopSeriesAdded(t *testing.T) {
}

func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
	model.NameValidationScheme = model.LegacyValidation
	s := teststorage.New(t)
	defer s.Close()
	ctx, cancel := context.WithCancel(context.Background())

@@ -2571,8 +2573,11 @@ func TestTargetScraperScrapeOK(t *testing.T) {
		expectedTimeout = "1.5"
	)

	var protobufParsing bool
	var allowUTF8 bool
	var (
		protobufParsing bool
		allowUTF8 bool
		qValuePattern = regexp.MustCompile(`q=([0-9]+(\.\d+)?)`)
	)

	server := httptest.NewServer(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

@@ -2585,6 +2590,17 @@ func TestTargetScraperScrapeOK(t *testing.T) {
					"Expected Accept header to prefer application/vnd.google.protobuf.")
			}

			contentTypes := strings.Split(accept, ",")
			for _, ct := range contentTypes {
				match := qValuePattern.FindStringSubmatch(ct)
				require.Len(t, match, 3)
				qValue, err := strconv.ParseFloat(match[1], 64)
				require.NoError(t, err, "Error parsing q value")
				require.GreaterOrEqual(t, qValue, float64(0))
				require.LessOrEqual(t, qValue, float64(1))
				require.LessOrEqual(t, len(strings.Split(match[1], ".")[1]), 3, "q value should have at most 3 decimal places")
			}

			timeout := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds")
			require.Equal(t, expectedTimeout, timeout, "Expected scrape timeout header.")
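The new assertions parse the quality values Prometheus attaches to its Accept header. Per RFC 9110, a q-value must lie in [0, 1] and carry at most three decimal places, so a header like Accept: application/vnd.google.protobuf;q=0.8,text/plain;q=0.5 (an illustrative value, not taken from the test) would pass, while something like q=0.8125 would trip the decimal-places check.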
@@ -3306,18 +3322,7 @@ func TestScrapeReportLimit(t *testing.T) {
		ScrapeTimeout: model.Duration(100 * time.Millisecond),
	}

	var (
		scrapes int
		scrapedTwice = make(chan bool)
	)

	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n")
		scrapes++
		if scrapes == 2 {
			close(scrapedTwice)
		}
	}))
	ts, scrapedTwice := newScrapableServer("metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n")
	defer ts.Close()

	sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))

@@ -3360,6 +3365,52 @@ func TestScrapeReportLimit(t *testing.T) {
	require.True(t, found)
}

func TestScrapeUTF8(t *testing.T) {
	s := teststorage.New(t)
	defer s.Close()
	model.NameValidationScheme = model.UTF8Validation
	t.Cleanup(func() { model.NameValidationScheme = model.LegacyValidation })

	cfg := &config.ScrapeConfig{
		JobName: "test",
		Scheme: "http",
		ScrapeInterval: model.Duration(100 * time.Millisecond),
		ScrapeTimeout: model.Duration(100 * time.Millisecond),
		MetricNameValidationScheme: config.UTF8ValidationConfig,
	}
	ts, scrapedTwice := newScrapableServer("{\"with.dots\"} 42\n")
	defer ts.Close()

	sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
	require.NoError(t, err)
	defer sp.stop()

	testURL, err := url.Parse(ts.URL)
	require.NoError(t, err)
	sp.Sync([]*targetgroup.Group{
		{
			Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}},
		},
	})

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("target was not scraped twice")
	case <-scrapedTwice:
		// If the target has been scraped twice, report samples from the first
		// scrape have been inserted in the database.
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
	require.NoError(t, err)
	defer q.Close()
	series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "with.dots"))

	require.True(t, series.Next(), "series not found in tsdb")
}

func TestScrapeLoopLabelLimit(t *testing.T) {
	tests := []struct {
		title string

@@ -3556,16 +3607,7 @@ test_summary_count 199
	// The expected "quantile" values do not have the trailing ".0".
	expectedQuantileValues := []string{"0.5", "0.9", "0.95", "0.99", "1"}

	scrapeCount := 0
	scraped := make(chan bool)

	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, metricsText)
		scrapeCount++
		if scrapeCount > 2 {
			close(scraped)
		}
	}))
	ts, scrapedTwice := newScrapableServer(metricsText)
	defer ts.Close()

	sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))

@@ -3584,7 +3626,7 @@ test_summary_count 199
	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("target was not scraped")
	case <-scraped:
	case <-scrapedTwice:
	}

	ctx, cancel := context.WithCancel(context.Background())

@@ -3963,6 +4005,7 @@ func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expec
	// Create a scrape loop with the HTTP server as the target.
	configStr := fmt.Sprintf(`
global:
  metric_name_validation_scheme: legacy
  scrape_interval: 1s
  scrape_timeout: 1s
scrape_configs:

@@ -4033,3 +4076,174 @@ scrape_configs:
		require.Equal(t, expectedSchema, h.Schema)
	}
}

func TestTargetScrapeConfigWithLabels(t *testing.T) {
	const (
		configTimeout = 1500 * time.Millisecond
		expectedTimeout = "1.5"
		expectedTimeoutLabel = "1s500ms"
		secondTimeout = 500 * time.Millisecond
		secondTimeoutLabel = "500ms"
		expectedParam = "value1"
		secondParam = "value2"
		expectedPath = "/metric-ok"
		secondPath = "/metric-nok"
		httpScheme = "http"
		paramLabel = "__param_param"
		jobName = "test"
	)

	createTestServer := func(t *testing.T, done chan struct{}) *url.URL {
		server := httptest.NewServer(
			http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				defer close(done)
				require.Equal(t, expectedTimeout, r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds"))
				require.Equal(t, expectedParam, r.URL.Query().Get("param"))
				require.Equal(t, expectedPath, r.URL.Path)

				w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
				w.Write([]byte("metric_a 1\nmetric_b 2\n"))
			}),
		)
		t.Cleanup(server.Close)
		serverURL, err := url.Parse(server.URL)
		require.NoError(t, err)
		return serverURL
	}

	run := func(t *testing.T, cfg *config.ScrapeConfig, targets []*targetgroup.Group) chan struct{} {
		done := make(chan struct{})
		srvURL := createTestServer(t, done)

		// Update target addresses to use the dynamically created server URL.
		for _, target := range targets {
			for i := range target.Targets {
				target.Targets[i][model.AddressLabel] = model.LabelValue(srvURL.Host)
			}
		}

		sp, err := newScrapePool(cfg, &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
		require.NoError(t, err)
		t.Cleanup(sp.stop)

		sp.Sync(targets)
		return done
	}

	cases := []struct {
		name string
		cfg *config.ScrapeConfig
		targets []*targetgroup.Group
	}{
		{
			name: "Everything in scrape config",
			cfg: &config.ScrapeConfig{
				ScrapeInterval: model.Duration(2 * time.Second),
				ScrapeTimeout: model.Duration(configTimeout),
				Params: url.Values{"param": []string{expectedParam}},
				JobName: jobName,
				Scheme: httpScheme,
				MetricsPath: expectedPath,
			},
			targets: []*targetgroup.Group{
				{
					Targets: []model.LabelSet{
						{model.AddressLabel: model.LabelValue("")},
					},
				},
			},
		},
		{
			name: "Overridden in target",
			cfg: &config.ScrapeConfig{
				ScrapeInterval: model.Duration(2 * time.Second),
				ScrapeTimeout: model.Duration(secondTimeout),
				JobName: jobName,
				Scheme: httpScheme,
				MetricsPath: secondPath,
				Params: url.Values{"param": []string{secondParam}},
			},
			targets: []*targetgroup.Group{
				{
					Targets: []model.LabelSet{
						{
							model.AddressLabel: model.LabelValue(""),
							model.ScrapeTimeoutLabel: expectedTimeoutLabel,
							model.MetricsPathLabel: expectedPath,
							paramLabel: expectedParam,
						},
					},
				},
			},
		},
		{
			name: "Overridden in relabel_config",
			cfg: &config.ScrapeConfig{
				ScrapeInterval: model.Duration(2 * time.Second),
				ScrapeTimeout: model.Duration(secondTimeout),
				JobName: jobName,
				Scheme: httpScheme,
				MetricsPath: secondPath,
				Params: url.Values{"param": []string{secondParam}},
				RelabelConfigs: []*relabel.Config{
					{
						Action: relabel.DefaultRelabelConfig.Action,
						Regex: relabel.DefaultRelabelConfig.Regex,
						SourceLabels: relabel.DefaultRelabelConfig.SourceLabels,
						TargetLabel: model.ScrapeTimeoutLabel,
						Replacement: expectedTimeoutLabel,
					},
					{
						Action: relabel.DefaultRelabelConfig.Action,
						Regex: relabel.DefaultRelabelConfig.Regex,
						SourceLabels: relabel.DefaultRelabelConfig.SourceLabels,
						TargetLabel: paramLabel,
						Replacement: expectedParam,
					},
					{
						Action: relabel.DefaultRelabelConfig.Action,
						Regex: relabel.DefaultRelabelConfig.Regex,
						SourceLabels: relabel.DefaultRelabelConfig.SourceLabels,
						TargetLabel: model.MetricsPathLabel,
						Replacement: expectedPath,
					},
				},
			},
			targets: []*targetgroup.Group{
				{
					Targets: []model.LabelSet{
						{
							model.AddressLabel: model.LabelValue(""),
							model.ScrapeTimeoutLabel: secondTimeoutLabel,
							model.MetricsPathLabel: secondPath,
							paramLabel: secondParam,
						},
					},
				},
			},
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			select {
			case <-run(t, c.cfg, c.targets):
			case <-time.After(10 * time.Second):
				t.Fatal("timeout after 10 seconds")
			}
		})
	}
}

func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice chan bool) {
	var scrapes int
	scrapedTwice = make(chan bool)

	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, scrapeText)
		scrapes++
		if scrapes == 2 {
			close(scrapedTwice)
		}
	})), scrapedTwice
}
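The newScrapableServer helper above factors out the hand-rolled httptest servers this file previously duplicated: it serves the given exposition text and closes the returned scrapedTwice channel on the second scrape, so callers such as TestScrapeReportLimit and TestScrapeUTF8 can simply select on that channel against a timeout.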
@@ -441,8 +441,8 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort
	}
	// Encode scrape query parameters as labels.
	for k, v := range cfg.Params {
		if len(v) > 0 {
			lb.Set(model.ParamLabelPrefix+k, v[0])
		if name := model.ParamLabelPrefix + k; len(v) > 0 && lb.Get(name) == "" {
			lb.Set(name, v[0])
		}
	}
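The guard added here changes precedence: a __param_<name> label already present on the target (for example set via target labels or relabeling, as TestTargetScrapeConfigWithLabels above exercises) is no longer overwritten by the scrape config's params. A minimal, self-contained Go sketch of that behavior, using hypothetical values that are not part of the patch:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// Hypothetical target that already carries __param_param from relabeling.
	lb := labels.NewBuilder(labels.FromStrings(model.ParamLabelPrefix+"param", "from-target"))

	// Stand-in for cfg.Params in the patched loop.
	params := map[string][]string{"param": {"from-config"}}

	// Mirrors the patched condition: the config value is applied only while
	// the label is still unset, so the target's own value wins.
	for k, v := range params {
		if name := model.ParamLabelPrefix + k; len(v) > 0 && lb.Get(name) == "" {
			lb.Set(name, v[0])
		}
	}

	fmt.Println(lb.Get(model.ParamLabelPrefix + "param")) // prints "from-target"
}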
24
scripts/get_module_version.sh
Executable file
@@ -0,0 +1,24 @@
#!/usr/bin/env bash

# if no version string is passed as an argument, read VERSION file
if [ $# -eq 0 ]; then
	VERSION="$(< VERSION)"
else
	VERSION=$1
fi


# Remove leading 'v' if present
VERSION="${VERSION#v}"

# Extract MAJOR, MINOR, and REST
MAJOR="${VERSION%%.*}"
MINOR="${VERSION#*.}"; MINOR="${MINOR%%.*}"
REST="${VERSION#*.*.}"

# Format and output based on MAJOR version
if [[ "$MAJOR" == "2" ]]; then
	echo "0.$MINOR.$REST"
elif [[ "$MAJOR" == "3" ]]; then
	printf "0.3%02d.$REST\n" "$MINOR"
fi
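Worked examples of the mapping this script implements: v2.53.1 becomes the Go module version 0.53.1 (a major of 2 maps to 0), while v3.1.0 becomes 0.301.0, with the 3.x minor zero-padded to two digits behind the 0.3 prefix; any other major produces no output.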
@@ -241,9 +241,9 @@ type sampleRing struct {
	delta int64

	// Lookback buffers. We use iBuf for mixed samples, but one of the three
	// concrete ones for homogenous samples. (Only one of the four bufs is
	// concrete ones for homogeneous samples. (Only one of the four bufs is
	// allowed to be populated!) This avoids the overhead of the interface
	// wrapper for the happy (and by far most common) case of homogenous
	// wrapper for the happy (and by far most common) case of homogeneous
	// samples.
	iBuf []chunks.Sample
	fBuf []fSample

@@ -268,7 +268,7 @@ const (
	fhBuf
)

// newSampleRing creates a new sampleRing. If you do not know the prefereed
// newSampleRing creates a new sampleRing. If you do not know the preferred
// value type yet, use a size of 0 (in which case the provided typ doesn't
// matter). On the first add, a buffer of size 16 will be allocated with the
// preferred type being the type of the first added sample.
@@ -68,7 +68,7 @@ func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() {
	cases := []struct {
		cfg *AzureADConfig
	}{
		// AzureAd roundtripper with Managedidentity.
		// AzureAd roundtripper with ManagedIdentity.
		{
			cfg: &AzureADConfig{
				Cloud: "AzurePublic",
@@ -171,7 +171,7 @@ func TestConvertBucketsLayout(t *testing.T) {
			},
			// Downscale:
			// 4+2+0+2, 0+0+0+0, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 0, 1
			// Check from sclaing from previous: 6+2, 0+0, 0+0, 0+0, 1+0 = 8, 0, 0, 0, 1
			// Check from scaling from previous: 6+2, 0+0, 0+0, 0+0, 1+0 = 8, 0, 0, 0, 1
			wantDeltas: []int64{8, -7},
		},
	},

@@ -222,7 +222,7 @@ func TestConvertBucketsLayout(t *testing.T) {
			},
			// Downscale:
			// 4+2+0+2, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 1
			// Check from sclaing from previous: 6+2, 0+0, 0+0, 1+0 = 8, 0, 0, 1
			// Check from scaling from previous: 6+2, 0+0, 0+0, 1+0 = 8, 0, 0, 1
			wantDeltas: []int64{8, -8, 0, 1},
		},
	},
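The expected deltas follow from delta-encoding the downscaled bucket counts: [8, 0, 0, 1] encodes as 8, then 0-8=-8, 0-0=0 and 1-0=1, i.e. {8, -8, 0, 1}; in the first case the run of empty buckets is evidently elided by the span layout, leaving [8, 1], which encodes as 8 and 1-8=-7.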
@@ -2027,7 +2027,7 @@ func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt
	// make the problem worse, particularly if we're getting rate limited.
	//
	// reshardDisableTimestamp holds the unix timestamp until which resharding
	// is diableld. We'll update that timestamp if the period we were just told
	// is disabled. We'll update that timestamp if the period we were just told
	// to sleep for is newer than the existing disabled timestamp.
	reshardWaitPeriod := time.Now().Add(time.Duration(sleepDuration) * 2)
	if oldTS, updated := setAtomicToNewer(&t.reshardDisableEndTimestamp, reshardWaitPeriod.Unix()); updated {


@@ -351,7 +351,7 @@ func TestMetadataDelivery(t *testing.T) {

	require.Equal(t, 0.0, client_testutil.ToFloat64(m.metrics.failedMetadataTotal))
	require.Len(t, c.receivedMetadata, numMetadata)
	// One more write than the rounded qoutient should be performed in order to get samples that didn't
	// One more write than the rounded quotient should be performed in order to get samples that didn't
	// fit into MaxSamplesPerSend.
	require.Equal(t, numMetadata/config.DefaultMetadataConfig.MaxSamplesPerSend+1, c.writesReceived)
	// Make sure the last samples were sent.


@@ -312,7 +312,7 @@ func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels,

func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
	// TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write.
	// UpadteMetadata is no-op for remote write (where timestampTracker is being used) for now.
	// UpdateMetadata is no-op for remote write (where timestampTracker is being used) for now.
	return 0, nil
}


@@ -335,7 +335,7 @@ func validateOptions(opts *Options) *Options {
		opts.WALCompression = wlog.CompressionNone
	}

	// Revert Stripesize to DefaultStripsize if Stripsize is either 0 or not a power of 2.
	// Revert StripeSize to DefaultStripeSize if StripeSize is either 0 or not a power of 2.
	if opts.StripeSize <= 0 || ((opts.StripeSize & (opts.StripeSize - 1)) != 0) {
		opts.StripeSize = tsdb.DefaultStripeSize
	}

@@ -395,7 +395,7 @@ func (db *DB) replayWAL() error {
		return fmt.Errorf("finding WAL segments: %w", err)
	}

	// Backfil segments from the most recent checkpoint onwards.
	// Backfill segments from the most recent checkpoint onwards.
	for i := startFrom; i <= last; i++ {
		seg, err := wlog.OpenReadSegment(wlog.SegmentName(db.wal.Dir(), i))
		if err != nil {


@@ -14,7 +14,7 @@
// The code in this file was largely written by Damian Gryski as part of
// https://github.com/dgryski/go-tsz and published under the license below.
// It was modified to accommodate reading from byte slices without modifying
// the underlying bytes, which would panic when reading from mmap'd
// the underlying bytes, which would panic when reading from mmapped
// read-only byte slices.
package chunkenc


@@ -14,7 +14,7 @@
// The code in this file was largely written by Damian Gryski as part of
// https://github.com/dgryski/go-tsz and published under the license below.
// It was modified to accommodate reading from byte slices without modifying
// the underlying bytes, which would panic when reading from mmap'd
// the underlying bytes, which would panic when reading from mmapped
// read-only byte slices.

// Copyright (c) 2015,2016 Damian Gryski <damian@gryski.com>


@@ -24,7 +24,7 @@ import (
)

const (
	// Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkrefMap to shrink it again.
	// Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkRefMap to shrink it again.
	chunkRefMapShrinkThreshold = 1000

	// Minimum interval between shrinking of chunkWriteQueue.chunkRefMap.
Some files were not shown because too many files have changed in this diff.