Merge branch 'main' into sparsehistogram

beorn7 · commit fd5ea4e0b5 · 2021-10-07 23:16:42 +02:00
307 changed files with 99,530 additions and 14,948 deletions


@ -3,7 +3,7 @@ version: 2.1
orbs:
prometheus: prometheus/prometheus@0.11.0
go: circleci/go@0.2.0
go: circleci/go@1.7.0
win: circleci/windows@2.3.0
executors:
@ -11,10 +11,10 @@ executors:
# should also be updated.
golang:
docker:
- image: circleci/golang:1.16-node
- image: quay.io/prometheus/golang-builder:1.17-base
golang_115:
docker:
- image: circleci/golang:1.15-node
- image: quay.io/prometheus/golang-builder:1.15-base
jobs:
test_go:
@ -24,8 +24,6 @@ jobs:
- prometheus/setup_environment
- go/load-cache:
key: v1
- run:
command: sudo apt-get install -y yamllint
- run:
command: make GO_ONLY=1
environment:
@ -49,21 +47,23 @@ jobs:
- store_test_results:
path: test-results
test_react:
test_ui:
executor: golang
steps:
- checkout
- restore_cache:
keys:
- v3-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
- v3-npm-deps-{{ checksum "web/ui/package-lock.json" }}
- v3-npm-deps-
- run:
command: make react-app-test
- run: make ui-install
- run: make ui-lint
- run: make ui-build-module
- run: make ui-test
- save_cache:
key: v3-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
key: v3-npm-deps-{{ checksum "web/ui/package-lock.json" }}
paths:
- /home/circleci/.cache/yarn
- ~/.npm
test_windows:
executor:
@ -108,8 +108,6 @@ jobs:
- run:
command: jb install
working_directory: ~/project/documentation/prometheus-mixin
- run:
command: sudo apt-get install -y yamllint
- run:
command: make
working_directory: ~/project/documentation/prometheus-mixin
@ -121,9 +119,8 @@ jobs:
executor: golang
steps:
- checkout
- run: mkdir -v -p "${PATH%%:*}" && curl -sL --fail https://github.com/mikefarah/yq/releases/download/v4.6.3/yq_linux_amd64 -o "${PATH%%:*}/yq" && chmod -v +x "${PATH%%:*}/yq"
- run: sha256sum -c <(echo "c4343783c3361495c0d6d1eb742bba7432aa65e13e9fb8d7e201d544bcf14246 ${PATH%%:*}/yq")
- run: ./scripts/sync_repo_files.sh
- run: ./scripts/sync_codemirror.sh
workflows:
version: 2
@ -133,7 +130,7 @@ workflows:
filters:
tags:
only: /.*/
- test_react:
- test_ui:
filters:
tags:
only: /.*/
@ -159,7 +156,7 @@ workflows:
context: org-context
requires:
- test_go
- test_react
- test_ui
- build
filters:
branches:
@ -169,7 +166,7 @@ workflows:
context: org-context
requires:
- test_go
- test_react
- test_ui
- build
filters:
tags:

.github/CODEOWNERS

@ -1,5 +1,7 @@
/web/ui @juliusv
/web/ui/module @juliusv @nexucis
/storage/remote @csmarchbanks @cstyan @bwplotka @tomwilkie
/discovery/kubernetes @brancz
/tsdb @codesome
/promql @codesome @roidelapluie
/cmd/promtool @jessicagreben @dgl

.github/workflows/golangci-lint.yml (new file)

@ -0,0 +1,29 @@
name: golangci-lint
on:
push:
paths:
- "go.sum"
- "go.mod"
- "**.go"
- "scripts/errcheck_excludes.txt"
- ".github/workflows/golangci-lint.yml"
pull_request:
paths:
- "go.sum"
- "go.mod"
- "**.go"
- "scripts/errcheck_excludes.txt"
- ".github/workflows/golangci-lint.yml"
jobs:
golangci:
name: lint
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Lint
uses: golangci/golangci-lint-action@v2
with:
version: v1.42.0
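
For contributors who want to reproduce this check locally, a minimal sketch using the same pinned version (the `go install` path and the `run` subcommand are standard golangci-lint usage, not part of this diff):

```bash
# Install the linter at the version pinned in the workflow above, then lint the repo.
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.42.0
golangci-lint run ./...
```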

.gitignore

@ -25,3 +25,5 @@ npm_licenses.tar.bz2
/vendor
/.build
/**/node_modules

.gitpod.Dockerfile (new file)

@ -0,0 +1,7 @@
FROM gitpod/workspace-full
ENV CUSTOM_NODE_VERSION=16
RUN bash -c ". .nvm/nvm.sh && nvm install ${CUSTOM_NODE_VERSION} && nvm use ${CUSTOM_NODE_VERSION} && nvm alias default ${CUSTOM_NODE_VERSION}"
RUN echo "nvm use default &>/dev/null" >> ~/.bashrc.d/51-nvm-fix


@ -1,4 +1,5 @@
---
image:
file: .gitpod.Dockerfile
tasks:
- init:
make build
@ -6,11 +7,11 @@ tasks:
gp sync-done build
./prometheus --config.file=documentation/examples/prometheus.yml
- command: |
cd web/ui/react-app
cd web/ui/
gp sync-await build
unset BROWSER
export DANGEROUSLY_DISABLE_HOST_CHECK=true
yarn start
npm start
openMode: split-right
ports:
- port: 3000


@ -1,7 +1,7 @@
go:
# Whenever the Go version is updated here,
# .circle/config.yml should also be updated.
version: 1.16
version: 1.17
repository:
path: github.com/prometheus/prometheus
build:


@ -24,3 +24,4 @@ rules:
.github/workflows/funcbench.yml
.github/workflows/fuzzing.yml
.github/workflows/prombench.yml
.github/workflows/golangci-lint.yml


@ -1,3 +1,80 @@
## 2.30.3 / 2021-10-05
* [BUGFIX] TSDB: Fix panic on failed snapshot replay. #9438
* [BUGFIX] TSDB: Don't fail snapshot replay with exemplar storage disabled when the snapshot contains exemplars. #9438
## 2.30.2 / 2021-10-01
* [BUGFIX] TSDB: Don't error on overlapping m-mapped chunks during WAL replay. #9381
## 2.30.1 / 2021-09-28
* [ENHANCEMENT] Remote Write: Redact remote write URL when used for metric label. #9383
* [ENHANCEMENT] UI: Redact remote write URL and proxy URL passwords in the `/config` page. #9408
* [BUGFIX] promtool rules backfill: Prevent creation of data before the start time. #9339
* [BUGFIX] promtool rules backfill: Do not query after the end time. #9340
* [BUGFIX] Azure SD: Fix panic when no computername is set. #9387
## 2.30.0 / 2021-09-14
* [FEATURE] **experimental** TSDB: Snapshot in-memory chunks on shutdown for faster restarts. Behind `--enable-feature=memory-snapshot-on-shutdown` flag. #7229
* [FEATURE] **experimental** Scrape: Configure scrape interval and scrape timeout via relabeling using `__scrape_interval__` and `__scrape_timeout__` labels respectively. #8911
* [FEATURE] Scrape: Add `scrape_timeout_seconds` and `scrape_sample_limit` metric. Behind `--enable-feature=extra-scrape-metrics` flag to avoid additional cardinality by default. #9247 #9295
* [ENHANCEMENT] Scrape: Add `--scrape.timestamp-tolerance` flag to adjust scrape timestamp tolerance when enabled via `--scrape.adjust-timestamps`. #9283
* [ENHANCEMENT] Remote Write: Improve throughput when sending exemplars. #8921
* [ENHANCEMENT] TSDB: Optimise WAL loading by removing extra map and caching min-time #9160
* [ENHANCEMENT] promtool: Speed up checking for duplicate rules. #9262/#9306
* [ENHANCEMENT] Scrape: Reduce allocations when parsing the metrics. #9299
* [ENHANCEMENT] docker_sd: Support host network mode #9125
* [BUGFIX] Exemplars: Fix panic when resizing exemplar storage from 0 to a non-zero size. #9286
* [BUGFIX] TSDB: Correctly decrement `prometheus_tsdb_head_active_appenders` when the append has no samples. #9230
* [BUGFIX] promtool rules backfill: Return 1 if backfill was unsuccessful. #9303
* [BUGFIX] promtool rules backfill: Avoid creation of overlapping blocks. #9324
* [BUGFIX] config: Fix a panic when reloading configuration with a `null` relabel action. #9224
## 2.29.2 / 2021-08-27
* [BUGFIX] Fix Kubernetes SD failing to discover Ingress in Kubernetes v1.22. #9205
* [BUGFIX] Fix data race in loading write-ahead-log (WAL). #9259
## 2.29.1 / 2021-08-11
* [BUGFIX] tsdb: align atomically accessed int64 to prevent panic in 32-bit
archs. #9192
## 2.29.0 / 2021-08-11
Note for macOS users: Due to [changes in the upcoming Go 1.17](https://tip.golang.org/doc/go1.17#darwin),
this is the last Prometheus release that supports macOS 10.12 Sierra.
* [CHANGE] Promote `--storage.tsdb.allow-overlapping-blocks` flag to stable. #9117
* [CHANGE] Promote `--storage.tsdb.retention.size` flag to stable. #9004
* [FEATURE] Add Kuma service discovery. #8844
* [FEATURE] Add `present_over_time` PromQL function. #9097
* [FEATURE] Allow configuring exemplar storage via file and make it reloadable. #8974
* [FEATURE] UI: Allow selecting time range with mouse drag. #8977
* [FEATURE] promtool: Add feature flags flag `--enable-feature`. #8958
* [FEATURE] promtool: Add file_sd file validation. #8950
* [ENHANCEMENT] Reduce blocking of outgoing remote write requests from series garbage collection. #9109
* [ENHANCEMENT] Improve write-ahead-log decoding performance. #9106
* [ENHANCEMENT] Improve append performance in TSDB by reducing mutexes usage. #9061
* [ENHANCEMENT] Allow configuring `max_samples_per_send` for remote write metadata. #8959
* [ENHANCEMENT] Add `__meta_gce_interface_ipv4_<name>` meta label to GCE discovery. #8978
* [ENHANCEMENT] Add `__meta_ec2_availability_zone_id` meta label to EC2 discovery. #8896
* [ENHANCEMENT] Add `__meta_azure_machine_computer_name` meta label to Azure discovery. #9112
* [ENHANCEMENT] Add `__meta_hetzner_hcloud_labelpresent_<labelname>` meta label to Hetzner discovery. #9028
* [ENHANCEMENT] promtool: Add compaction efficiency to `promtool tsdb analyze` reports. #8940
* [ENHANCEMENT] promtool: Allow configuring max block duration for backfilling via `--max-block-duration` flag. #8919
* [ENHANCEMENT] UI: Add sorting and filtering to flags page. #8988
* [ENHANCEMENT] UI: Improve alerts page rendering performance. #9005
* [BUGFIX] Log when total symbol size exceeds 2^32 bytes, causing compaction to fail, and skip compaction. #9104
* [BUGFIX] Fix incorrect `target_limit` reloading of zero value. #9120
* [BUGFIX] Fix head GC and pending readers race condition. #9081
* [BUGFIX] Fix timestamp handling in OpenMetrics parser. #9008
* [BUGFIX] Fix potential duplicate metrics in `/federate` endpoint when specifying multiple matchers. #8885
* [BUGFIX] Fix server configuration and validation for authentication via client cert. #9123
* [BUGFIX] Allow `start` and `end` again as label names in PromQL queries. They were disallowed since the introduction of @ timestamp feature. #9119
## 2.28.1 / 2021-07-01
* [BUGFIX]: HTTP SD: Allow `charset` specification in `Content-Type` header. #8981


@ -52,7 +52,7 @@ All our issues are regularly tagged so that you can also filter down the issues
* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests).
* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://web.libera.chat/?channels=#prometheus) on irc.libera.chat (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)).
* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on the IRC channel [#prometheus-dev](https://web.libera.chat/?channels=#prometheus-dev) on irc.libera.chat (for the easiest start, [join via Element](https://app.element.io/#/room/#prometheus-dev:matrix.org)).
* Add tests relevant to the fixed bug or new feature.
@ -64,10 +64,10 @@ To add or update a new dependency, use the `go get` command:
```bash
# Pick the latest tagged release.
go get example.com/some/module/pkg
go install example.com/some/module/pkg@latest
# Pick a specific version.
go get example.com/some/module/pkg@vX.Y.Z
go install example.com/some/module/pkg@vX.Y.Z
```
Tidy up the `go.mod` and `go.sum` files:
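
The tidy step introduced by this line falls outside the visible hunk; as a minimal sketch of what it typically looks like (the `go mod tidy` invocation is standard Go tooling, assumed rather than quoted from the file):

```bash
# Prune and re-resolve module requirements after the dependency changes above.
GO111MODULE=on go mod tidy
```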


@ -14,14 +14,13 @@ COPY LICENSE /LICENSE
COPY NOTICE /NOTICE
COPY npm_licenses.tar.bz2 /npm_licenses.tar.bz2
RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/
RUN mkdir -p /prometheus && \
chown -R nobody:nobody etc/prometheus /prometheus
WORKDIR /prometheus
RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/ && \
chown -R nobody:nobody /etc/prometheus /prometheus
USER nobody
EXPOSE 9090
VOLUME [ "/prometheus" ]
WORKDIR /prometheus
ENTRYPOINT [ "/bin/prometheus" ]
CMD [ "--config.file=/etc/prometheus/prometheus.yml", \
"--storage.tsdb.path=/prometheus", \


@ -1,7 +1,7 @@
Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) is the main/default maintainer, some parts of the codebase have other maintainers:
* `cmd`
* `promtool`: David Leadbeater (<dgl@dgl.cx> / @dgl)
* `promtool`: David Leadbeater (<dgl@dgl.cx> / @dgl), Jessica Grebenschikov (<jessica.greben1@gmail.com> / @jessicagreben)
* `discovery`
* `k8s`: Frederic Branczyk (<fbranczyk@gmail.com> / @brancz)
* `documentation`
@ -11,6 +11,7 @@ Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) is the main/defaul
* `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka)
* `web`
* `ui`: Julius Volz (<julius.volz@gmail.com> / @juliusv)
* `module`: Augustin Husson (<husson.augustin@gmail.com> @nexucis)
* `Makefile` and related build configuration: Simon Pasquier (<pasquier.simon@gmail.com> / @simonpasquier), Ben Kochie (<superq@gmail.com> / @SuperQ)
For the sake of brevity, not all subtrees are explicitly listed. Due to the


@ -14,12 +14,9 @@
# Needs to be defined before including Makefile.common to auto-generate targets
DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le s390x
REACT_APP_PATH = web/ui/react-app
REACT_APP_SOURCE_FILES = $(shell find $(REACT_APP_PATH)/public/ $(REACT_APP_PATH)/src/ $(REACT_APP_PATH)/tsconfig.json)
REACT_APP_OUTPUT_DIR = web/ui/static/react
REACT_APP_NODE_MODULES_PATH = $(REACT_APP_PATH)/node_modules
UI_PATH = web/ui
UI_NODE_MODULES_PATH = $(UI_PATH)/node_modules
REACT_APP_NPM_LICENSES_TARBALL = "npm_licenses.tar.bz2"
REACT_APP_BUILD_SCRIPT = ./scripts/build_react_app.sh
PROMTOOL = ./promtool
TSDB_BENCHMARK_NUM_METRICS ?= 1000
@ -32,15 +29,28 @@ include Makefile.common
DOCKER_IMAGE_NAME ?= prometheus
$(REACT_APP_NODE_MODULES_PATH): $(REACT_APP_PATH)/package.json $(REACT_APP_PATH)/yarn.lock
cd $(REACT_APP_PATH) && yarn --frozen-lockfile
.PHONY: ui-install
ui-install:
cd $(UI_PATH) && npm install
$(REACT_APP_OUTPUT_DIR): $(REACT_APP_NODE_MODULES_PATH) $(REACT_APP_SOURCE_FILES) $(REACT_APP_BUILD_SCRIPT)
@echo ">> building React app"
@$(REACT_APP_BUILD_SCRIPT)
.PHONY: ui-build
ui-build:
cd $(UI_PATH) && npm run build
.PHONY: ui-build-module
ui-build-module:
cd $(UI_PATH) && npm run build:module
.PHONY: ui-test
ui-test:
cd $(UI_PATH) && npm run test:coverage
.PHONY: ui-lint
ui-lint:
cd $(UI_PATH) && npm run lint
.PHONY: assets
assets: $(REACT_APP_OUTPUT_DIR)
assets: ui-install ui-build
@echo ">> writing assets"
# Un-setting GOOS and GOARCH here because the generated Go code is always the same,
# but the cached object code is incompatible between architectures and OSes (which
@ -48,36 +58,20 @@ assets: $(REACT_APP_OUTPUT_DIR)
cd web/ui && GO111MODULE=$(GO111MODULE) GOOS= GOARCH= $(GO) generate -x -v $(GOOPTS)
@$(GOFMT) -w ./web/ui
.PHONY: react-app-lint
react-app-lint:
@echo ">> running React app linting"
cd $(REACT_APP_PATH) && yarn lint:ci
.PHONY: react-app-lint-fix
react-app-lint-fix:
@echo ">> running React app linting and fixing errors where possible"
cd $(REACT_APP_PATH) && yarn lint
.PHONY: react-app-test
react-app-test: | $(REACT_APP_NODE_MODULES_PATH) react-app-lint
@echo ">> running React app tests"
cd $(REACT_APP_PATH) && yarn test --no-watch --coverage
.PHONY: test
# If we only want to test go code we have to change the test target
# which is called by all.
ifeq ($(GO_ONLY),1)
test: common-test
else
test: common-test react-app-test
test: common-test ui-build-module ui-test ui-lint
endif
.PHONY: npm_licenses
npm_licenses: $(REACT_APP_NODE_MODULES_PATH)
npm_licenses: ui-install
@echo ">> bundling npm licenses"
rm -f $(REACT_APP_NPM_LICENSES_TARBALL)
find $(REACT_APP_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --transform 's/^/npm_licenses\//' --files-from=-
find $(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --transform 's/^/npm_licenses\//' --files-from=-
.PHONY: tarball
tarball: npm_licenses common-tarball
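
Taken together, the new `ui-*` targets above replace the yarn-based React targets; a typical local invocation, assuming Node.js and npm are installed, looks like:

```bash
# Install the web/ui npm workspace, then build, lint, and test it via the new targets.
make ui-install
make ui-build-module
make ui-build
make ui-lint
make ui-test
```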


@ -83,12 +83,18 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.39.0
GOLANGCI_LINT_VERSION ?= v1.42.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
# If we're in CI and there is an Actions file, that means the linter
# is being run in Actions, so we don't need to run it here.
ifeq (,$(CIRCLE_JOB))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
endif
endif
endif


@ -55,18 +55,18 @@ Prometheus will now be reachable at http://localhost:9090/.
### Building from source
To build Prometheus from source code, first ensure that have a working
To build Prometheus from source code, first ensure that you have a working
Go environment with [version 1.14 or greater installed](https://golang.org/doc/install).
You also need [Node.js](https://nodejs.org/) and [Yarn](https://yarnpkg.com/)
You also need [Node.js](https://nodejs.org/) and [npm](https://www.npmjs.com/)
installed in order to build the frontend assets.
You can directly use the `go` tool to download and install the `prometheus`
and `promtool` binaries into your `GOPATH`:
$ GO111MODULE=on go get github.com/prometheus/prometheus/cmd/...
$ GO111MODULE=on go install github.com/prometheus/prometheus/cmd/...
$ prometheus --config.file=your_config.yml
*However*, when using `go get` to build Prometheus, Prometheus will expect to be able to
*However*, when using `go install` to build Prometheus, Prometheus will expect to be able to
read its web assets from local filesystem directories under `web/ui/static` and
`web/ui/templates`. In order for these assets to be found, you will have to run Prometheus
from the root of the cloned repository. Note also that these directories do not include the


@ -34,8 +34,10 @@ Release cadence of first pre-releases being cut is 6 weeks.
| v2.27 | 2021-05-05 | Chris Marchbanks (GitHub: @csmarchbanks) |
| v2.28 | 2021-06-16 | Julius Volz (GitHub: @juliusv) |
| v2.29 | 2021-07-28 | Frederic Branczyk (GitHub: @brancz) |
| v2.30 | 2021-09-08 | **searching for volunteer** |
| v2.30 | 2021-09-08 | Ganesh Vernekar (GitHub: @codesome) |
| v2.31 | 2021-10-20 | **searching for volunteer** |
| v2.32 | 2021-12-01 | **searching for volunteer** |
| v2.33 | 2022-01-12 | **searching for volunteer** |
If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
@ -97,18 +99,21 @@ Either upgrade the dependencies within their existing version constraints as spe
```
cd web/ui/react-app
yarn upgrade
git add yarn.lock
npm update
git add package.json package-lock.json
```
Or alternatively, update all dependencies to their latest major versions. This is potentially more disruptive and will require more follow-up fixes, but should be done from time to time (use your best judgement):
```
cd web/ui/react-app
yarn upgrade --latest
git add package.json yarn.lock
npx npm-check-updates -u
npm install
git add package.json package-lock.json
```
You can find more details on managing npm dependencies and updates [in this blog post](https://www.carlrippon.com/upgrading-npm-dependencies/).
### 1. Prepare your release
At the start of a new major or minor release cycle create the corresponding release branch based on the main branch. For example if we're releasing `2.17.0` and the previous stable release is `2.16.0` we need to create a `release-2.17` branch. Note that all releases are handled in protected release branches, see the above `Branch management and versioning` section. Release candidates and patch releases for any given major or minor release happen in the same `release-<major>.<minor>` branch. Do not create `release-<version>` for patch or release candidate releases.


@ -1 +1 @@
2.28.0
2.30.3


@ -108,6 +108,7 @@ type flagConfig struct {
outageTolerance model.Duration
resendDelay model.Duration
web web.Options
scrape scrape.Options
tsdb tsdbOptions
lookbackDelta model.Duration
webTimeout model.Duration
@ -153,6 +154,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
case "memory-snapshot-on-shutdown":
c.tsdb.EnableMemorySnapshotOnShutdown = true
level.Info(logger).Log("msg", "Experimental memory snapshot on shutdown enabled")
case "extra-scrape-metrics":
c.scrape.ExtraMetrics = true
level.Info(logger).Log("msg", "Experimental additional scrape metrics")
case "":
continue
default:
@ -259,7 +263,7 @@ func main() {
a.Flag("storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
SetValue(&newFlagRetentionDuration)
a.Flag("storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\". This flag is experimental and can be changed in future releases.").
a.Flag("storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\".").
BytesVar(&cfg.tsdb.MaxBytes)
a.Flag("storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
@ -292,9 +296,12 @@ func main() {
a.Flag("rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
Default("1m").SetValue(&cfg.resendDelay)
a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to 2ms to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
Hidden().Default("true").BoolVar(&scrape.AlignScrapeTimestamps)
a.Flag("scrape.timestamp-tolerance", "Timestamp tolerance. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
Hidden().Default("2ms").DurationVar(&scrape.ScrapeTimestampTolerance)
a.Flag("alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications.").
Default("10000").IntVar(&cfg.notifier.QueueCapacity)
@ -313,7 +320,7 @@ func main() {
a.Flag("query.max-samples", "Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return.").
Default("50000000").IntVar(&cfg.queryMaxSamples)
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver, extra-scrape-metrics. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList)
promlogflag.AddFlags(a, &cfg.promlogConfig)
@ -458,7 +465,7 @@ func main() {
ctxNotify, cancelNotify = context.WithCancel(context.Background())
discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), discovery.Name("notify"))
scrapeManager = scrape.NewManager(log.With(logger, "component", "scrape manager"), fanoutStorage)
scrapeManager = scrape.NewManager(&cfg.scrape, log.With(logger, "component", "scrape manager"), fanoutStorage)
opts = promql.EngineOpts{
Logger: log.With(logger, "component", "query engine"),
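
The hunks above add the `extra-scrape-metrics` feature flag and the hidden `--scrape.timestamp-tolerance` flag; a minimal sketch of enabling them at startup (the config file path and tolerance value are illustrative placeholders):

```bash
# Enable the experimental scrape metrics and widen the scrape timestamp tolerance.
./prometheus \
  --config.file=documentation/examples/prometheus.yml \
  --enable-feature=extra-scrape-metrics \
  --scrape.timestamp-tolerance=5ms
```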


@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
//go:build !windows
// +build !windows
package main


@ -24,7 +24,7 @@ import (
"net/url"
"os"
"path/filepath"
"reflect"
"sort"
"strconv"
"strings"
"time"
@ -48,6 +48,7 @@ import (
_ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations.
"github.com/prometheus/prometheus/discovery/kubernetes"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/rulefmt"
"github.com/prometheus/prometheus/promql"
)
@ -136,6 +137,7 @@ func main() {
analyzePath := tsdbAnalyzeCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
analyzeBlockID := tsdbAnalyzeCmd.Arg("block id", "Block to analyze (default is the last block).").String()
analyzeLimit := tsdbAnalyzeCmd.Flag("limit", "How many items to show in each list.").Default("20").Int()
analyzeRunExtended := tsdbAnalyzeCmd.Flag("extended", "Run extended analysis.").Bool()
tsdbListCmd := tsdbCmd.Command("list", "List tsdb blocks.")
listHumanReadable := tsdbListCmd.Flag("human-readable", "Print human readable values.").Short('r').Bool()
@ -236,7 +238,7 @@ func main() {
os.Exit(checkErr(benchmarkWrite(*benchWriteOutPath, *benchSamplesFile, *benchWriteNumMetrics, *benchWriteNumScrapes)))
case tsdbAnalyzeCmd.FullCommand():
os.Exit(checkErr(analyzeBlock(*analyzePath, *analyzeBlockID, *analyzeLimit)))
os.Exit(checkErr(analyzeBlock(*analyzePath, *analyzeBlockID, *analyzeLimit, *analyzeRunExtended)))
case tsdbListCmd.FullCommand():
os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))
@ -471,8 +473,8 @@ func checkRules(filename string) (int, []error) {
fmt.Printf("%d duplicate rule(s) found.\n", len(dRules))
for _, n := range dRules {
fmt.Printf("Metric: %s\nLabel(s):\n", n.metric)
for i, l := range n.label {
fmt.Printf("\t%s: %s\n", i, l)
for _, l := range n.label {
fmt.Printf("\t%s: %s\n", l.Name, l.Value)
}
}
fmt.Println("Might cause inconsistency while recording expressions.")
@ -483,29 +485,52 @@ func checkRules(filename string) (int, []error) {
type compareRuleType struct {
metric string
label map[string]string
label labels.Labels
}
type compareRuleTypes []compareRuleType
func (c compareRuleTypes) Len() int { return len(c) }
func (c compareRuleTypes) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c compareRuleTypes) Less(i, j int) bool { return compare(c[i], c[j]) < 0 }
func compare(a, b compareRuleType) int {
if res := strings.Compare(a.metric, b.metric); res != 0 {
return res
}
return labels.Compare(a.label, b.label)
}
func checkDuplicates(groups []rulefmt.RuleGroup) []compareRuleType {
var duplicates []compareRuleType
var rules compareRuleTypes
for _, group := range groups {
for index, rule := range group.Rules {
inst := compareRuleType{
for _, rule := range group.Rules {
rules = append(rules, compareRuleType{
metric: ruleMetric(rule),
label: rule.Labels,
}
for i := 0; i < index; i++ {
t := compareRuleType{
metric: ruleMetric(group.Rules[i]),
label: group.Rules[i].Labels,
}
if reflect.DeepEqual(t, inst) {
duplicates = append(duplicates, t)
}
}
label: labels.FromMap(rule.Labels),
})
}
}
if len(rules) < 2 {
return duplicates
}
sort.Sort(rules)
last := rules[0]
for i := 1; i < len(rules); i++ {
if compare(last, rules[i]) == 0 {
// Don't add a duplicated rule multiple times.
if len(duplicates) == 0 || compare(last, duplicates[len(duplicates)-1]) != 0 {
duplicates = append(duplicates, rules[i])
}
}
last = rules[i]
}
return duplicates
}
@ -911,20 +936,17 @@ func importRules(url *url.URL, start, end, outputDir string, evalInterval time.D
} else {
etime, err = parseTime(end)
if err != nil {
fmt.Fprintln(os.Stderr, "error parsing end time:", err)
return err
return fmt.Errorf("error parsing end time: %v", err)
}
}
stime, err = parseTime(start)
if err != nil {
fmt.Fprintln(os.Stderr, "error parsing start time:", err)
return err
return fmt.Errorf("error parsing start time: %v", err)
}
if !stime.Before(etime) {
fmt.Fprintln(os.Stderr, "start time is not before end time")
return nil
return errors.New("start time is not before end time")
}
cfg := ruleImporterConfig{
@ -937,25 +959,24 @@ func importRules(url *url.URL, start, end, outputDir string, evalInterval time.D
Address: url.String(),
})
if err != nil {
fmt.Fprintln(os.Stderr, "new api client error", err)
return err
return fmt.Errorf("new api client error: %v", err)
}
ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, v1.NewAPI(client))
errs := ruleImporter.loadGroups(ctx, files)
for _, err := range errs {
if err != nil {
fmt.Fprintln(os.Stderr, "rule importer parse error", err)
return err
return fmt.Errorf("rule importer parse error: %v", err)
}
}
errs = ruleImporter.importAll(ctx)
for _, err := range errs {
if err != nil {
fmt.Fprintln(os.Stderr, "rule importer error", err)
}
fmt.Fprintln(os.Stderr, "rule importer error:", err)
}
if len(errs) > 0 {
return errors.New("error importing rules")
}
return err
return nil
}
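
The sort-based duplicate detection above is exercised through `promtool check rules`; a minimal usage sketch against the duplicate-rule fixture added later in this commit:

```bash
# Reports rules that share the same metric name and label set across groups.
./promtool check rules cmd/promtool/testdata/rules_duplicates.yml
```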


@ -21,6 +21,8 @@ import (
"testing"
"time"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/rulefmt"
"github.com/stretchr/testify/require"
)
@ -118,3 +120,46 @@ func TestCheckSDFile(t *testing.T) {
})
}
}
func TestCheckDuplicates(t *testing.T) {
cases := []struct {
name string
ruleFile string
expectedDups []compareRuleType
}{
{
name: "no duplicates",
ruleFile: "./testdata/rules.yml",
},
{
name: "duplicate in other group",
ruleFile: "./testdata/rules_duplicates.yml",
expectedDups: []compareRuleType{
{
metric: "job:test:count_over_time1m",
label: labels.New(),
},
},
},
}
for _, test := range cases {
c := test
t.Run(c.name, func(t *testing.T) {
rgs, err := rulefmt.ParseFile(c.ruleFile)
require.Empty(t, err)
dups := checkDuplicates(rgs.Groups)
require.Equal(t, c.expectedDups, dups)
})
}
}
func BenchmarkCheckDuplicates(b *testing.B) {
rgs, err := rulefmt.ParseFile("./testdata/rules_large.yml")
require.Empty(b, err)
b.ResetTimer()
for i := 0; i < b.N; i++ {
checkDuplicates(rgs.Groups)
}
}


@ -57,7 +57,7 @@ type ruleImporterConfig struct {
// newRuleImporter creates a new rule importer that can be used to parse and evaluate recording rule files and create new series
// written to disk in blocks.
func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter {
level.Info(logger).Log("backfiller", "new rule importer from start", config.start.Format(time.RFC822), " to end", config.end.Format(time.RFC822))
level.Info(logger).Log("backfiller", "new rule importer", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822))
return &ruleImporter{
logger: logger,
config: config,
@ -81,10 +81,9 @@ func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) {
for name, group := range importer.groups {
level.Info(importer.logger).Log("backfiller", "processing group", "name", name)
stimeWithAlignment := group.EvalTimestamp(importer.config.start.UnixNano())
for i, r := range group.Rules() {
level.Info(importer.logger).Log("backfiller", "processing rule", "id", i, "name", r.Name())
if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), stimeWithAlignment, importer.config.end, group); err != nil {
if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, group); err != nil {
errs = append(errs, err)
}
}
@ -103,11 +102,18 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
currStart := max(startOfBlock/int64(time.Second/time.Millisecond), start.Unix())
startWithAlignment := grp.EvalTimestamp(time.Unix(currStart, 0).UTC().UnixNano())
for startWithAlignment.Unix() < currStart {
startWithAlignment = startWithAlignment.Add(grp.Interval())
}
end := time.Unix(min(endOfBlock/int64(time.Second/time.Millisecond), end.Unix()), 0).UTC()
if end.Before(startWithAlignment) {
break
}
val, warnings, err := importer.apiClient.QueryRange(ctx,
ruleExpr,
v1.Range{
Start: startWithAlignment,
End: time.Unix(min(endOfBlock/int64(time.Second/time.Millisecond), end.Unix()), 0).UTC(),
End: end,
Step: grp.Interval(),
},
)
@ -141,22 +147,16 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
matrix = val.(model.Matrix)
for _, sample := range matrix {
currentLabels := make(labels.Labels, 0, len(sample.Metric)+len(ruleLabels)+1)
currentLabels = append(currentLabels, labels.Label{
Name: labels.MetricName,
Value: ruleName,
})
currentLabels = append(currentLabels, ruleLabels...)
lb := labels.NewBuilder(ruleLabels)
for name, value := range sample.Metric {
currentLabels = append(currentLabels, labels.Label{
Name: string(name),
Value: string(value),
})
lb.Set(string(name), string(value))
}
lb.Set(labels.MetricName, ruleName)
for _, value := range sample.Values {
if err := app.add(ctx, currentLabels, timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil {
if err := app.add(ctx, lb.Labels(), timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil {
return errors.Wrap(err, "add")
}
}


@ -61,6 +61,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
}{
{"no samples", 1, 0, 0, 0, []*model.SampleStream{}},
{"run importer once", 1, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}},
{"run importer with dup name label", 1, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"__name__": "val1", "name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}},
{"one importer twice", 2, 8, 4, 8, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}, {Timestamp: testTime2, Value: testValue2}}}}},
}
for _, tt := range testCases {
@ -194,7 +195,7 @@ func createMultiRuleTestFiles(path string) error {
- record: grp1_rule1
expr: grp1_rule1_expr
labels:
testlabel11: testlabelvalue11
testlabel11: testlabelvalue12
- name: group2
rules:
- record: grp2_rule1
@ -202,7 +203,7 @@ func createMultiRuleTestFiles(path string) error {
- record: grp2_rule2
expr: grp2_rule2_expr
labels:
testlabel11: testlabelvalue11
testlabel11: testlabelvalue13
`
return ioutil.WriteFile(path, []byte(recordingRules), 0777)
}


@ -0,0 +1,24 @@
# This is a rules file with duplicate expressions
groups:
- name: base
rules:
- record: job:test:count_over_time1m
expr: sum without(instance) (count_over_time(test[1m]))
# A recording rule that doesn't depend on input series.
- record: fixed_data
expr: 1
# Subquery with default resolution test.
- record: suquery_interval_test
expr: count_over_time(up[5m:])
# Duplicating
- record: job:test:count_over_time1m
expr: sum without(instance) (count_over_time(test[1m]))
- name: duplicate
rules:
- record: job:test:count_over_time1m
expr: sum without(instance) (count_over_time(test[1m]))

cmd/promtool/testdata/rules_large.yml (new file, 40,011 lines; diff suppressed because it is too large)


@ -418,7 +418,7 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
return db, block, nil
}
func analyzeBlock(path, blockID string, limit int) error {
func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
db, block, err := openBlock(path, blockID)
if err != nil {
return err
@ -564,7 +564,11 @@ func analyzeBlock(path, blockID string, limit int) error {
fmt.Printf("\nHighest cardinality metric names:\n")
printInfo(postingInfos)
return analyzeCompaction(block, ir)
if runExtended {
return analyzeCompaction(block, ir)
}
return nil
}
func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err error) {
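
With this change, the compaction analysis only runs when the new `--extended` flag is passed; a minimal usage sketch (the data directory is a placeholder):

```bash
# Without --extended, `promtool tsdb analyze` now skips the compaction analysis.
./promtool tsdb analyze --extended /path/to/tsdb-data
```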


@ -29,6 +29,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/sigv4"
yaml "gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/discovery"
@ -666,7 +667,7 @@ type RemoteWriteConfig struct {
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
QueueConfig QueueConfig `yaml:"queue_config,omitempty"`
MetadataConfig MetadataConfig `yaml:"metadata_config,omitempty"`
SigV4Config *SigV4Config `yaml:"sigv4,omitempty"`
SigV4Config *sigv4.SigV4Config `yaml:"sigv4,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
@ -758,17 +759,6 @@ type MetadataConfig struct {
MaxSamplesPerSend int `yaml:"max_samples_per_send,omitempty"`
}
// SigV4Config is the configuration for signing remote write requests with
// AWS's SigV4 verification process. Empty values will be retrieved using the
// AWS default credentials chain.
type SigV4Config struct {
Region string `yaml:"region,omitempty"`
AccessKey string `yaml:"access_key,omitempty"`
SecretKey config.Secret `yaml:"secret_key,omitempty"`
Profile string `yaml:"profile,omitempty"`
RoleARN string `yaml:"role_arn,omitempty"`
}
// RemoteReadConfig is the configuration for reading from remote storage.
type RemoteReadConfig struct {
URL *config.URL `yaml:"url"`


@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
// +build !windows
package config


@ -45,6 +45,7 @@ import (
"github.com/prometheus/prometheus/discovery/marathon"
"github.com/prometheus/prometheus/discovery/moby"
"github.com/prometheus/prometheus/discovery/openstack"
"github.com/prometheus/prometheus/discovery/puppetdb"
"github.com/prometheus/prometheus/discovery/scaleway"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/discovery/triton"
@ -790,6 +791,34 @@ var expectedConf = &Config{
}},
},
},
{
JobName: "service-puppetdb",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{&puppetdb.SDConfig{
URL: "https://puppetserver/",
Query: "resources { type = \"Package\" and title = \"httpd\" }",
IncludeParameters: true,
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
HTTPClientConfig: config.HTTPClientConfig{
FollowRedirects: true,
TLSConfig: config.TLSConfig{
CAFile: "testdata/valid_ca_file",
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
},
},
},
},
},
{
JobName: "hetzner",
HonorTimestamps: true,
@ -1262,6 +1291,22 @@ var expectedErrors = []struct {
filename: "empty_static_config.bad.yml",
errMsg: "empty or null section in static_configs",
},
{
filename: "puppetdb_no_query.bad.yml",
errMsg: "query missing",
},
{
filename: "puppetdb_no_url.bad.yml",
errMsg: "URL is missing",
},
{
filename: "puppetdb_bad_url.bad.yml",
errMsg: "host is missing in URL",
},
{
filename: "puppetdb_no_scheme.bad.yml",
errMsg: "URL scheme must be 'http' or 'https'",
},
{
filename: "hetzner_role.bad.yml",
errMsg: "unknown role",
@ -1302,6 +1347,10 @@ var expectedErrors = []struct {
filename: "http_url_bad_scheme.bad.yml",
errMsg: "URL scheme must be 'http' or 'https'",
},
{
filename: "empty_scrape_config_action.bad.yml",
errMsg: "relabel action cannot be empty",
},
}
func TestBadConfigs(t *testing.T) {


@ -307,6 +307,18 @@ scrape_configs:
cert_file: valid_cert_file
key_file: valid_key_file
- job_name: service-puppetdb
puppetdb_sd_configs:
- url: https://puppetserver/
query: 'resources { type = "Package" and title = "httpd" }'
include_parameters: true
port: 80
refresh_interval: 1m
tls_config:
ca_file: valid_ca_file
cert_file: valid_cert_file
key_file: valid_key_file
- job_name: hetzner
hetzner_sd_configs:
- role: hcloud


@ -0,0 +1,4 @@
scrape_configs:
- job_name: prometheus
relabel_configs:
- action: null


@ -0,0 +1,4 @@
scrape_configs:
- puppetdb_sd_configs:
- url: http://
query: 'resources { type = "Package" and title = "httpd" }'


@ -0,0 +1,3 @@
scrape_configs:
- puppetdb_sd_configs:
- url: http://puppetserver/


@ -0,0 +1,4 @@
scrape_configs:
- puppetdb_sd_configs:
- url: ftp://puppet
query: 'resources { type = "Package" and title = "httpd" }'


@ -0,0 +1,3 @@
scrape_configs:
- puppetdb_sd_configs:
- query: 'resources { type = "Package" and title = "httpd" }'


@ -194,24 +194,16 @@ func (d *EC2Discovery) ec2Client(ctx context.Context) (*ec2.EC2, error) {
return d.ec2, nil
}
func (d *EC2Discovery) azID(ctx context.Context, az string) (string, error) {
if azID, ok := d.azToAZID[az]; ok {
return azID, nil
}
func (d *EC2Discovery) refreshAZIDs(ctx context.Context) error {
azs, err := d.ec2.DescribeAvailabilityZonesWithContext(ctx, &ec2.DescribeAvailabilityZonesInput{})
if err != nil {
return "", errors.Wrap(err, "could not describe availability zones")
return err
}
d.azToAZID = make(map[string]string, len(azs.AvailabilityZones))
for _, az := range azs.AvailabilityZones {
d.azToAZID[*az.ZoneName] = *az.ZoneId
}
if azID, ok := d.azToAZID[az]; ok {
return azID, nil
}
return "", fmt.Errorf("no availability zone ID mapping found for %s", az)
return nil
}
func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
@ -232,21 +224,23 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
})
}
input := &ec2.DescribeInstancesInput{Filters: filters}
// Only refresh the AZ ID map if we have never been able to build one.
// Prometheus requires a reload if AWS adds a new AZ to the region.
if d.azToAZID == nil {
if err := d.refreshAZIDs(ctx); err != nil {
level.Debug(d.logger).Log(
"msg", "Unable to describe availability zones",
"err", err)
}
}
input := &ec2.DescribeInstancesInput{Filters: filters}
if err := ec2Client.DescribeInstancesPagesWithContext(ctx, input, func(p *ec2.DescribeInstancesOutput, lastPage bool) bool {
for _, r := range p.Reservations {
for _, inst := range r.Instances {
if inst.PrivateIpAddress == nil {
continue
}
azID, err := d.azID(ctx, *inst.Placement.AvailabilityZone)
if err != nil {
level.Warn(d.logger).Log(
"msg", "Unable to determine availability zone ID",
"az", *inst.Placement.AvailabilityZone,
"err", err)
}
labels := model.LabelSet{
ec2LabelInstanceID: model.LabelValue(*inst.InstanceId),
@ -271,9 +265,14 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
labels[ec2LabelPublicIP] = model.LabelValue(*inst.PublicIpAddress)
labels[ec2LabelPublicDNS] = model.LabelValue(*inst.PublicDnsName)
}
labels[ec2LabelAMI] = model.LabelValue(*inst.ImageId)
labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone)
azID, ok := d.azToAZID[*inst.Placement.AvailabilityZone]
if !ok && d.azToAZID != nil {
level.Debug(d.logger).Log(
"msg", "Availability zone ID not found",
"az", *inst.Placement.AvailabilityZone)
}
labels[ec2LabelAZID] = model.LabelValue(azID)
labels[ec2LabelInstanceState] = model.LabelValue(*inst.State.Name)
labels[ec2LabelInstanceType] = model.LabelValue(*inst.InstanceType)


@ -464,7 +464,9 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine {
}
}
if vm.VirtualMachineProperties != nil && vm.VirtualMachineProperties.OsProfile != nil {
if vm.VirtualMachineProperties != nil &&
vm.VirtualMachineProperties.OsProfile != nil &&
vm.VirtualMachineProperties.OsProfile.ComputerName != nil {
computerName = *(vm.VirtualMachineProperties.OsProfile.ComputerName)
}


@ -199,7 +199,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
logger = log.NewNopLogger()
}
wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithHTTP2Disabled(), config.WithIdleConnTimeout(2*watchTimeout))
wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithIdleConnTimeout(2*watchTimeout))
if err != nil {
return nil, err
}


@ -108,7 +108,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
port: conf.Port,
}
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd", config.WithHTTP2Disabled())
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd")
if err != nil {
return nil, err
}


@ -118,7 +118,7 @@ type Discovery struct {
// NewDiscovery creates a new Eureka discovery for the given role.
func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd", config.WithHTTP2Disabled())
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd")
if err != nil {
return nil, err
}


@ -64,7 +64,7 @@ func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, er
port: conf.Port,
}
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd", config.WithHTTP2Disabled())
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd")
if err != nil {
return nil, err
}


@ -59,7 +59,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro
endpoint: conf.robotEndpoint,
}
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd", config.WithHTTP2Disabled())
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd")
if err != nil {
return nil, err
}


@ -113,7 +113,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
logger = log.NewNopLogger()
}
client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http", config.WithHTTP2Disabled())
client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http")
if err != nil {
return nil, err
}


@ -31,6 +31,7 @@ import (
_ "github.com/prometheus/prometheus/discovery/marathon" // register marathon
_ "github.com/prometheus/prometheus/discovery/moby" // register moby
_ "github.com/prometheus/prometheus/discovery/openstack" // register openstack
_ "github.com/prometheus/prometheus/discovery/puppetdb" // register puppetdb
_ "github.com/prometheus/prometheus/discovery/scaleway" // register scaleway
_ "github.com/prometheus/prometheus/discovery/triton" // register triton
_ "github.com/prometheus/prometheus/discovery/xds" // register xds


@ -15,11 +15,13 @@ package kubernetes
import (
"context"
"strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
v1 "k8s.io/api/networking/v1"
"k8s.io/api/networking/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
@ -112,26 +114,24 @@ func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) b
send(ctx, ch, &targetgroup.Group{Source: ingressSourceFromNamespaceAndName(namespace, name)})
return true
}
eps, err := convertToIngress(o)
if err != nil {
level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err", err)
var ia ingressAdaptor
switch ingress := o.(type) {
case *v1.Ingress:
ia = newIngressAdaptorFromV1(ingress)
case *v1beta1.Ingress:
ia = newIngressAdaptorFromV1beta1(ingress)
default:
level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err",
errors.Errorf("received unexpected object: %v", o))
return true
}
send(ctx, ch, i.buildIngress(eps))
send(ctx, ch, i.buildIngress(ia))
return true
}
func convertToIngress(o interface{}) (*v1beta1.Ingress, error) {
ingress, ok := o.(*v1beta1.Ingress)
if ok {
return ingress, nil
}
return nil, errors.Errorf("received unexpected object: %v", o)
}
func ingressSource(s *v1beta1.Ingress) string {
return ingressSourceFromNamespaceAndName(s.Namespace, s.Name)
func ingressSource(s ingressAdaptor) string {
return ingressSourceFromNamespaceAndName(s.namespace(), s.name())
}
func ingressSourceFromNamespaceAndName(namespace, name string) string {
@ -150,22 +150,22 @@ const (
ingressClassNameLabel = metaLabelPrefix + "ingress_class_name"
)
func ingressLabels(ingress *v1beta1.Ingress) model.LabelSet {
func ingressLabels(ingress ingressAdaptor) model.LabelSet {
// Each label and annotation will create two key-value pairs in the map.
ls := make(model.LabelSet, 2*(len(ingress.Labels)+len(ingress.Annotations))+2)
ls[ingressNameLabel] = lv(ingress.Name)
ls[namespaceLabel] = lv(ingress.Namespace)
if ingress.Spec.IngressClassName != nil {
ls[ingressClassNameLabel] = lv(*ingress.Spec.IngressClassName)
ls := make(model.LabelSet, 2*(len(ingress.labels())+len(ingress.annotations()))+2)
ls[ingressNameLabel] = lv(ingress.name())
ls[namespaceLabel] = lv(ingress.namespace())
if cls := ingress.ingressClassName(); cls != nil {
ls[ingressClassNameLabel] = lv(*cls)
}
for k, v := range ingress.Labels {
for k, v := range ingress.labels() {
ln := strutil.SanitizeLabelName(k)
ls[model.LabelName(ingressLabelPrefix+ln)] = lv(v)
ls[model.LabelName(ingressLabelPresentPrefix+ln)] = presentValue
}
for k, v := range ingress.Annotations {
for k, v := range ingress.annotations() {
ln := strutil.SanitizeLabelName(k)
ls[model.LabelName(ingressAnnotationPrefix+ln)] = lv(v)
ls[model.LabelName(ingressAnnotationPresentPrefix+ln)] = presentValue
@ -173,14 +173,14 @@ func ingressLabels(ingress *v1beta1.Ingress) model.LabelSet {
return ls
}
func pathsFromIngressRule(rv *v1beta1.IngressRuleValue) []string {
if rv.HTTP == nil {
func pathsFromIngressPaths(ingressPaths []string) []string {
if ingressPaths == nil {
return []string{"/"}
}
paths := make([]string, len(rv.HTTP.Paths))
for n, p := range rv.HTTP.Paths {
path := p.Path
if path == "" {
paths := make([]string, len(ingressPaths))
for n, p := range ingressPaths {
path := p
if p == "" {
path = "/"
}
paths[n] = path
@ -188,33 +188,29 @@ func pathsFromIngressRule(rv *v1beta1.IngressRuleValue) []string {
return paths
}
func (i *Ingress) buildIngress(ingress *v1beta1.Ingress) *targetgroup.Group {
func (i *Ingress) buildIngress(ingress ingressAdaptor) *targetgroup.Group {
tg := &targetgroup.Group{
Source: ingressSource(ingress),
}
tg.Labels = ingressLabels(ingress)
tlsHosts := make(map[string]struct{})
for _, tls := range ingress.Spec.TLS {
for _, host := range tls.Hosts {
tlsHosts[host] = struct{}{}
}
}
for _, rule := range ingress.Spec.Rules {
paths := pathsFromIngressRule(&rule.IngressRuleValue)
for _, rule := range ingress.rules() {
scheme := "http"
_, isTLS := tlsHosts[rule.Host]
if isTLS {
scheme = "https"
paths := pathsFromIngressPaths(rule.paths())
out:
for _, pattern := range ingress.tlsHosts() {
if matchesHostnamePattern(pattern, rule.host()) {
scheme = "https"
break out
}
}
for _, path := range paths {
tg.Targets = append(tg.Targets, model.LabelSet{
model.AddressLabel: lv(rule.Host),
model.AddressLabel: lv(rule.host()),
ingressSchemeLabel: lv(scheme),
ingressHostLabel: lv(rule.Host),
ingressHostLabel: lv(rule.host()),
ingressPathLabel: lv(path),
})
}
@ -222,3 +218,33 @@ func (i *Ingress) buildIngress(ingress *v1beta1.Ingress) *targetgroup.Group {
return tg
}
// matchesHostnamePattern returns true if the host matches a wildcard DNS
// pattern or pattern and host are equal.
func matchesHostnamePattern(pattern, host string) bool {
if pattern == host {
return true
}
patternParts := strings.Split(pattern, ".")
hostParts := strings.Split(host, ".")
// If the first element of the pattern is not a wildcard, give up.
if len(patternParts) == 0 || patternParts[0] != "*" {
return false
}
// A wildcard match requires the pattern and the host to have the same number
// of dot-separated parts.
if len(patternParts) != len(hostParts) {
return false
}
for i := 1; i < len(patternParts); i++ {
if patternParts[i] != hostParts[i] {
return false
}
}
return true
}


@ -0,0 +1,141 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
v1 "k8s.io/api/networking/v1"
"k8s.io/api/networking/v1beta1"
)
// ingressAdaptor is an adaptor for the different Ingress versions
type ingressAdaptor interface {
name() string
namespace() string
labels() map[string]string
annotations() map[string]string
tlsHosts() []string
ingressClassName() *string
rules() []ingressRuleAdaptor
}
type ingressRuleAdaptor interface {
paths() []string
host() string
}
// Adaptor for networking.k8s.io/v1
type ingressAdaptorV1 struct {
ingress *v1.Ingress
}
func newIngressAdaptorFromV1(ingress *v1.Ingress) ingressAdaptor {
return &ingressAdaptorV1{ingress: ingress}
}
func (i *ingressAdaptorV1) name() string { return i.ingress.Name }
func (i *ingressAdaptorV1) namespace() string { return i.ingress.Namespace }
func (i *ingressAdaptorV1) labels() map[string]string { return i.ingress.Labels }
func (i *ingressAdaptorV1) annotations() map[string]string { return i.ingress.Annotations }
func (i *ingressAdaptorV1) ingressClassName() *string { return i.ingress.Spec.IngressClassName }
func (i *ingressAdaptorV1) tlsHosts() []string {
var hosts []string
for _, tls := range i.ingress.Spec.TLS {
hosts = append(hosts, tls.Hosts...)
}
return hosts
}
func (i *ingressAdaptorV1) rules() []ingressRuleAdaptor {
var rules []ingressRuleAdaptor
for _, rule := range i.ingress.Spec.Rules {
rules = append(rules, newIngressRuleAdaptorFromV1(rule))
}
return rules
}
type ingressRuleAdaptorV1 struct {
rule v1.IngressRule
}
func newIngressRuleAdaptorFromV1(rule v1.IngressRule) ingressRuleAdaptor {
return &ingressRuleAdaptorV1{rule: rule}
}
func (i *ingressRuleAdaptorV1) paths() []string {
rv := i.rule.IngressRuleValue
if rv.HTTP == nil {
return nil
}
paths := make([]string, len(rv.HTTP.Paths))
for n, p := range rv.HTTP.Paths {
paths[n] = p.Path
}
return paths
}
func (i *ingressRuleAdaptorV1) host() string { return i.rule.Host }
// Adaptor for networking.k8s.io/v1beta1
type ingressAdaptorV1Beta1 struct {
ingress *v1beta1.Ingress
}
func newIngressAdaptorFromV1beta1(ingress *v1beta1.Ingress) ingressAdaptor {
return &ingressAdaptorV1Beta1{ingress: ingress}
}
func (i *ingressAdaptorV1Beta1) name() string { return i.ingress.Name }
func (i *ingressAdaptorV1Beta1) namespace() string { return i.ingress.Namespace }
func (i *ingressAdaptorV1Beta1) labels() map[string]string { return i.ingress.Labels }
func (i *ingressAdaptorV1Beta1) annotations() map[string]string { return i.ingress.Annotations }
func (i *ingressAdaptorV1Beta1) ingressClassName() *string { return i.ingress.Spec.IngressClassName }
func (i *ingressAdaptorV1Beta1) tlsHosts() []string {
var hosts []string
for _, tls := range i.ingress.Spec.TLS {
hosts = append(hosts, tls.Hosts...)
}
return hosts
}
func (i *ingressAdaptorV1Beta1) rules() []ingressRuleAdaptor {
var rules []ingressRuleAdaptor
for _, rule := range i.ingress.Spec.Rules {
rules = append(rules, newIngressRuleAdaptorFromV1Beta1(rule))
}
return rules
}
type ingressRuleAdaptorV1Beta1 struct {
rule v1beta1.IngressRule
}
func newIngressRuleAdaptorFromV1Beta1(rule v1beta1.IngressRule) ingressRuleAdaptor {
return &ingressRuleAdaptorV1Beta1{rule: rule}
}
func (i *ingressRuleAdaptorV1Beta1) paths() []string {
rv := i.rule.IngressRuleValue
if rv.HTTP == nil {
return nil
}
paths := make([]string, len(rv.HTTP.Paths))
for n, p := range rv.HTTP.Paths {
paths[n] = p.Path
}
return paths
}
func (i *ingressRuleAdaptorV1Beta1) host() string { return i.rule.Host }


@ -19,6 +19,7 @@ import (
"testing"
"github.com/prometheus/common/model"
v1 "k8s.io/api/networking/v1"
"k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -31,9 +32,64 @@ const (
TLSNo TLSMode = iota
TLSYes
TLSMixed
TLSWildcard
)
func makeIngress(tls TLSMode) *v1beta1.Ingress {
func makeIngress(tls TLSMode) *v1.Ingress {
ret := &v1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "testingress",
Namespace: "default",
Labels: map[string]string{"test/label": "testvalue"},
Annotations: map[string]string{"test/annotation": "testannotationvalue"},
},
Spec: v1.IngressSpec{
IngressClassName: classString("testclass"),
TLS: nil,
Rules: []v1.IngressRule{
{
Host: "example.com",
IngressRuleValue: v1.IngressRuleValue{
HTTP: &v1.HTTPIngressRuleValue{
Paths: []v1.HTTPIngressPath{
{Path: "/"},
{Path: "/foo"},
},
},
},
},
{
// No backend config, ignored
Host: "nobackend.example.com",
IngressRuleValue: v1.IngressRuleValue{
HTTP: &v1.HTTPIngressRuleValue{},
},
},
{
Host: "test.example.com",
IngressRuleValue: v1.IngressRuleValue{
HTTP: &v1.HTTPIngressRuleValue{
Paths: []v1.HTTPIngressPath{{}},
},
},
},
},
},
}
switch tls {
case TLSYes:
ret.Spec.TLS = []v1.IngressTLS{{Hosts: []string{"example.com", "test.example.com"}}}
case TLSMixed:
ret.Spec.TLS = []v1.IngressTLS{{Hosts: []string{"example.com"}}}
case TLSWildcard:
ret.Spec.TLS = []v1.IngressTLS{{Hosts: []string{"*.example.com"}}}
}
return ret
}
func makeIngressV1beta1(tls TLSMode) *v1beta1.Ingress {
ret := &v1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "testingress",
@ -80,6 +136,8 @@ func makeIngress(tls TLSMode) *v1beta1.Ingress {
ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com", "test.example.com"}}}
case TLSMixed:
ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com"}}}
case TLSWildcard:
ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"*.example.com"}}}
}
return ret
@ -99,6 +157,8 @@ func expectedTargetGroups(ns string, tls TLSMode) map[string]*targetgroup.Group
scheme2 = "https"
case TLSMixed:
scheme1 = "https"
case TLSWildcard:
scheme2 = "https"
}
key := fmt.Sprintf("ingress/%s/testingress", ns)
@ -145,6 +205,20 @@ func TestIngressDiscoveryAdd(t *testing.T) {
discovery: n,
afterStart: func() {
obj := makeIngress(TLSNo)
c.NetworkingV1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
expectedRes: expectedTargetGroups("default", TLSNo),
}.Run(t)
}
func TestIngressDiscoveryAddV1beta1(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0")
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := makeIngressV1beta1(TLSNo)
c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
@ -159,6 +233,20 @@ func TestIngressDiscoveryAddTLS(t *testing.T) {
discovery: n,
afterStart: func() {
obj := makeIngress(TLSYes)
c.NetworkingV1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
expectedRes: expectedTargetGroups("default", TLSYes),
}.Run(t)
}
func TestIngressDiscoveryAddTLSV1beta1(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0")
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := makeIngressV1beta1(TLSYes)
c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
@ -173,6 +261,20 @@ func TestIngressDiscoveryAddMixed(t *testing.T) {
discovery: n,
afterStart: func() {
obj := makeIngress(TLSMixed)
c.NetworkingV1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
expectedRes: expectedTargetGroups("default", TLSMixed),
}.Run(t)
}
func TestIngressDiscoveryAddMixedV1beta1(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0")
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := makeIngressV1beta1(TLSMixed)
c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
@ -193,6 +295,27 @@ func TestIngressDiscoveryNamespaces(t *testing.T) {
for _, ns := range []string{"ns1", "ns2"} {
obj := makeIngress(TLSNo)
obj.Namespace = ns
c.NetworkingV1().Ingresses(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
}
},
expectedMaxItems: 2,
expectedRes: expected,
}.Run(t)
}
func TestIngressDiscoveryNamespacesV1beta1(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}, "v1.18.0")
expected := expectedTargetGroups("ns1", TLSNo)
for k, v := range expectedTargetGroups("ns2", TLSNo) {
expected[k] = v
}
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
for _, ns := range []string{"ns1", "ns2"} {
obj := makeIngressV1beta1(TLSNo)
obj.Namespace = ns
c.NetworkingV1beta1().Ingresses(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
}
},

View file

@ -30,11 +30,13 @@ import (
"github.com/prometheus/common/version"
apiv1 "k8s.io/api/core/v1"
disv1beta1 "k8s.io/api/discovery/v1beta1"
networkv1 "k8s.io/api/networking/v1"
"k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
@ -281,7 +283,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
}
level.Info(l).Log("msg", "Using pod service account via in-cluster config")
} else {
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd", config.WithHTTP2Disabled())
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd")
if err != nil {
return nil, err
}
@ -491,23 +493,58 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
go svc.informer.Run(ctx.Done())
}
case RoleIngress:
// Check "networking.k8s.io/v1" availability with retries.
// If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility.
var v1Supported bool
if retryOnError(ctx, 10*time.Second,
func() (err error) {
v1Supported, err = checkNetworkingV1Supported(d.client)
if err != nil {
level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err)
}
return err
},
) {
d.Unlock()
return
}
for _, namespace := range namespaces {
i := d.client.NetworkingV1beta1().Ingresses(namespace)
ilw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.ingress.field
options.LabelSelector = d.selectors.ingress.label
return i.List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.ingress.field
options.LabelSelector = d.selectors.ingress.label
return i.Watch(ctx, options)
},
var informer cache.SharedInformer
if v1Supported {
i := d.client.NetworkingV1().Ingresses(namespace)
ilw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.ingress.field
options.LabelSelector = d.selectors.ingress.label
return i.List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.ingress.field
options.LabelSelector = d.selectors.ingress.label
return i.Watch(ctx, options)
},
}
informer = cache.NewSharedInformer(ilw, &networkv1.Ingress{}, resyncPeriod)
} else {
i := d.client.NetworkingV1beta1().Ingresses(namespace)
ilw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.ingress.field
options.LabelSelector = d.selectors.ingress.label
return i.List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.ingress.field
options.LabelSelector = d.selectors.ingress.label
return i.Watch(ctx, options)
},
}
informer = cache.NewSharedInformer(ilw, &v1beta1.Ingress{}, resyncPeriod)
}
ingress := NewIngress(
log.With(d.logger, "role", "ingress"),
cache.NewSharedInformer(ilw, &v1beta1.Ingress{}, resyncPeriod),
informer,
)
d.discoverers = append(d.discoverers, ingress)
go ingress.informer.Run(ctx.Done())
@ -563,3 +600,33 @@ func send(ctx context.Context, ch chan<- []*targetgroup.Group, tg *targetgroup.G
case ch <- []*targetgroup.Group{tg}:
}
}
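// retryOnError calls f immediately and then retries it at the given interval
// until it succeeds or ctx is canceled. It returns true if the context was
// canceled before f succeeded.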
func retryOnError(ctx context.Context, interval time.Duration, f func() error) (canceled bool) {
var err error
err = f()
for {
if err == nil {
return false
}
select {
case <-ctx.Done():
return true
case <-time.After(interval):
err = f()
}
}
}
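// checkNetworkingV1Supported reports whether the connected cluster serves the
// networking.k8s.io/v1 API, which is available since Kubernetes v1.19.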
func checkNetworkingV1Supported(client kubernetes.Interface) (bool, error) {
k8sVer, err := client.Discovery().ServerVersion()
if err != nil {
return false, err
}
semVer, err := utilversion.ParseSemantic(k8sVer.String())
if err != nil {
return false, err
}
// networking.k8s.io/v1 is available since Kubernetes v1.19
// https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md
return semVer.Major() >= 1 && semVer.Minor() >= 19, nil
}

View file

@ -20,8 +20,11 @@ import (
"time"
"github.com/go-kit/log"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/version"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
@ -37,7 +40,14 @@ func TestMain(m *testing.M) {
// makeDiscovery creates a kubernetes.Discovery instance for testing.
func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime.Object) (*Discovery, kubernetes.Interface) {
return makeDiscoveryWithVersion(role, nsDiscovery, "v1.22.0", objects...)
}
// makeDiscoveryWithVersion creates a kubernetes.Discovery instance with the specified kubernetes version for testing.
func makeDiscoveryWithVersion(role Role, nsDiscovery NamespaceDiscovery, k8sVer string, objects ...runtime.Object) (*Discovery, kubernetes.Interface) {
clientset := fake.NewSimpleClientset(objects...)
fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery)
fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: k8sVer}
return &Discovery{
client: clientset,
@ -205,3 +215,52 @@ func (p *Pod) hasSynced() bool {
func (s *Service) hasSynced() bool {
return s.informer.HasSynced()
}
func TestRetryOnError(t *testing.T) {
for _, successAt := range []int{1, 2, 3} {
var called int
f := func() error {
called++
if called >= successAt {
return nil
}
return errors.New("dummy")
}
retryOnError(context.TODO(), 0, f)
require.Equal(t, successAt, called)
}
}
func TestCheckNetworkingV1Supported(t *testing.T) {
tests := []struct {
version string
wantSupported bool
wantErr bool
}{
{version: "v1.18.0", wantSupported: false, wantErr: false},
{version: "v1.18.1", wantSupported: false, wantErr: false},
// networking v1 is supported since Kubernetes v1.19
{version: "v1.19.0", wantSupported: true, wantErr: false},
{version: "v1.20.0-beta.2", wantSupported: true, wantErr: false},
// error patterns
{version: "", wantSupported: false, wantErr: true},
{version: "<>", wantSupported: false, wantErr: true},
}
for _, tc := range tests {
tc := tc
t.Run(tc.version, func(t *testing.T) {
clientset := fake.NewSimpleClientset()
fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery)
fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: tc.version}
supported, err := checkNetworkingV1Supported(clientset)
if tc.wantErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
require.Equal(t, tc.wantSupported, supported)
})
}
}

View file

@ -132,7 +132,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
eventPollingEnabled: true,
}
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "linode_sd", config.WithHTTP2Disabled())
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "linode_sd")
if err != nil {
return nil, err
}

View file

@ -131,7 +131,7 @@ type Discovery struct {
// NewDiscovery returns a new Marathon Discovery.
func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd", config.WithHTTP2Disabled())
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd")
if err != nil {
return nil, err
}

View file

@ -142,7 +142,7 @@ func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger) (*DockerDiscove
// unix, which are not supported by the HTTP client. Passing HTTP client
// options to the Docker client makes those non-HTTP requests fail.
if hostURL.Scheme == "http" || hostURL.Scheme == "https" {
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "docker_sd", config.WithHTTP2Disabled())
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "docker_sd")
if err != nil {
return nil, err
}

View file

@ -146,7 +146,7 @@ func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger) (*Discovery, err
// unix, which are not supported by the HTTP client. Passing HTTP client
// options to the Docker client makes those non-HTTP requests fail.
if hostURL.Scheme == "http" || hostURL.Scheme == "https" {
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "dockerswarm_sd", config.WithHTTP2Disabled())
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "dockerswarm_sd")
if err != nil {
return nil, err
}

View file

@ -0,0 +1,49 @@
[
{
"certname": "edinburgh.example.com",
"environment": "prod",
"exported": false,
"file": "/etc/puppetlabs/code/environments/prod/modules/upstream/apache/manifests/init.pp",
"line": 384,
"parameters": {
"access_log": true,
"access_log_file": "ssl_access_log",
"additional_includes": [ ],
"directoryindex": "",
"docroot": "/var/www/html",
"ensure": "absent",
"options": [
"Indexes",
"FollowSymLinks",
"MultiViews"
],
"php_flags": { },
"labels": {
"alias": "edinburgh"
},
"scriptaliases": [
{
"alias": "/cgi-bin",
"path": "/var/www/cgi-bin"
}
]
},
"resource": "49af83866dc5a1518968b68e58a25319107afe11",
"tags": [
"roles::hypervisor",
"apache",
"apache::vhost",
"class",
"default-ssl",
"profile_hypervisor",
"vhost",
"profile_apache",
"hypervisor",
"__node_regexp__edinburgh",
"roles",
"node"
],
"title": "default-ssl",
"type": "Apache::Vhost"
}
]

View file

@ -0,0 +1,252 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package puppetdb
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/go-kit/log"
"github.com/pkg/errors"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
const (
pdbLabel = model.MetaLabelPrefix + "puppetdb_"
pdbLabelCertname = pdbLabel + "certname"
pdbLabelResource = pdbLabel + "resource"
pdbLabelType = pdbLabel + "type"
pdbLabelTitle = pdbLabel + "title"
pdbLabelExported = pdbLabel + "exported"
pdbLabelTags = pdbLabel + "tags"
pdbLabelFile = pdbLabel + "file"
pdbLabelEnvironment = pdbLabel + "environment"
pdbLabelParameter = pdbLabel + "parameter_"
separator = ","
)
var (
// DefaultSDConfig is the default PuppetDB SD configuration.
DefaultSDConfig = SDConfig{
RefreshInterval: model.Duration(60 * time.Second),
Port: 80,
HTTPClientConfig: config.DefaultHTTPClientConfig,
}
matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`)
userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
)
func init() {
discovery.RegisterConfig(&SDConfig{})
}
// SDConfig is the configuration for PuppetDB based discovery.
type SDConfig struct {
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
URL string `yaml:"url"`
Query string `yaml:"query"`
IncludeParameters bool `yaml:"include_parameters"`
Port int `yaml:"port"`
}
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "puppetdb" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(c, opts.Logger)
}
// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
c.HTTPClientConfig.SetDirectory(dir)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))
if err != nil {
return err
}
if c.URL == "" {
return fmt.Errorf("URL is missing")
}
parsedURL, err := url.Parse(c.URL)
if err != nil {
return err
}
if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
return fmt.Errorf("URL scheme must be 'http' or 'https'")
}
if parsedURL.Host == "" {
return fmt.Errorf("host is missing in URL")
}
if c.Query == "" {
return fmt.Errorf("query missing")
}
return nil
}
// Discovery provides service discovery functionality based
// on PuppetDB resources.
type Discovery struct {
*refresh.Discovery
url string
query string
port int
includeParameters bool
client *http.Client
}
// NewDiscovery returns a new PuppetDB discovery for the given config.
func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
if logger == nil {
logger = log.NewNopLogger()
}
client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http")
if err != nil {
return nil, err
}
client.Timeout = time.Duration(conf.RefreshInterval)
u, err := url.Parse(conf.URL)
if err != nil {
return nil, err
}
u.Path = path.Join(u.Path, "pdb/query/v4")
d := &Discovery{
url: u.String(),
port: conf.Port,
query: conf.Query,
includeParameters: conf.IncludeParameters,
client: client,
}
d.Discovery = refresh.NewDiscovery(
logger,
"http",
time.Duration(conf.RefreshInterval),
d.refresh,
)
return d, nil
}
func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
body := struct {
Query string `json:"query"`
}{d.query}
bodyBytes, err := json.Marshal(body)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", d.url, bytes.NewBuffer(bodyBytes))
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", userAgent)
req.Header.Set("Accept", "application/json")
req.Header.Set("Content-Type", "application/json")
resp, err := d.client.Do(req.WithContext(ctx))
if err != nil {
return nil, err
}
defer func() {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()
if resp.StatusCode != http.StatusOK {
return nil, errors.Errorf("server returned HTTP status %s", resp.Status)
}
if ct := resp.Header.Get("Content-Type"); !matchContentType.MatchString(ct) {
return nil, errors.Errorf("unsupported content type %s", resp.Header.Get("Content-Type"))
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
var resources []Resource
if err := json.Unmarshal(b, &resources); err != nil {
return nil, err
}
tg := &targetgroup.Group{
// Use a pseudo-URL as source.
Source: d.url + "?query=" + d.query,
}
for _, resource := range resources {
labels := model.LabelSet{
pdbLabelCertname: model.LabelValue(resource.Certname),
pdbLabelResource: model.LabelValue(resource.Resource),
pdbLabelType: model.LabelValue(resource.Type),
pdbLabelTitle: model.LabelValue(resource.Title),
pdbLabelExported: model.LabelValue(fmt.Sprintf("%t", resource.Exported)),
pdbLabelFile: model.LabelValue(resource.File),
pdbLabelEnvironment: model.LabelValue(resource.Environment),
}
addr := net.JoinHostPort(resource.Certname, strconv.FormatUint(uint64(d.port), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
if len(resource.Tags) > 0 {
// We surround the separated list with the separator as well. This way regular expressions
// in relabeling rules don't have to consider tag positions.
tags := separator + strings.Join(resource.Tags, separator) + separator
labels[pdbLabelTags] = model.LabelValue(tags)
}
// Parameters are not included by default. This should only be enabled
// on select resources as it might expose secrets on the Prometheus UI
// for certain resources.
if d.includeParameters {
for k, v := range resource.Parameters.toLabels() {
labels[pdbLabelParameter+k] = v
}
}
tg.Targets = append(tg.Targets, labels)
}
return []*targetgroup.Group{tg}, nil
}

View file

@ -0,0 +1,195 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package puppetdb
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/go-kit/log"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/stretchr/testify/require"
)
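// mockServer returns a test server that decodes the PuppetDB query from the
// request body and serves the matching JSON fixture from the fixtures directory.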
func mockServer(t *testing.T) *httptest.Server {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var request struct {
Query string `json:"query"`
}
err := json.NewDecoder(r.Body).Decode(&request)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}
http.ServeFile(w, r, "fixtures/"+request.Query+".json")
}))
t.Cleanup(ts.Close)
return ts
}
func TestPuppetSlashInURL(t *testing.T) {
tests := map[string]string{
"https://puppetserver": "https://puppetserver/pdb/query/v4",
"https://puppetserver/": "https://puppetserver/pdb/query/v4",
"http://puppetserver:8080/": "http://puppetserver:8080/pdb/query/v4",
"http://puppetserver:8080": "http://puppetserver:8080/pdb/query/v4",
}
for serverURL, apiURL := range tests {
cfg := SDConfig{
HTTPClientConfig: config.DefaultHTTPClientConfig,
URL: serverURL,
Query: "vhosts", // This is not a valid PuppetDB query, but it is used by the mock.
Port: 80,
RefreshInterval: model.Duration(30 * time.Second),
}
d, err := NewDiscovery(&cfg, log.NewNopLogger())
require.NoError(t, err)
require.Equal(t, apiURL, d.url)
}
}
func TestPuppetDBRefresh(t *testing.T) {
ts := mockServer(t)
cfg := SDConfig{
HTTPClientConfig: config.DefaultHTTPClientConfig,
URL: ts.URL,
Query: "vhosts", // This is not a valid PuppetDB query, but it is used by the mock.
Port: 80,
RefreshInterval: model.Duration(30 * time.Second),
}
d, err := NewDiscovery(&cfg, log.NewNopLogger())
require.NoError(t, err)
ctx := context.Background()
tgs, err := d.refresh(ctx)
require.NoError(t, err)
expectedTargets := []*targetgroup.Group{
{
Targets: []model.LabelSet{
{
model.AddressLabel: model.LabelValue("edinburgh.example.com:80"),
model.LabelName("__meta_puppetdb_certname"): model.LabelValue("edinburgh.example.com"),
model.LabelName("__meta_puppetdb_environment"): model.LabelValue("prod"),
model.LabelName("__meta_puppetdb_exported"): model.LabelValue("false"),
model.LabelName("__meta_puppetdb_file"): model.LabelValue("/etc/puppetlabs/code/environments/prod/modules/upstream/apache/manifests/init.pp"),
model.LabelName("__meta_puppetdb_resource"): model.LabelValue("49af83866dc5a1518968b68e58a25319107afe11"),
model.LabelName("__meta_puppetdb_tags"): model.LabelValue(",roles::hypervisor,apache,apache::vhost,class,default-ssl,profile_hypervisor,vhost,profile_apache,hypervisor,__node_regexp__edinburgh,roles,node,"),
model.LabelName("__meta_puppetdb_title"): model.LabelValue("default-ssl"),
model.LabelName("__meta_puppetdb_type"): model.LabelValue("Apache::Vhost"),
},
},
Source: ts.URL + "/pdb/query/v4?query=vhosts",
},
}
require.Equal(t, tgs, expectedTargets)
}
func TestPuppetDBRefreshWithParameters(t *testing.T) {
ts := mockServer(t)
cfg := SDConfig{
HTTPClientConfig: config.DefaultHTTPClientConfig,
URL: ts.URL,
Query: "vhosts", // This is not a valid PuppetDB query, but it is used by the mock.
Port: 80,
IncludeParameters: true,
RefreshInterval: model.Duration(30 * time.Second),
}
d, err := NewDiscovery(&cfg, log.NewNopLogger())
require.NoError(t, err)
ctx := context.Background()
tgs, err := d.refresh(ctx)
require.NoError(t, err)
expectedTargets := []*targetgroup.Group{
{
Targets: []model.LabelSet{
{
model.AddressLabel: model.LabelValue("edinburgh.example.com:80"),
model.LabelName("__meta_puppetdb_certname"): model.LabelValue("edinburgh.example.com"),
model.LabelName("__meta_puppetdb_environment"): model.LabelValue("prod"),
model.LabelName("__meta_puppetdb_exported"): model.LabelValue("false"),
model.LabelName("__meta_puppetdb_file"): model.LabelValue("/etc/puppetlabs/code/environments/prod/modules/upstream/apache/manifests/init.pp"),
model.LabelName("__meta_puppetdb_parameter_access_log"): model.LabelValue("true"),
model.LabelName("__meta_puppetdb_parameter_access_log_file"): model.LabelValue("ssl_access_log"),
model.LabelName("__meta_puppetdb_parameter_docroot"): model.LabelValue("/var/www/html"),
model.LabelName("__meta_puppetdb_parameter_ensure"): model.LabelValue("absent"),
model.LabelName("__meta_puppetdb_parameter_labels_alias"): model.LabelValue("edinburgh"),
model.LabelName("__meta_puppetdb_parameter_options"): model.LabelValue("Indexes,FollowSymLinks,MultiViews"),
model.LabelName("__meta_puppetdb_resource"): model.LabelValue("49af83866dc5a1518968b68e58a25319107afe11"),
model.LabelName("__meta_puppetdb_tags"): model.LabelValue(",roles::hypervisor,apache,apache::vhost,class,default-ssl,profile_hypervisor,vhost,profile_apache,hypervisor,__node_regexp__edinburgh,roles,node,"),
model.LabelName("__meta_puppetdb_title"): model.LabelValue("default-ssl"),
model.LabelName("__meta_puppetdb_type"): model.LabelValue("Apache::Vhost"),
},
},
Source: ts.URL + "/pdb/query/v4?query=vhosts",
},
}
require.Equal(t, tgs, expectedTargets)
}
func TestPuppetDBInvalidCode(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusBadRequest)
}))
t.Cleanup(ts.Close)
cfg := SDConfig{
HTTPClientConfig: config.DefaultHTTPClientConfig,
URL: ts.URL,
RefreshInterval: model.Duration(30 * time.Second),
}
d, err := NewDiscovery(&cfg, log.NewNopLogger())
require.NoError(t, err)
ctx := context.Background()
_, err = d.refresh(ctx)
require.EqualError(t, err, "server returned HTTP status 400 Bad Request")
}
func TestPuppetDBInvalidFormat(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "{}")
}))
t.Cleanup(ts.Close)
cfg := SDConfig{
HTTPClientConfig: config.DefaultHTTPClientConfig,
URL: ts.URL,
RefreshInterval: model.Duration(30 * time.Second),
}
d, err := NewDiscovery(&cfg, log.NewNopLogger())
require.NoError(t, err)
ctx := context.Background()
_, err = d.refresh(ctx)
require.EqualError(t, err, "unsupported content type text/plain; charset=utf-8")
}

View file

@ -0,0 +1,82 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package puppetdb
import (
"strconv"
"strings"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/util/strutil"
)
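// Resource describes a PuppetDB resource as returned by the PuppetDB query API.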
type Resource struct {
Certname string `json:"certname"`
Resource string `json:"resource"`
Type string `json:"type"`
Title string `json:"title"`
Exported bool `json:"exported"`
Tags []string `json:"tags"`
File string `json:"file"`
Environment string `json:"environment"`
Parameters Parameters `json:"parameters"`
}
type Parameters map[string]interface{}
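// toLabels flattens resource parameters into a label set. Strings, booleans and
// lists are rendered directly, nested maps are prefixed with the parent key, and
// unsupported or empty values are skipped.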
func (p *Parameters) toLabels() model.LabelSet {
labels := model.LabelSet{}
for k, v := range *p {
var labelValue string
switch value := v.(type) {
case string:
labelValue = value
case bool:
labelValue = strconv.FormatBool(value)
case []string:
labelValue = separator + strings.Join(value, separator) + separator
case []interface{}:
if len(value) == 0 {
continue
}
values := make([]string, len(value))
for i, v := range value {
switch value := v.(type) {
case string:
values[i] = value
case bool:
values[i] = strconv.FormatBool(value)
case []string:
values[i] = separator + strings.Join(value, separator) + separator
}
}
labelValue = strings.Join(values, separator)
case map[string]interface{}:
subParameter := Parameters(value)
prefix := strutil.SanitizeLabelName(k + "_")
for subk, subv := range subParameter.toLabels() {
labels[model.LabelName(prefix)+subk] = subv
}
default:
continue
}
if labelValue == "" {
continue
}
name := strutil.SanitizeLabelName(k)
labels[model.LabelName(name)] = model.LabelValue(labelValue)
}
return labels
}

View file

@ -70,7 +70,7 @@ func newBaremetalDiscovery(conf *SDConfig) (*baremetalDiscovery, error) {
tagsFilter: conf.TagsFilter,
}
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd", config.WithHTTP2Disabled())
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd")
if err != nil {
return nil, err
}

View file

@ -81,7 +81,7 @@ func newInstanceDiscovery(conf *SDConfig) (*instanceDiscovery, error) {
tagsFilter: conf.TagsFilter,
}
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd", config.WithHTTP2Disabled())
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd")
if err != nil {
return nil, err
}

View file

@ -112,7 +112,7 @@ func NewHTTPResourceClient(conf *HTTPResourceClientConfig, protocolVersion Proto
endpointURL.RawQuery = conf.ExtraQueryParams.Encode()
}
client, err := config.NewClientFromConfig(conf.HTTPClientConfig, conf.Name, config.WithHTTP2Disabled(), config.WithIdleConnTimeout(conf.Timeout))
client, err := config.NewClientFromConfig(conf.HTTPClientConfig, conf.Name, config.WithIdleConnTimeout(conf.Timeout))
if err != nil {
return nil, err
}

View file

@ -272,6 +272,10 @@ nerve_sd_configs:
openstack_sd_configs:
[ - <openstack_sd_config> ... ]
# List of PuppetDB service discovery configurations.
puppetdb_sd_configs:
[ - <puppetdb_sd_config> ... ]
# List of Scaleway service discovery configurations.
scaleway_sd_configs:
[ - <scaleway_sd_config> ... ]
@ -901,7 +905,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_ec2_ami`: the EC2 Amazon Machine Image
* `__meta_ec2_architecture`: the architecture of the instance
* `__meta_ec2_availability_zone`: the availability zone in which the instance is running
* `__meta_ec2_availability_zone_id`: the [availability zone ID](https://docs.aws.amazon.com/ram/latest/userguide/working-with-az-ids.html) in which the instance is running
* `__meta_ec2_availability_zone_id`: the [availability zone ID](https://docs.aws.amazon.com/ram/latest/userguide/working-with-az-ids.html) in which the instance is running (requires `ec2:DescribeAvailabilityZones`)
* `__meta_ec2_instance_id`: the EC2 instance ID
* `__meta_ec2_instance_lifecycle`: the lifecycle of the EC2 instance, set only for 'spot' or 'scheduled' instances, absent otherwise
* `__meta_ec2_instance_state`: the state of the EC2 instance
@ -1069,6 +1073,94 @@ tls_config:
[ <tls_config> ]
```
### `<puppetdb_sd_config>`
PuppetDB SD configurations allow retrieving scrape targets from
[PuppetDB](https://puppet.com/docs/puppetdb/latest/index.html) resources.
This SD discovers resources and will create a target for each resource returned
by the API.
The resource address is the `certname` of the resource and can be changed during
[relabeling](#relabel_config).
The following meta labels are available on targets during [relabeling](#relabel_config):
* `__meta_puppetdb_certname`: the name of the node associated with the resource
* `__meta_puppetdb_resource`: a SHA-1 hash of the resource's type, title, and parameters, for identification
* `__meta_puppetdb_type`: the resource type
* `__meta_puppetdb_title`: the resource title
* `__meta_puppetdb_exported`: whether the resource is exported (`"true"` or `"false"`)
* `__meta_puppetdb_tags`: comma separated list of resource tags
* `__meta_puppetdb_file`: the manifest file in which the resource was declared
* `__meta_puppetdb_environment`: the environment of the node associated with the resource
* `__meta_puppetdb_parameter_<parametername>`: the parameters of the resource
See below for the configuration options for PuppetDB discovery:
```yaml
# The URL of the PuppetDB root query endpoint.
url: <string>
# Puppet Query Language (PQL) query. Only resources are supported.
# https://puppet.com/docs/puppetdb/latest/api/query/v4/pql.html
query: <string>
# Whether to include the parameters as meta labels.
# Due to the differences between parameter types and Prometheus labels,
# some parameters might not be rendered. The format of the parameters might
# also change in future releases.
#
# Note: Enabling this exposes parameters in the Prometheus UI and API. Make sure
# that you don't have secrets exposed as parameters if you enable this.
[ include_parameters: <boolean> | default = false ]
# Refresh interval to re-read the resources list.
[ refresh_interval: <duration> | default = 60s ]
# The port to scrape metrics from.
[ port: <int> | default = 80 ]
# TLS configuration to connect to the PuppetDB.
tls_config:
[ <tls_config> ]
# basic_auth, authorization, and oauth2, are mutually exclusive.
# Optional HTTP basic authentication information.
basic_auth:
[ username: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
# `Authorization` HTTP header configuration.
authorization:
# Sets the authentication type.
[ type: <string> | default: Bearer ]
# Sets the credentials. It is mutually exclusive with
# `credentials_file`.
[ credentials: <secret> ]
# Sets the credentials with the credentials read from the configured file.
# It is mutually exclusive with `credentials`.
[ credentials_file: <filename> ]
# Optional OAuth 2.0 configuration.
# Cannot be used at the same time as basic_auth or authorization.
oauth2:
[ <oauth2> ]
# Optional proxy URL.
[ proxy_url: <string> ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <bool> | default = true ]
```
See [this example Prometheus configuration file](/documentation/examples/prometheus-puppetdb.yml)
for a detailed example of configuring Prometheus with PuppetDB.
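As an extra sketch beyond the linked example, the wrapped `__meta_puppetdb_tags` value can be matched with a plain substring regex during relabeling; the job name, server URL, and PQL query below are illustrative only:

```yaml
scrape_configs:
  - job_name: 'puppetdb-apache'            # hypothetical job name
    puppetdb_sd_configs:
      - url: https://puppetdb.example.com  # placeholder PuppetDB server
        query: 'resources { type = "Class" and title = "Apache" }'
    relabel_configs:
      # __meta_puppetdb_tags is wrapped in separators (",tag1,tag2,"),
      # so a tag can be matched without worrying about its position.
      - source_labels: [__meta_puppetdb_tags]
        regex: '.*,apache,.*'
        action: keep
```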
### `<file_sd_config>`
File-based service discovery provides a more generic way to configure static targets
@ -1454,6 +1546,29 @@ Available meta labels:
* If the endpoints belong to a service, all labels of the `role: service` discovery are attached.
* For all targets backed by a pod, all labels of the `role: pod` discovery are attached.
#### `endpointslice`
The `endpointslice` role discovers targets from existing endpointslices. For each endpoint
address referenced in the endpointslice object one target is discovered. If the endpoint is backed by a pod, all
additional container ports of the pod, not bound to an endpoint port, are discovered as targets as well.
Available meta labels:
* `__meta_kubernetes_namespace`: The namespace of the endpoints object.
* `__meta_kubernetes_endpointslice_name`: The name of the endpointslice object.
* For all targets discovered directly from the endpointslice list (those not additionally inferred
from underlying pods), the following labels are attached:
* `__meta_kubernetes_endpointslice_address_target_kind`: Kind of the referenced object.
* `__meta_kubernetes_endpointslice_address_target_name`: Name of referenced object.
* `__meta_kubernetes_endpointslice_address_type`: The IP protocol family of the address of the target.
* `__meta_kubernetes_endpointslice_endpoint_conditions_ready`: Set to `true` or `false` for the referenced endpoint's ready state.
* `__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname`: Name of the node hosting the referenced endpoint.
* `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: Flag that shows if the referenced object has a kubernetes.io/hostname annotation.
* `__meta_kubernetes_endpointslice_port`: Port of the referenced endpoint.
* `__meta_kubernetes_endpointslice_port_name`: Named port of the referenced endpoint.
* `__meta_kubernetes_endpointslice_port_protocol`: Protocol of the referenced endpoint.
* If the endpoints belong to a service, all labels of the `role: service` discovery are attached.
* For all targets backed by a pod, all labels of the `role: pod` discovery are attached.
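A minimal scrape job sketch for this role follows; the job name and namespace are placeholders, and the relabel rule simply keeps endpoints that report a ready condition:

```yaml
scrape_configs:
  - job_name: 'kubernetes-endpointslices'  # hypothetical job name
    kubernetes_sd_configs:
      - role: endpointslice
        namespaces:
          names: ['default']               # placeholder namespace
    relabel_configs:
      # Drop endpoints that are not ready.
      - source_labels: [__meta_kubernetes_endpointslice_endpoint_conditions_ready]
        regex: 'true'
        action: keep
```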
#### `ingress`
The `ingress` role discovers a target for each path of each ingress.
@ -2172,6 +2287,9 @@ it was not set during relabeling. The `__scheme__` and `__metrics_path__` labels
are set to the scheme and metrics path of the target respectively. The `__param_<name>`
label is set to the value of the first passed URL parameter called `<name>`.
The `__scrape_interval__` and `__scrape_timeout__` labels are set to the target's
interval and timeout. This is **experimental** and could change in the future.
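For illustration only (the target address is a placeholder), these labels can be read like any other label during relabeling, for example to surface the effective interval as an ordinary target label:

```yaml
scrape_configs:
  - job_name: 'node'                       # hypothetical job
    static_configs:
      - targets: ['localhost:9100']        # placeholder target
    relabel_configs:
      # Copy the experimental __scrape_interval__ label into a visible label.
      - source_labels: [__scrape_interval__]
        target_label: configured_scrape_interval
```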
Additional labels prefixed with `__meta_` may be available during the
relabeling phase. They are set by the service discovery mechanism that provided
the target and vary between mechanisms.
@ -2384,6 +2502,10 @@ nerve_sd_configs:
openstack_sd_configs:
[ - <openstack_sd_config> ... ]
# List of PuppetDB service discovery configurations.
puppetdb_sd_configs:
[ - <puppetdb_sd_config> ... ]
# List of Scaleway service discovery configurations.
scaleway_sd_configs:
[ - <scaleway_sd_config> ... ]

View file

@ -78,6 +78,10 @@ name: <string>
# How often rules in the group are evaluated.
[ interval: <duration> | default = global.evaluation_interval ]
# Limit the number of alerts and series individual rules can produce.
# 0 is no limit.
[ limit: <int> | default = 0 ]
rules:
[ - <rule> ... ]
```
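A sketch of a group that sets the new field; the group name, interval, threshold, and rule are illustrative:

```yaml
groups:
  - name: example-recording-rules          # hypothetical group name
    interval: 1m
    limit: 100                             # each rule may produce at most 100 series or alerts
    rules:
      - record: job:http_requests:rate5m
        expr: sum by (job) (rate(http_requests_total[5m]))
```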

View file

@ -34,7 +34,7 @@ that PromQL does not look ahead of the evaluation time for samples.
`--enable-feature=promql-negative-offset`
In contrast to the positive offset modifier, the negative offset modifier lets
one shift a vector selector into the future. An example in which one may want
one shift a vector selector into the future. An example in which one may want
to use a negative offset is reviewing past data and making temporal comparisons
with more recent data.
@ -61,3 +61,13 @@ Exemplar storage is implemented as a fixed size circular buffer that stores exem
This takes the snapshot of the chunks that are in memory along with the series information when shutting down and stores
it on disk. This will reduce the startup time since the memory state can be restored with this snapshot and m-mapped
chunks without the need of WAL replay.
## Extra Scrape Metrics
`--enable-feature=extra-scrape-metrics`
When enabled, for each instance scrape, Prometheus stores a sample in the following additional time series:
- `scrape_timeout_seconds`. The configured `scrape_timeout` for a target. This allows you to measure how close each target is to timing out with `scrape_duration_seconds / scrape_timeout_seconds`.
- `scrape_sample_limit`. The configured `sample_limit` for a target. This allows you to measure how close each target is to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. Note that `scrape_sample_limit` can be zero if there is no limit configured, which means that the query above can return `+Inf` for targets with no limit (as we divide by zero). If you want to query only for targets that do have a sample limit, use this query: `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`.

View file

@ -25,7 +25,7 @@ Here is a table comparing our two generic Service Discovery implementations.
## Requirements of HTTP SD endpoints
If you implement an HTTP SD endpoint, here is a few requirements you should be
If you implement an HTTP SD endpoint, here are a few requirements you should be
aware of.
The response is consumed as is, unmodified. On each refresh interval (default: 1
@ -47,7 +47,7 @@ for incremental updates. A Prometheus instance does not send its hostname and it
is not possible for a SD endpoint to know if the SD requests is the first one
after a restart or not.
The URL to the HTTP SD is not considered secret. The authentication, and any API
The URL to the HTTP SD is not considered secret. The authentication and any API
keys should be passed with the appropriate authentication mechanisms. Prometheus
supports TLS authentication, basic authentication, OAuth2, and authorization
headers.
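For reference, a conforming endpoint returns a JSON list in the same shape as file-based discovery; the addresses and labels below are placeholders:

```json
[
  {
    "targets": ["10.0.10.2:9100", "10.0.10.3:9100"],
    "labels": {
      "__meta_datacenter": "dc1",
      "env": "prod"
    }
  }
]
```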

View file

@ -361,7 +361,7 @@ URL query parameters:
- `end=<rfc3339 | unix_timestamp>`: End timestamp.
```json
$ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metric_total&start=2020-09-14T15:22:25.479Z&end=020-09-14T15:23:25.479Z'
$ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metric_total&start=2020-09-14T15:22:25.479Z&end=2020-09-14T15:23:25.479Z'
{
"status": "success",
"data": [
@ -502,7 +502,9 @@ $ curl http://localhost:9090/api/v1/targets
"lastError": "",
"lastScrape": "2017-01-17T15:07:44.723715405+01:00",
"lastScrapeDuration": 0.050688943,
"health": "up"
"health": "up",
"scrapeInterval": "1m",
"scrapeTimeout": "10s"
}
],
"droppedTargets": [
@ -511,6 +513,8 @@ $ curl http://localhost:9090/api/v1/targets
"__address__": "127.0.0.1:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "1m",
"__scrape_timeout__": "10s",
"job": "node"
},
}

View file

@ -434,3 +434,26 @@ over time and return an instant vector with per-series aggregation results:
Note that all values in the specified interval have the same weight in the
aggregation even if the values are not equally spaced throughout the interval.
## Trigonometric Functions
The trigonometric functions work in radians:
- `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)).
- `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)).
- `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)).
- `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)).
- `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)).
- `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)).
- `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)).
- `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)).
- `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)).
- `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
- `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)).
- `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)).
The following are useful for converting between degrees and radians:
- `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
- `pi()`: returns pi.
- `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.

View file

@ -40,6 +40,16 @@ grouping labels becoming the output label set. The metric name is dropped. Entri
for which no matching entry in the right-hand vector can be found are not part of
the result.
### Trigonometric binary operators
The following trigonometric binary operators, which work in radians, exist in Prometheus:
* `atan2` (based on https://pkg.go.dev/math#Atan2)
Trigonometric operators allow trigonometric functions to be executed on two vectors using
vector matching, which isn't available with normal functions. They act in the same manner
as arithmetic operators.
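As a sketch, the operator can be used inside a recording rule with explicit vector matching; the metric and rule names are hypothetical:

```yaml
groups:
  - name: wind-direction                   # hypothetical group
    rules:
      - record: instance:wind_direction:rad
        # atan2 supports vector matching just like the arithmetic operators.
        expr: wind_velocity_north atan2 on(instance) wind_velocity_east
```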
### Comparison binary operators
The following binary comparison operators exist in Prometheus:
@ -264,7 +274,7 @@ The following list shows the precedence of binary operators in Prometheus, from
highest to lowest.
1. `^`
2. `*`, `/`, `%`
2. `*`, `/`, `%`, `atan2`
3. `+`, `-`
4. `==`, `!=`, `<=`, `<`, `>=`, `>`
5. `and`, `unless`

View file

@ -27,7 +27,7 @@ replayed when the Prometheus server restarts. Write-ahead log files are stored
in the `wal` directory in 128MB segments. These files contain raw data that
has not yet been compacted; thus they are significantly larger than regular block
files. Prometheus will retain a minimum of three write-ahead log files.
High-traffic servers may retain more than three WAL files in order to to keep at
High-traffic servers may retain more than three WAL files in order to keep at
least two hours of raw data.
A Prometheus server's data directory looks something like this:
@ -82,7 +82,7 @@ Prometheus has several flags that configure local storage. The most important ar
* `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`.
* `--storage.tsdb.retention.time`: When to remove old data. Defaults to `15d`. Overrides `storage.tsdb.retention` if this flag is set to anything other than default.
* `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain. The oldest data will be removed first. Defaults to `0` or disabled. Units supported: B, KB, MB, GB, TB, PB, EB. Ex: "512MB"
* `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain. The oldest data will be removed first. Defaults to `0` or disabled. Units supported: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Only the persistent blocks are deleted to honor this retention although WAL and m-mapped chunks are counted in the total size. So the minimum requirement for the disk is the peak space taken by the `wal` (the WAL and Checkpoint) and `chunks_head` (m-mapped Head chunks) directory combined (peaks every 2 hours).
* `--storage.tsdb.retention`: Deprecated in favor of `storage.tsdb.retention.time`.
* `--storage.tsdb.wal-compression`: Enables compression of the write-ahead log (WAL). Depending on your data, you can expect the WAL size to be halved with little extra cpu load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0. Note that once enabled, downgrading Prometheus to a version below 2.11.0 will require deleting the WAL.

View file

@ -17,7 +17,6 @@ import (
"context"
"io/ioutil"
"os"
"reflect"
"testing"
"github.com/prometheus/common/model"
@ -217,14 +216,7 @@ func TestGenerateTargetGroups(t *testing.T) {
for _, testCase := range testCases {
result := generateTargetGroups(testCase.targetGroup)
if !reflect.DeepEqual(result, testCase.expectedCustomSD) {
t.Errorf("%q failed\ngot: %#v\nexpected: %v",
testCase.title,
result,
testCase.expectedCustomSD)
}
require.Equal(t, testCase.expectedCustomSD, result)
}
}

View file

@ -0,0 +1,40 @@
# Prometheus example configuration to be used with PuppetDB.
scrape_configs:
- job_name: 'puppetdb-node-exporter'
puppetdb_sd_configs:
# This example discovers the nodes which have the class Prometheus::Node_exporter.
- url: https://puppetdb.example.com
query: 'resources { type = "Class" and title = "Prometheus::Node_exporter" }'
port: 9100
tls_config:
cert_file: prometheus-public.pem
key_file: prometheus-private.pem
ca_file: ca.pem
- job_name: 'puppetdb-scrape-jobs'
puppetdb_sd_configs:
# This example uses the Prometheus::Scrape_job
# exported resources.
# https://github.com/camptocamp/prometheus-puppetdb-sd
# This example is compatible with prometheus-puppetdb-sd,
# if the exported Prometheus::Scrape_job resources have at most one target.
- url: https://puppetdb.example.com
query: 'resources { type = "Prometheus::Scrape_job" and exported = true }'
include_parameters: true
tls_config:
cert_file: prometheus-public.pem
key_file: prometheus-private.pem
ca_file: ca.pem
relabel_configs:
- source_labels: [__meta_puppetdb_certname]
target_label: certname
- source_labels: [__meta_puppetdb_parameter_targets]
regex: '(.+),?.*'
replacement: $1
target_label: __address__
- source_labels: [__meta_puppetdb_parameter_job_name]
target_label: job
- regex: '__meta_puppetdb_parameter_labels_(.+)'
replacement: '$1'
action: labelmap

View file

@ -17,6 +17,7 @@ import (
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
)
var (
@ -32,17 +33,13 @@ func TestEscape(t *testing.T) {
value := "abzABZ019(){},'\"\\"
expected := "abzABZ019\\(\\)\\{\\}\\,\\'\\\"\\\\"
actual := escape(model.LabelValue(value))
if expected != actual {
t.Errorf("Expected %s, got %s", expected, actual)
}
require.Equal(t, expected, actual)
// Test percent-encoding.
value = "é/|_;:%."
expected = "%C3%A9%2F|_;:%25%2E"
actual = escape(model.LabelValue(value))
if expected != actual {
t.Errorf("Expected %s, got %s", expected, actual)
}
require.Equal(t, expected, actual)
}
func TestPathFromMetric(t *testing.T) {
@ -51,7 +48,5 @@ func TestPathFromMetric(t *testing.T) {
".many_chars.abc!ABC:012-3!45%C3%B667~89%2E%2F\\(\\)\\{\\}\\,%3D%2E\\\"\\\\" +
".testlabel.test:value")
actual := pathFromMetric(metric, "prefix.")
if expected != actual {
t.Errorf("Expected %s, got %s", expected, actual)
}
require.Equal(t, expected, actual)
}

View file

@ -24,6 +24,7 @@ import (
influx "github.com/influxdata/influxdb/client/v2"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
)
func TestClient(t *testing.T) {
@ -73,28 +74,17 @@ testmetric,test_label=test_label_value2 value=5.1234 123456789123
server := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
t.Fatalf("Unexpected method; expected POST, got %s", r.Method)
}
if r.URL.Path != "/write" {
t.Fatalf("Unexpected path; expected %s, got %s", "/write", r.URL.Path)
}
require.Equal(t, "POST", r.Method, "Unexpected method.")
require.Equal(t, "/write", r.URL.Path, "Unexpected path.")
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatalf("Error reading body: %s", err)
}
if string(b) != expectedBody {
t.Fatalf("Unexpected request body; expected:\n\n%s\n\ngot:\n\n%s", expectedBody, string(b))
}
require.NoError(t, err, "Error reading body.")
require.Equal(t, expectedBody, string(b), "Unexpected request body.")
},
))
defer server.Close()
serverURL, err := url.Parse(server.URL)
if err != nil {
t.Fatalf("Unable to parse server URL %s: %s", server.URL, err)
}
require.NoError(t, err, "Unable to parse server URL.")
conf := influx.HTTPConfig{
Addr: serverURL.String(),
@ -103,8 +93,6 @@ testmetric,test_label=test_label_value2 value=5.1234 123456789123
Timeout: time.Minute,
}
c := NewClient(nil, conf, "test_db", "default")
if err := c.Write(samples); err != nil {
t.Fatalf("Error sending samples: %s", err)
}
err = c.Write(samples)
require.NoError(t, err, "Error sending samples.")
}

View file

@ -14,12 +14,11 @@
package opentsdb
import (
"bytes"
"encoding/json"
"reflect"
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
)
var (
@ -36,9 +35,7 @@ func TestTagsFromMetric(t *testing.T) {
"many_chars": TagValue("abc!ABC:012-3!45ö67~89./"),
}
actual := tagsFromMetric(metric)
if !reflect.DeepEqual(actual, expected) {
t.Errorf("Expected %#v, got %#v", expected, actual)
}
require.Equal(t, expected, actual)
}
func TestMarshalStoreSamplesRequest(t *testing.T) {
@ -51,25 +48,11 @@ func TestMarshalStoreSamplesRequest(t *testing.T) {
expectedJSON := []byte(`{"metric":"test_.metric","timestamp":4711,"value":3.1415,"tags":{"many_chars":"abc_21ABC_.012-3_2145_C3_B667_7E89./","testlabel":"test_.value"}}`)
resultingJSON, err := json.Marshal(request)
if err != nil {
t.Fatalf("Marshal(request) resulted in err: %s", err)
}
if !bytes.Equal(resultingJSON, expectedJSON) {
t.Errorf(
"Marshal(request) => %q, want %q",
resultingJSON, expectedJSON,
)
}
require.NoError(t, err, "Marshal(request) resulted in err.")
require.Equal(t, expectedJSON, resultingJSON)
var unmarshaledRequest StoreSamplesRequest
err = json.Unmarshal(expectedJSON, &unmarshaledRequest)
if err != nil {
t.Fatalf("Unmarshal(expectedJSON, &unmarshaledRequest) resulted in err: %s", err)
}
if !reflect.DeepEqual(unmarshaledRequest, request) {
t.Errorf(
"Unmarshal(expectedJSON, &unmarshaledRequest) => %#v, want %#v",
unmarshaledRequest, request,
)
}
require.NoError(t, err, "Unmarshal(expectedJSON, &unmarshaledRequest) resulted in err.")
require.Equal(t, request, unmarshaledRequest)
}

View file

@ -14,9 +14,10 @@
package opentsdb
import (
"bytes"
"encoding/json"
"testing"
"github.com/stretchr/testify/require"
)
var stringtests = []struct {
@ -32,17 +33,9 @@ var stringtests = []struct {
func TestTagValueMarshaling(t *testing.T) {
for i, tt := range stringtests {
json, err := json.Marshal(tt.tv)
if err != nil {
t.Errorf("%d. Marshal(%q) returned err: %s", i, tt.tv, err)
} else {
if !bytes.Equal(json, tt.json) {
t.Errorf(
"%d. Marshal(%q) => %q, want %q",
i, tt.tv, json, tt.json,
)
}
}
got, err := json.Marshal(tt.tv)
require.NoError(t, err, "%d. Marshal(%q) returned error.", i, tt.tv)
require.Equal(t, tt.json, got, "%d. Marshal(%q) not equal.", i, tt.tv)
}
}
@ -50,15 +43,7 @@ func TestTagValueUnMarshaling(t *testing.T) {
for i, tt := range stringtests {
var tv TagValue
err := json.Unmarshal(tt.json, &tv)
if err != nil {
t.Errorf("%d. Unmarshal(%q, &str) returned err: %s", i, tt.json, err)
} else {
if tv != tt.tv {
t.Errorf(
"%d. Unmarshal(%q, &str) => str==%q, want %q",
i, tt.json, tv, tt.tv,
)
}
}
require.NoError(t, err, "%d. Unmarshal(%q, &str) returned error.", i, tt.json)
require.Equal(t, tt.tv, tv, "%d. Unmarshal(%q, &str) not equal.", i, tt.json)
}
}

View file

@ -9,9 +9,9 @@ and dashboards for Prometheus.
To use them, you need to have `jsonnet` (v0.13+) and `jb` installed. If you
have a working Go development environment, it's easiest to run the following:
```bash
$ go get github.com/google/go-jsonnet/cmd/jsonnet
$ go get github.com/google/go-jsonnet/cmd/jsonnetfmt
$ go get github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
$ go install github.com/google/go-jsonnet/cmd/jsonnet@latest
$ go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
$ go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest
```
_Note: The make targets `lint` and `fmt` need the `jsonnetfmt` binary, which is

View file

@ -13,8 +13,8 @@ local template = grafana.template;
g.dashboard(
'%(prefix)sOverview' % $._config.grafanaPrometheus
)
.addMultiTemplate('job', 'prometheus_build_info', 'job')
.addMultiTemplate('instance', 'prometheus_build_info', 'instance')
.addMultiTemplate('job', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'job')
.addMultiTemplate('instance', 'prometheus_build_info{job=~"$job"}', 'instance')
.addRow(
g.row('Prometheus Stats')
.addPanel(

View file

@ -12,7 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//+build tools
//go:build tools
// +build tools
// Package tools tracks dependencies for tools that used in the build process.
// See https://github.com/golang/go/issues/25922

51
go.mod
View file

@ -3,37 +3,37 @@ module github.com/prometheus/prometheus
go 1.14
require (
github.com/Azure/azure-sdk-for-go v55.8.0+incompatible
github.com/Azure/go-autorest/autorest v0.11.19
github.com/Azure/go-autorest/autorest/adal v0.9.14
github.com/Azure/azure-sdk-for-go v57.1.0+incompatible
github.com/Azure/go-autorest/autorest v0.11.20
github.com/Azure/go-autorest/autorest/adal v0.9.15
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15
github.com/aws/aws-sdk-go v1.40.10
github.com/cespare/xxhash/v2 v2.1.1
github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922
github.com/aws/aws-sdk-go v1.40.37
github.com/cespare/xxhash/v2 v2.1.2
github.com/containerd/containerd v1.5.4 // indirect
github.com/dennwc/varint v1.0.0
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245
github.com/digitalocean/godo v1.64.2
github.com/docker/docker v20.10.7+incompatible
github.com/digitalocean/godo v1.65.0
github.com/docker/docker v20.10.8+incompatible
github.com/docker/go-connections v0.4.0 // indirect
github.com/edsrzf/mmap-go v1.0.0
github.com/envoyproxy/go-control-plane v0.9.9
github.com/envoyproxy/protoc-gen-validate v0.6.1
github.com/go-kit/log v0.1.0
github.com/go-logfmt/logfmt v0.5.0
github.com/go-openapi/strfmt v0.20.1
github.com/go-logfmt/logfmt v0.5.1
github.com/go-openapi/strfmt v0.20.2
github.com/go-zookeeper/zk v1.0.2
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/google/pprof v0.0.0-20210726183535-c50bf4fe5303
github.com/gophercloud/gophercloud v0.19.0
github.com/google/pprof v0.0.0-20210827144239-02619b876842
github.com/gophercloud/gophercloud v0.20.0
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.9.1
github.com/hetznercloud/hcloud-go v1.28.0
github.com/hashicorp/consul/api v1.10.1
github.com/hetznercloud/hcloud-go v1.32.0
github.com/influxdata/influxdb v1.9.3
github.com/json-iterator/go v1.1.11
github.com/linode/linodego v0.31.0
github.com/linode/linodego v0.32.0
github.com/miekg/dns v1.1.43
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
github.com/morikuni/aec v1.0.0 // indirect
@ -43,10 +43,11 @@ require (
github.com/opentracing-contrib/go-stdlib v1.0.0
github.com/opentracing/opentracing-go v1.2.0
github.com/pkg/errors v0.9.1
github.com/prometheus/alertmanager v0.22.2
github.com/prometheus/alertmanager v0.23.0
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.30.0
github.com/prometheus/common v0.31.1
github.com/prometheus/common/sigv4 v0.1.0
github.com/prometheus/exporter-toolkit v0.6.1
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
@ -56,22 +57,22 @@ require (
github.com/uber/jaeger-lib v2.4.1+incompatible
go.uber.org/atomic v1.9.0
go.uber.org/goleak v1.1.10
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914
golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
golang.org/x/tools v0.1.5
google.golang.org/api v0.51.0
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea
google.golang.org/api v0.56.0
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83
google.golang.org/protobuf v1.27.1
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/fsnotify/fsnotify.v1 v1.4.7
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
k8s.io/api v0.21.3
k8s.io/apimachinery v0.21.3
k8s.io/client-go v0.21.3
k8s.io/api v0.22.1
k8s.io/apimachinery v0.22.1
k8s.io/client-go v0.22.1
k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.10.0
)

182
go.sum
View file

@ -22,8 +22,10 @@ cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
cloud.google.com/go v0.87.0 h1:8ZtzmY4a2JIO2sljMbpqkDYxA8aJQveYr3AMa+X40oc=
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio=
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@ -47,8 +49,8 @@ collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v55.8.0+incompatible h1:EuccMPzxu67cIE95/mrtwQivLv7ETmURi5IUgLNVug8=
github.com/Azure/azure-sdk-for-go v55.8.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v57.1.0+incompatible h1:TKQ3ieyB0vVKkF6t9dsWbMjq56O1xU3eh3Ec09v6ajM=
github.com/Azure/azure-sdk-for-go v57.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
@ -59,9 +61,9 @@ github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8
github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
github.com/Azure/go-autorest/autorest v0.11.19 h1:7/IqD2fEYVha1EPeaiytVKhzmPV223pfkRIQUGOK2IE=
github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest v0.11.20 h1:s8H1PbCZSqg/DH7JMlOz6YMig6htWLNPsjDdlLqCx3M=
github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
@ -70,8 +72,8 @@ github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMl
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/adal v0.9.14 h1:G8hexQdV5D4khOXrWG2YuLCFKhWYmWD8bHYaXN5ophk=
github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk=
github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
@ -149,8 +151,9 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRYS3XieqF+Z9B9gNxo/eANAJCF2eiN4=
github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922 h1:8ypNbf5sd3Sm3cKJ9waOGoQv6dKAFiFty9L6NP1AqJ4=
github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
@ -180,8 +183,10 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.40.10 h1:h+xUINuuH/9CwxE7O8mAuW7Aj9E5agfE9jQ3DrJsnA8=
github.com/aws/aws-sdk-go v1.40.10/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go v1.40.37 h1:I+Q6cLctkFyMMrKukcDnj+i2kjrQ37LGiOM6xmsxC48=
github.com/aws/aws-sdk-go v1.40.37/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps=
@ -208,13 +213,14 @@ github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
@ -360,8 +366,8 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/digitalocean/godo v1.64.2 h1:lJEB2TVIkJydFWJMPtdYOPa2Xwib+smZqq/oUZF8/iA=
github.com/digitalocean/godo v1.64.2/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
github.com/digitalocean/godo v1.65.0 h1:3SywGJBC18HaYtPQF+T36jYzXBi+a6eIMonSjDll7TA=
github.com/digitalocean/godo v1.65.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
@ -369,8 +375,8 @@ github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TT
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ=
github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.8+incompatible h1:RVqD337BgQicVCzYrrlhLDWhq6OAD2PJDUg2LsEUvKM=
github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
@ -409,14 +415,15 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/envoyproxy/protoc-gen-validate v0.6.1 h1:4CF52PCseTFt4bE+Yk3dIpdVi7XWuPVMhPtm4FaIJPM=
github.com/envoyproxy/protoc-gen-validate v0.6.1/go.mod h1:txg5va2Qkip90uYoSKH+nkAAmXrb2j3iq4FLwdrCbXQ=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp4nseejPd+UKxtCVQ2hUxNTZ7qQZJa7CLriIeo=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
@ -430,6 +437,8 @@ github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
@ -448,8 +457,9 @@ github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
@ -504,8 +514,8 @@ github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29g
github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo=
github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98=
github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk=
github.com/go-openapi/runtime v0.19.28 h1:9lYu6axek8LJrVkMVViVirRcpoaCxXX7+sSvmizGVnA=
github.com/go-openapi/runtime v0.19.28/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M=
github.com/go-openapi/runtime v0.19.29 h1:5IIvCaIDbxetN674vX9eOxvoZ9mYGQ16fV1Q0VSG+NA=
github.com/go-openapi/runtime v0.19.29/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
@ -528,8 +538,9 @@ github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk
github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
github.com/go-openapi/strfmt v0.20.1 h1:1VgxvehFne1mbChGeCmZ5pc0LxUf6yaACVSIYAR91Xc=
github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
github.com/go-openapi/strfmt v0.20.2 h1:6XZL+fF4VZYFxKQGLAUB358hOrRh/wS51uWEtlONADE=
github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
@ -552,6 +563,8 @@ github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9G
github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0=
github.com/go-openapi/validate v0.20.2 h1:AhqDegYV3J3iQkMPJSXkvzymHKMTw0BST3RK3hTT4ts=
github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0=
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY=
github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@ -603,6 +616,8 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
@ -612,8 +627,9 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@ -649,8 +665,9 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@ -689,8 +706,9 @@ github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210726183535-c50bf4fe5303 h1:UP+GUfRSErTzM7UtTMt4TjnfXKK3ybrGQasgSOp7vLY=
github.com/google/pprof v0.0.0-20210726183535-c50bf4fe5303/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210827144239-02619b876842 h1:JCrt5MIE1fHQtdy1825HwJ45oVQaqHE6lgssRhjcg/o=
github.com/google/pprof v0.0.0-20210827144239-02619b876842/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@ -698,16 +716,19 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gophercloud/gophercloud v0.10.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss=
github.com/gophercloud/gophercloud v0.19.0 h1:zzaIh8W2K5M4AkJhPV1z6O4Sp0FOObzXm61NUmFz3Kw=
github.com/gophercloud/gophercloud v0.19.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
github.com/gophercloud/gophercloud v0.20.0 h1:1+4jrsjVhdX5omlAo4jkmFc6ftLbuXLzgFo4i6lH+Gk=
github.com/gophercloud/gophercloud v0.20.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
@ -730,8 +751,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU=
github.com/hashicorp/consul/api v1.9.1 h1:SngrdG2L62qqLsUz85qcPhFZ78rPf8tcD5qjMgs6MME=
github.com/hashicorp/consul/api v1.9.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
github.com/hashicorp/consul/api v1.10.1 h1:MwZJp86nlnL+6+W1Zly4JUuVn9YHhMggBirMpHGD7kw=
github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU=
@ -779,14 +800,14 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/memberlist v0.2.3 h1:BwZa5IjREr75J0am7nblP+X5i95Rmp8EEbMI5vkUWdA=
github.com/hashicorp/memberlist v0.2.3/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/memberlist v0.2.4 h1:OOhYzSvFnkFQXm1ysE8RjXTHsqSRDyP4emusC9K7DYg=
github.com/hashicorp/memberlist v0.2.4/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU=
github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM=
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
github.com/hetznercloud/hcloud-go v1.28.0 h1:T2a0CVGETf7BoWIdZ/TACqmTZAa/ROutcfdUHYiPAQ4=
github.com/hetznercloud/hcloud-go v1.28.0/go.mod h1:2C5uMtBiMoFr3m7lBFPf7wXTdh33CevmZpQIIDPGYJI=
github.com/hetznercloud/hcloud-go v1.32.0 h1:7zyN2V7hMlhm3HZdxOarmOtvzKvkcYKjM0hcwYMQZz0=
github.com/hetznercloud/hcloud-go v1.32.0/go.mod h1:XX/TQub3ge0yWR2yHWmnDVIrB+MQbda1pHxkUmDlUME=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
@ -882,11 +903,12 @@ github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LE
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linode/linodego v0.31.0 h1:99+t6GtTaIsL+ncz5BpCbkh8Y90rC7qW1Not8MFe0Gw=
github.com/linode/linodego v0.31.0/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM=
github.com/linode/linodego v0.32.0 h1:IK04cx2b/IwAAd6XLruf1Dl/n3dRXj87Uw/5qo6afVU=
github.com/linode/linodego v0.32.0/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM=
github.com/lyft/protoc-gen-star v0.5.1/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkVUl+SrEe+ZORU=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@ -1005,13 +1027,15 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
@ -1082,8 +1106,8 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg=
github.com/prometheus/alertmanager v0.22.2 h1:JrDZalSEMb2/2bqGAhls6ZnvOxbC5jMIu29JV+uWTC0=
github.com/prometheus/alertmanager v0.22.2/go.mod h1:rYinOWxFuCnNssc3iOjn2oMTlhLaPcUuqV5yk5JKUAE=
github.com/prometheus/alertmanager v0.23.0 h1:KIb9IChC3kg+1CC388qfr7bsT+tARpQqdsCMoatdObA=
github.com/prometheus/alertmanager v0.23.0/go.mod h1:0MLTrjQI8EuVmvykEhcfr/7X0xmaDAZrqMgxIq3OXHk=
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
@ -1095,7 +1119,6 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@ -1115,14 +1138,13 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug=
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg=
github.com/prometheus/common v0.31.1 h1:d18hG4PkHnNAKNMOmFuXFaiY8Us0nird/2m60uS1AMs=
github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/exporter-toolkit v0.6.1 h1:Aqk75wQD92N9CqmTlZwjKwq6272nOGrWIbc8Z7+xQO0=
github.com/prometheus/exporter-toolkit v0.6.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@ -1149,7 +1171,7 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@ -1209,6 +1231,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
@ -1247,6 +1270,8 @@ github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6
github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
@ -1361,7 +1386,6 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
@ -1451,6 +1475,7 @@ golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@ -1464,16 +1489,16 @@ golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985 h1:4CSI6oo7cOjJKajidEljs9h+uP0rRZBPPPhcCbj5mw8=
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f h1:w6wWR0H+nyVpbSAQbzVEIACVyr/h8l/BEkY6Sokc7Eg=
golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1486,8 +1511,10 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 h1:3B43BWw0xEBsLZ/NO1VALz6fppU3481pik+2Ksv45z8=
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1569,6 +1596,7 @@ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1593,22 +1621,22 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 h1:GkvMjFtXUmahfDtashnc1mnrCtuBVcwse5QV2lUk/tI=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
@ -1751,8 +1779,10 @@ google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
google.golang.org/api v0.51.0 h1:SQaA2Cx57B+iPw2MBgyjEkoeMkRK2IenSGoia0U3lCk=
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
google.golang.org/api v0.56.0 h1:08F9XVYTLOGeSQb3xI9C0gXMuQanhdGed0cWFhDozbI=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1799,6 +1829,7 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@ -1815,8 +1846,14 @@ google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxH
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea h1:8ZyCcgugUqamxp/vZSEJw9CMy7VZlSWYJLLJPi/dSDA=
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 h1:3V2dxSZpz4zozWWUq36vUxXEKnSYitEH2LdsAx+RUmg=
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@ -1847,8 +1884,10 @@ google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI=
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@ -1880,6 +1919,8 @@ gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBl
gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
@ -1923,14 +1964,14 @@ k8s.io/api v0.17.5/go.mod h1:0zV5/ungglgy2Rlm3QK8fbxkXVs+BSJWpJP/+8gUVLY=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
k8s.io/api v0.21.3 h1:cblWILbLO8ar+Fj6xdDGr603HRsf8Wu9E9rngJeprZQ=
k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg=
k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY=
k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
k8s.io/apimachinery v0.21.3 h1:3Ju4nvjCngxxMYby0BimUk+pQHPOQp3eCGChk5kfVII=
k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI=
k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM=
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
@ -1938,8 +1979,8 @@ k8s.io/client-go v0.17.5/go.mod h1:S8uZpBpjJJdEH/fEyxcqg7Rn0P5jH+ilkgBHjriSmNo=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
k8s.io/client-go v0.21.3 h1:J9nxZTOmvkInRDCzcSNQmPJbDYN/PjlxXT9Mos3HcLg=
k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU=
k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw=
k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
@ -1951,13 +1992,14 @@ k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/kube-openapi v0.0.0-20200316234421-82d701f24f9d/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM=
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=

View file

@ -634,7 +634,7 @@ type alertmanagerSet struct {
}
func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metrics *alertMetrics) (*alertmanagerSet, error) {
client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager", config_util.WithHTTP2Disabled())
client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager")
if err != nil {
return nil, err
}

View file

@ -154,7 +154,7 @@ func TestHandlerSendAll(t *testing.T) {
Username: "prometheus",
Password: "testing_password",
},
}, "auth_alertmanager", config_util.WithHTTP2Disabled())
}, "auth_alertmanager")
h.alertmanagers = make(map[string]*alertmanagerSet)
@ -392,7 +392,7 @@ func TestHandlerQueuing(t *testing.T) {
require.NoError(t, err)
return
case <-time.After(5 * time.Second):
t.Fatalf("Alerts were not pushed")
require.FailNow(t, "Alerts were not pushed.")
}
}
}
@ -424,7 +424,7 @@ func TestHandlerQueuing(t *testing.T) {
case err := <-errc:
require.NoError(t, err)
case <-time.After(5 * time.Second):
t.Fatalf("Alerts were not pushed")
require.FailNow(t, "Alerts were not pushed.")
}
// Verify that we receive the last 3 batches.
@ -480,14 +480,12 @@ alerting:
alertmanagers:
- static_configs:
`
if err := yaml.UnmarshalStrict([]byte(s), cfg); err != nil {
t.Fatalf("Unable to load YAML config: %s", err)
}
err := yaml.UnmarshalStrict([]byte(s), cfg)
require.NoError(t, err, "Unable to load YAML config.")
require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs))
if err := n.ApplyConfig(cfg); err != nil {
t.Fatalf("Error Applying the config:%v", err)
}
err = n.ApplyConfig(cfg)
require.NoError(t, err, "Error applying the config.")
tgs := make(map[string][]*targetgroup.Group)
for _, tt := range tests {
@ -534,14 +532,12 @@ alerting:
regex: 'alertmanager:9093'
action: drop
`
if err := yaml.UnmarshalStrict([]byte(s), cfg); err != nil {
t.Fatalf("Unable to load YAML config: %s", err)
}
err := yaml.UnmarshalStrict([]byte(s), cfg)
require.NoError(t, err, "Unable to load YAML config.")
require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs))
if err := n.ApplyConfig(cfg); err != nil {
t.Fatalf("Error Applying the config:%v", err)
}
err = n.ApplyConfig(cfg)
require.NoError(t, err, "Error applying the config.")
tgs := make(map[string][]*targetgroup.Group)
for _, tt := range tests {

View file

@ -546,6 +546,46 @@ func TestLabels_Get(t *testing.T) {
require.Equal(t, "111", Labels{{"aaa", "111"}, {"bbb", "222"}}.Get("aaa"))
}
// BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation
// The results have shown that binary search would only be better when searching last labels in scenarios with more than 10 labels.
// In the following list, `old` is the linear search while `new` is the binary search implementation (without calling sort.Search, which performs even worse here)
// name old time/op new time/op delta
// Labels_Get/with_5_labels/get_first_label 5.12ns ± 0% 14.24ns ± 0% ~ (p=1.000 n=1+1)
// Labels_Get/with_5_labels/get_middle_label 13.5ns ± 0% 18.5ns ± 0% ~ (p=1.000 n=1+1)
// Labels_Get/with_5_labels/get_last_label 21.9ns ± 0% 18.9ns ± 0% ~ (p=1.000 n=1+1)
// Labels_Get/with_10_labels/get_first_label 5.11ns ± 0% 19.47ns ± 0% ~ (p=1.000 n=1+1)
// Labels_Get/with_10_labels/get_middle_label 26.2ns ± 0% 19.3ns ± 0% ~ (p=1.000 n=1+1)
// Labels_Get/with_10_labels/get_last_label 42.8ns ± 0% 23.4ns ± 0% ~ (p=1.000 n=1+1)
// Labels_Get/with_30_labels/get_first_label 5.10ns ± 0% 24.63ns ± 0% ~ (p=1.000 n=1+1)
// Labels_Get/with_30_labels/get_middle_label 75.8ns ± 0% 29.7ns ± 0% ~ (p=1.000 n=1+1)
// Labels_Get/with_30_labels/get_last_label 169ns ± 0% 29ns ± 0% ~ (p=1.000 n=1+1)
func BenchmarkLabels_Get(b *testing.B) {
maxLabels := 30
allLabels := make(Labels, maxLabels)
for i := 0; i < maxLabels; i++ {
allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5)}
}
for _, size := range []int{5, 10, maxLabels} {
b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) {
labels := allLabels[:size]
for _, scenario := range []struct {
desc, label string
}{
{"get first label", labels[0].Name},
{"get middle label", labels[size/2].Name},
{"get last label", labels[size-1].Name},
} {
b.Run(scenario.desc, func(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = labels.Get(scenario.label)
}
})
}
})
}
}
func TestLabels_Copy(t *testing.T) {
require.Equal(t, Labels{{"aaa", "111"}, {"bbb", "222"}}, Labels{{"aaa", "111"}, {"bbb", "222"}}.Copy())
}
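The benchmark comment above compares the existing linear `Labels.Get` against a hand-rolled binary search that is not part of this change. For context, a minimal sketch of what such a variant could look like, assuming `Labels` is the name-sorted `[]Label` slice from this package; the function name `getBinary` is hypothetical:

```go
// getBinary is a hypothetical binary-search counterpart to Labels.Get,
// shown only to illustrate what the benchmark above measures against.
// It assumes ls is sorted by Name, which Labels guarantees.
func getBinary(ls Labels, name string) string {
	lo, hi := 0, len(ls)
	for lo < hi {
		mid := (lo + hi) / 2
		switch {
		case ls[mid].Name < name:
			lo = mid + 1
		case ls[mid].Name > name:
			hi = mid
		default:
			return ls[mid].Value
		}
	}
	return ""
}
```

As the numbers in the comment show, the extra comparisons only pay off for late labels in sets of more than about 10 labels, which is why the linear implementation is kept.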

View file

@ -28,17 +28,18 @@ const (
MatchNotRegexp
)
var matchTypeToStr = [...]string{
MatchEqual: "=",
MatchNotEqual: "!=",
MatchRegexp: "=~",
MatchNotRegexp: "!~",
}
func (m MatchType) String() string {
typeToStr := map[MatchType]string{
MatchEqual: "=",
MatchNotEqual: "!=",
MatchRegexp: "=~",
MatchNotRegexp: "!~",
if m < MatchEqual || m > MatchNotRegexp {
panic("unknown match type")
}
if str, ok := typeToStr[m]; ok {
return str
}
panic("unknown match type")
return matchTypeToStr[m]
}
// Matcher models the matching of a label.

View file

@ -117,3 +117,9 @@ func TestInverse(t *testing.T) {
require.Equal(t, test.expected.Type, result.Type)
}
}
func BenchmarkMatchType_String(b *testing.B) {
for i := 0; i <= b.N; i++ {
_ = MatchType(i % int(MatchNotRegexp+1)).String()
}
}

View file

@ -100,6 +100,9 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
if c.Regex.Regexp == nil {
c.Regex = MustNewRegexp("")
}
if c.Action == "" {
return errors.Errorf("relabel action cannot be empty")
}
if c.Modulus == 0 && c.Action == HashMod {
return errors.Errorf("relabel configuration for hashmod requires non-zero modulus")
}

View file

@ -107,6 +107,7 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
type RuleGroup struct {
Name string `yaml:"name"`
Interval model.Duration `yaml:"interval,omitempty"`
Limit int `yaml:"limit,omitempty"`
Rules []RuleNode `yaml:"rules"`
}
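The `RuleGroup` struct above gains a `limit` field. As an illustration only (not part of the diff), a rule-group file using it, parsed with `rulefmt.Parse`; the `Parse([]byte)` signature, the `pkg/rulefmt` import path, and the reading of `limit` as a cap on what a group may produce (0 meaning no limit) are assumptions, not taken from this diff.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/rulefmt" // path assumed from this tree
)

func main() {
	groups := []byte(`
groups:
  - name: example
    interval: 1m
    limit: 10 # assumed semantics: cap on the group's output; 0 = no limit
    rules:
      - record: job:up:count
        expr: count by (job) (up)
`)
	rgs, errs := rulefmt.Parse(groups)
	if len(errs) > 0 {
		fmt.Println(errs)
		return
	}
	fmt.Println("groups:", len(rgs.Groups), "limit:", rgs.Groups[0].Limit)
}
```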
@ -239,6 +240,7 @@ func testTemplateParsing(rl *RuleNode) (errs []error) {
model.Time(timestamp.FromTime(time.Now())),
nil,
nil,
nil,
)
return tmpl.ParseTest()
}

View file

@ -15,19 +15,14 @@ package rulefmt
import (
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
func TestParseFileSuccess(t *testing.T) {
if _, errs := ParseFile("testdata/test.yaml"); len(errs) > 0 {
t.Errorf("unexpected errors parsing file")
for _, err := range errs {
t.Error(err)
}
}
_, errs := ParseFile("testdata/test.yaml")
require.Empty(t, errs, "unexpected errors parsing file")
}
func TestParseFileFailure(t *testing.T) {
@ -79,15 +74,9 @@ func TestParseFileFailure(t *testing.T) {
for _, c := range table {
_, errs := ParseFile(filepath.Join("testdata", c.filename))
if errs == nil {
t.Errorf("Expected error parsing %s but got none", c.filename)
continue
}
if !strings.Contains(errs[0].Error(), c.errMsg) {
t.Errorf("Expected error for %s to contain %q but got: %s", c.filename, c.errMsg, errs)
}
require.NotNil(t, errs, "Expected error parsing %s but got none", c.filename)
require.Error(t, errs[0], c.errMsg, "Expected error for %s.", c.filename)
}
}
func TestTemplateParsing(t *testing.T) {

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
// +build !windows
package runtime

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build windows
// +build windows
package runtime

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build openbsd || windows || netbsd || solaris
// +build openbsd windows netbsd solaris
package runtime

View file

@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows,!openbsd,!netbsd,!solaris
// +build !386
//go:build !windows && !openbsd && !netbsd && !solaris && !386
// +build !windows,!openbsd,!netbsd,!solaris,!386
package runtime

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux && 386
// +build linux,386
package runtime

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build (386 && darwin) || (386 && freebsd)
// +build 386,darwin 386,freebsd
package runtime

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !linux
// +build !linux
package runtime

View file

@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
// +build !openbsd
//go:build !windows && !openbsd
// +build !windows,!openbsd
package runtime

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build openbsd
// +build openbsd
package runtime

View file

@ -313,11 +313,10 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
t2 := p.nextToken()
if t2 == tBraceOpen {
offsets, err := p.parseLVals()
p.offsets, err = p.parseLVals(p.offsets)
if err != nil {
return EntryInvalid, err
}
p.offsets = append(p.offsets, offsets...)
p.series = p.l.b[p.start:p.l.i]
t2 = p.nextToken()
}
@ -374,12 +373,12 @@ func (p *OpenMetricsParser) parseComment() error {
return err
}
var err error
// Parse the labels.
offsets, err := p.parseLVals()
p.eOffsets, err = p.parseLVals(p.eOffsets)
if err != nil {
return err
}
p.eOffsets = append(p.eOffsets, offsets...)
p.exemplar = p.l.b[p.start:p.l.i]
// Get the value.
@ -417,8 +416,7 @@ func (p *OpenMetricsParser) parseComment() error {
return nil
}
func (p *OpenMetricsParser) parseLVals() ([]int, error) {
var offsets []int
func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) {
first := true
for {
t := p.nextToken()
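The `parseLVals` change above applies a standard Go allocation trick: instead of returning a freshly allocated slice that the caller then appends onto its own (the old `p.offsets = append(p.offsets, offsets...)`), the caller passes its slice in and the function appends to it directly, so one backing array is reused. A generic, self-contained sketch of the pattern, not the parser code itself:

```go
package main

import "fmt"

// collectEven appends the even values of src onto dst and returns the
// grown slice, mirroring how parseLVals now appends into the slice the
// caller hands it (p.offsets / p.eOffsets) instead of allocating its own.
func collectEven(dst, src []int) []int {
	for _, v := range src {
		if v%2 == 0 {
			dst = append(dst, v)
		}
	}
	return dst
}

func main() {
	buf := make([]int, 0, 8) // reused across iterations
	for _, batch := range [][]int{{1, 2, 3, 4}, {5, 6, 7, 8}} {
		buf = collectEven(buf[:0], batch)
		fmt.Println(buf)
	}
}
```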

View file

@ -71,7 +71,7 @@ func BenchmarkRangeQuery(b *testing.B) {
a := storage.Appender(context.Background())
ts := int64(s * 10000) // 10s interval.
for i, metric := range metrics {
ref, _ := a.Append(refs[i], metric, ts, float64(s))
ref, _ := a.Append(refs[i], metric, ts, float64(s)+float64(i)/float64(len(metrics)))
refs[i] = ref
}
if err := a.Commit(); err != nil {
@ -130,6 +130,9 @@ func BenchmarkRangeQuery(b *testing.B) {
{
expr: "a_X unless b_X{l=~'.*[0-4]$'}",
},
{
expr: "a_X and b_X{l='notfound'}",
},
// Simple functions.
{
expr: "abs(a_X)",
@ -159,6 +162,9 @@ func BenchmarkRangeQuery(b *testing.B) {
{
expr: "count_values('value', h_X)",
},
{
expr: "topk(1, a_X)",
},
// Combinations.
{
expr: "rate(a_X[1m]) + rate(b_X[1m])",
@ -172,6 +178,10 @@ func BenchmarkRangeQuery(b *testing.B) {
{
expr: "histogram_quantile(0.9, rate(h_X[5m]))",
},
// Many-to-one join.
{
expr: "a_X + on(l) group_right a_one",
},
}
// X in an expr will be replaced by different metric sizes.

View file

@ -913,6 +913,8 @@ func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws storage.Warnings
type EvalSeriesHelper struct {
// The grouping key used by aggregation.
groupingKey uint64
// Used to map left-hand to right-hand in binary operations.
signature string
}
// EvalNodeHelper stores extra information and caches for evaluating a single node across steps.
@ -925,8 +927,6 @@ type EvalNodeHelper struct {
// Caches.
// DropMetricName and label_*.
Dmn map[uint64]labels.Labels
// signatureFunc.
sigf map[string]string
// funcHistogramQuantile.
signatureToMetricWithBuckets map[string]*metricWithBuckets
// label_replace.
@ -957,23 +957,6 @@ func (enh *EvalNodeHelper) DropMetricName(l labels.Labels) labels.Labels {
return ret
}
func (enh *EvalNodeHelper) signatureFunc(on bool, names ...string) func(labels.Labels) string {
if enh.sigf == nil {
enh.sigf = make(map[string]string, len(enh.Out))
}
f := signatureFunc(on, enh.lblBuf, names...)
return func(l labels.Labels) string {
enh.lblBuf = l.Bytes(enh.lblBuf)
ret, ok := enh.sigf[string(enh.lblBuf)]
if ok {
return ret
}
ret = f(l)
enh.sigf[string(enh.lblBuf)] = ret
return ret
}
}
// rangeEval evaluates the given expressions, and then for each step calls
// the given funcCall with the values computed for each expression at that
// step. The return value is the combination into time series of all the
@ -1432,22 +1415,28 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
return append(enh.Out, Sample{Point: Point{V: val}}), nil
}, e.LHS, e.RHS)
case lt == parser.ValueTypeVector && rt == parser.ValueTypeVector:
// Function to compute the join signature for each series.
buf := make([]byte, 0, 1024)
sigf := signatureFunc(e.VectorMatching.On, buf, e.VectorMatching.MatchingLabels...)
initSignatures := func(series labels.Labels, h *EvalSeriesHelper) {
h.signature = sigf(series)
}
switch e.Op {
case parser.LAND:
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh), nil
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil
}, e.LHS, e.RHS)
case parser.LOR:
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh), nil
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil
}, e.LHS, e.RHS)
case parser.LUNLESS:
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh), nil
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil
}, e.LHS, e.RHS)
default:
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, enh), nil
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh), nil
}, e.LHS, e.RHS)
}
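The rewiring above replaces the per-step `signatureFunc` cache with a one-off precomputation: `initSignatures` runs once per series before stepping, stores the join key in `EvalSeriesHelper.signature`, and the set and binary operators then index `lhsh[i]` / `rhsh[j]` instead of re-hashing label sets at every evaluation step. A stripped-down sketch of that shape, with simplified types and without the engine's label hashing:

```go
package sketch

// seriesHelper caches the join key computed once per series, playing the
// role of EvalSeriesHelper.signature in the hunk above.
type seriesHelper struct{ signature string }

// precompute derives each series' key from the on(...) matching labels
// before any step is evaluated.
func precompute(series []map[string]string, on []string) []seriesHelper {
	hs := make([]seriesHelper, len(series))
	for i, lbls := range series {
		key := ""
		for _, name := range on {
			key += name + "\xff" + lbls[name] + "\xff" // simplistic key; the engine hashes a label buffer
		}
		hs[i].signature = key
	}
	return hs
}

// and keeps every lhs element whose cached key also occurs on the rhs,
// the same shape as the reworked VectorAnd, minus the PromQL types.
func and(lhs []float64, lhsh, rhsh []seriesHelper) []float64 {
	right := make(map[string]struct{}, len(rhsh))
	for _, h := range rhsh {
		right[h.signature] = struct{}{}
	}
	out := make([]float64, 0, len(lhs))
	for i, v := range lhs {
		if _, ok := right[lhsh[i].signature]; ok {
			out = append(out, v)
		}
	}
	return out
}
```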
@ -1774,62 +1763,72 @@ func (ev *evaluator) matrixIterSlice(it *storage.BufferedSeriesIterator, mint, m
return out
}
func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, enh *EvalNodeHelper) Vector {
func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
if matching.Card != parser.CardManyToMany {
panic("set operations must only use many-to-many matching")
}
sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...)
if len(lhs) == 0 || len(rhs) == 0 {
return nil // Short-circuit: AND with nothing is nothing.
}
// The set of signatures for the right-hand side Vector.
rightSigs := map[string]struct{}{}
// Add all rhs samples to a map so we can easily find matches later.
for _, rs := range rhs {
rightSigs[sigf(rs.Metric)] = struct{}{}
for _, sh := range rhsh {
rightSigs[sh.signature] = struct{}{}
}
for _, ls := range lhs {
for i, ls := range lhs {
// If there's a matching entry in the right-hand side Vector, add the sample.
if _, ok := rightSigs[sigf(ls.Metric)]; ok {
if _, ok := rightSigs[lhsh[i].signature]; ok {
enh.Out = append(enh.Out, ls)
}
}
return enh.Out
}
func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, enh *EvalNodeHelper) Vector {
func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
if matching.Card != parser.CardManyToMany {
panic("set operations must only use many-to-many matching")
}
sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...)
if len(lhs) == 0 { // Short-circuit.
return rhs
} else if len(rhs) == 0 {
return lhs
}
leftSigs := map[string]struct{}{}
// Add everything from the left-hand-side Vector.
for _, ls := range lhs {
leftSigs[sigf(ls.Metric)] = struct{}{}
for i, ls := range lhs {
leftSigs[lhsh[i].signature] = struct{}{}
enh.Out = append(enh.Out, ls)
}
// Add all right-hand side elements which have not been added from the left-hand side.
for _, rs := range rhs {
if _, ok := leftSigs[sigf(rs.Metric)]; !ok {
for j, rs := range rhs {
if _, ok := leftSigs[rhsh[j].signature]; !ok {
enh.Out = append(enh.Out, rs)
}
}
return enh.Out
}
func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatching, enh *EvalNodeHelper) Vector {
func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
if matching.Card != parser.CardManyToMany {
panic("set operations must only use many-to-many matching")
}
sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...)
rightSigs := map[string]struct{}{}
for _, rs := range rhs {
rightSigs[sigf(rs.Metric)] = struct{}{}
// Short-circuit: empty rhs means we will return everything in lhs;
// empty lhs means we will return empty - don't need to build a map.
if len(lhs) == 0 || len(rhs) == 0 {
return lhs
}
for _, ls := range lhs {
if _, ok := rightSigs[sigf(ls.Metric)]; !ok {
rightSigs := map[string]struct{}{}
for _, sh := range rhsh {
rightSigs[sh.signature] = struct{}{}
}
for i, ls := range lhs {
if _, ok := rightSigs[lhsh[i].signature]; !ok {
enh.Out = append(enh.Out, ls)
}
}
@ -1837,17 +1836,20 @@ func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatchi
}
// VectorBinop evaluates a binary operation between two Vectors, excluding set operators.
func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, enh *EvalNodeHelper) Vector {
func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
if matching.Card == parser.CardManyToMany {
panic("many-to-many only allowed for set operators")
}
sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...)
if len(lhs) == 0 || len(rhs) == 0 {
return nil // Short-circuit: nothing is going to match.
}
// The control flow below handles one-to-one or many-to-one matching.
// For one-to-many, swap sidedness and account for the swap when calculating
// values.
if matching.Card == parser.CardOneToMany {
lhs, rhs = rhs, lhs
lhsh, rhsh = rhsh, lhsh
}
// All samples from the rhs hashed by the matching label/values.
@ -1861,8 +1863,8 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
rightSigs := enh.rightSigs
// Add all rhs samples to a map so we can easily find matches later.
for _, rs := range rhs {
sig := sigf(rs.Metric)
for i, rs := range rhs {
sig := rhsh[i].signature
// The rhs is guaranteed to be the 'one' side. Having multiple samples
// with the same signature means that the matching is many-to-many.
if duplSample, found := rightSigs[sig]; found {
@ -1892,8 +1894,8 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
// For all lhs samples find a respective rhs sample and perform
// the binary operation.
for _, ls := range lhs {
sig := sigf(ls.Metric)
for i, ls := range lhs {
sig := lhsh[i].signature
rs, found := rightSigs[sig] // Look for a match in the rhs Vector.
if !found {
@ -2114,6 +2116,8 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64) (float64, bool) {
return lhs, lhs >= rhs
case parser.LTE:
return lhs, lhs <= rhs
case parser.ATAN2:
return math.Atan2(lhs, rhs), true
}
panic(errors.Errorf("operator %q not allowed for operations between Vectors", op))
}
@ -2210,22 +2214,24 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
resultSize := k
if k > inputVecLen {
resultSize = inputVecLen
} else if k == 0 {
resultSize = 1
}
switch op {
case parser.STDVAR, parser.STDDEV:
result[groupingKey].value = 0
case parser.TOPK, parser.QUANTILE:
result[groupingKey].heap = make(vectorByValueHeap, 0, resultSize)
heap.Push(&result[groupingKey].heap, &Sample{
result[groupingKey].heap = make(vectorByValueHeap, 1, resultSize)
result[groupingKey].heap[0] = Sample{
Point: Point{V: s.V},
Metric: s.Metric,
})
}
case parser.BOTTOMK:
result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 0, resultSize)
heap.Push(&result[groupingKey].reverseHeap, &Sample{
result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 1, resultSize)
result[groupingKey].reverseHeap[0] = Sample{
Point: Point{V: s.V},
Metric: s.Metric,
})
}
case parser.GROUP:
result[groupingKey].value = 1
}
@ -2283,6 +2289,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
case parser.TOPK:
if int64(len(group.heap)) < k || group.heap[0].V < s.V || math.IsNaN(group.heap[0].V) {
if int64(len(group.heap)) == k {
if k == 1 { // For k==1 we can replace in-situ.
group.heap[0] = Sample{
Point: Point{V: s.V},
Metric: s.Metric,
}
break
}
heap.Pop(&group.heap)
}
heap.Push(&group.heap, &Sample{
@ -2294,6 +2307,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
case parser.BOTTOMK:
if int64(len(group.reverseHeap)) < k || group.reverseHeap[0].V > s.V || math.IsNaN(group.reverseHeap[0].V) {
if int64(len(group.reverseHeap)) == k {
if k == 1 { // For k==1 we can replace in-situ.
group.reverseHeap[0] = Sample{
Point: Point{V: s.V},
Metric: s.Metric,
}
break
}
heap.Pop(&group.reverseHeap)
}
heap.Push(&group.reverseHeap, &Sample{
@ -2327,7 +2347,9 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
case parser.TOPK:
// The heap keeps the lowest value on top, so reverse it.
sort.Sort(sort.Reverse(aggr.heap))
if len(aggr.heap) > 1 {
sort.Sort(sort.Reverse(aggr.heap))
}
for _, v := range aggr.heap {
enh.Out = append(enh.Out, Sample{
Metric: v.Metric,
@ -2338,7 +2360,9 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
case parser.BOTTOMK:
// The heap keeps the highest value on top, so reverse it.
sort.Sort(sort.Reverse(aggr.reverseHeap))
if len(aggr.reverseHeap) > 1 {
sort.Sort(sort.Reverse(aggr.reverseHeap))
}
for _, v := range aggr.reverseHeap {
enh.Out = append(enh.Out, Sample{
Metric: v.Metric,
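In the aggregation hunks above, topk and bottomk pick up two small optimizations: the heap is seeded with its first sample directly instead of going through heap.Push, and for k == 1 the single slot is overwritten in place (with the final reverse sort skipped for one-element heaps). A tiny sketch of the k == 1 fast path for topk, using the same NaN rule as the code above:

```go
package sketch

import "math"

// top1 shows the k == 1 fast path: topk(1, ...) reduces to a running
// maximum, so the single heap slot is replaced in place and no Pop/Push
// (or final sort) is needed. A NaN current value always loses.
func top1(current, candidate float64) float64 {
	if math.IsNaN(current) || candidate > current {
		return candidate
	}
	return current
}
```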

View file

@ -72,7 +72,7 @@ func TestQueryConcurrency(t *testing.T) {
case <-processing:
// Expected.
case <-time.After(20 * time.Millisecond):
t.Fatalf("Query within concurrency threshold not being executed")
require.Fail(t, "Query within concurrency threshold not being executed")
}
}
@ -81,7 +81,7 @@ func TestQueryConcurrency(t *testing.T) {
select {
case <-processing:
t.Fatalf("Query above concurrency threshold being executed")
require.Fail(t, "Query above concurrency threshold being executed")
case <-time.After(20 * time.Millisecond):
// Expected.
}
@ -93,7 +93,7 @@ func TestQueryConcurrency(t *testing.T) {
case <-processing:
// Expected.
case <-time.After(20 * time.Millisecond):
t.Fatalf("Query within concurrency threshold not being executed")
require.Fail(t, "Query within concurrency threshold not being executed")
}
// Terminate remaining queries.
@ -604,7 +604,7 @@ func TestEngineShutdown(t *testing.T) {
require.Equal(t, errQueryCanceled, res.Err)
query2 := engine.newTestQuery(func(context.Context) error {
t.Fatalf("reached query execution unexpectedly")
require.FailNow(t, "reached query execution unexpectedly")
return nil
})
@ -1121,9 +1121,7 @@ func TestRecoverEvaluatorRuntime(t *testing.T) {
//nolint:govet
a[123] = 1
if err.Error() != "unexpected error" {
t.Fatalf("wrong error message: %q, expected %q", err, "unexpected error")
}
require.EqualError(t, err, "unexpected error")
}
func TestRecoverEvaluatorError(t *testing.T) {
@ -1133,9 +1131,7 @@ func TestRecoverEvaluatorError(t *testing.T) {
e := errors.New("custom error")
defer func() {
if err.Error() != e.Error() {
t.Fatalf("wrong error message: %q, expected %q", err, e)
}
require.EqualError(t, err, e.Error())
}()
defer ev.recover(nil, &err)
@ -1154,12 +1150,8 @@ func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) {
}
defer func() {
if err.Error() != e.Error() {
t.Fatalf("wrong error message: %q, expected %q", err, e)
}
if len(ws) != len(warnings) && ws[0] != warnings[0] {
t.Fatalf("wrong warning message: %q, expected %q", ws[0], warnings[0])
}
require.EqualError(t, err, e.Error())
require.Equal(t, warnings, ws, "wrong warning message")
}()
defer ev.recover(&ws, &err)

Some files were not shown because too many files have changed in this diff.