merge master, fix conflicts

jessicagreben 2020-10-17 08:20:21 -07:00
commit 36ac0b68f1
1268 changed files with 95726 additions and 61080 deletions

.circleci/config.yml

@@ -11,7 +11,7 @@ executors:
# should also be updated.
golang:
docker:
- image: circleci/golang:1.14-node
- image: circleci/golang:1.15-node
fuzzit:
docker:
@@ -27,8 +27,8 @@ jobs:
key: v1
- restore_cache:
keys:
- v1-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
- v1-npm-deps-
- v3-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
- v3-npm-deps-
- run:
command: make
environment:
@@ -41,7 +41,7 @@ jobs:
GOOPTS: "-p 2"
GOMAXPROCS: "2"
- prometheus/check_proto:
version: "3.11.4"
version: "3.12.3"
- prometheus/store_artifact:
file: prometheus
- prometheus/store_artifact:
@@ -49,11 +49,12 @@ jobs:
- go/save-cache:
key: v1
- save_cache:
key: v1-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
key: v3-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
paths:
- web/ui/react-app/node_modules
- /home/circleci/.cache/yarn
- store_test_results:
path: test-results
test_windows:
executor: win/default
working_directory: /go/src/github.com/prometheus/prometheus
@@ -72,6 +73,28 @@ jobs:
environment:
GOGC: "20"
GOOPTS: "-p 2 -mod=vendor"
test_mixins:
executor: golang
steps:
- checkout
- run: go install -mod=vendor ./cmd/promtool/.
- run:
command: go install -mod=readonly github.com/google/go-jsonnet/cmd/jsonnet github.com/google/go-jsonnet/cmd/jsonnetfmt github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
working_directory: ~/project/documentation/prometheus-mixin
- run:
command: make clean
working_directory: ~/project/documentation/prometheus-mixin
- run:
command: jb install
working_directory: ~/project/documentation/prometheus-mixin
- run:
command: make
working_directory: ~/project/documentation/prometheus-mixin
- run:
command: git diff --exit-code
working_directory: ~/project/documentation/prometheus-mixin
fuzzit_regression:
executor: fuzzit
working_directory: /go/src/github.com/prometheus/prometheus
@@ -79,6 +102,7 @@ jobs:
- checkout
- setup_remote_docker
- run: ./fuzzit.sh local-regression
fuzzit_fuzzing:
executor: fuzzit
working_directory: /go/src/github.com/prometheus/prometheus
@@ -101,6 +125,10 @@ workflows:
filters:
tags:
only: /.*/
- test_mixins:
filters:
tags:
only: /.*/
- test_windows:
filters:
tags:

.dockerignore

@@ -5,4 +5,5 @@ data/
!.build/linux-amd64/
!.build/linux-armv7/
!.build/linux-arm64/
!.build/linux-ppc64le/
!.build/linux-s390x/

.github/ISSUE_TEMPLATE/bug_report.md

@@ -2,7 +2,6 @@
name: Bug report
about: Create a report to help us improve.
title: ''
labels: kind/bug
assignees: ''
---

.github/workflows/funcbench.yml

@@ -1,4 +1,6 @@
on: repository_dispatch
on:
repository_dispatch:
types: [funcbench_start]
name: Funcbench Workflow
jobs:
run_funcbench:
@@ -6,16 +8,18 @@ jobs:
if: github.event.action == 'funcbench_start'
runs-on: ubuntu-latest
env:
AUTH_FILE: ${{ secrets.TEST_INFRA_GKE_AUTH }}
AUTH_FILE: ${{ secrets.TEST_INFRA_PROVIDER_AUTH }}
BRANCH: ${{ github.event.client_payload.BRANCH }}
BENCH_FUNC_REGEX: ${{ github.event.client_payload.BENCH_FUNC_REGEX }}
PACKAGE_PATH: ${{ github.event.client_payload.PACKAGE_PATH }}
GITHUB_TOKEN: ${{ secrets.PROMBOT_TOKEN }}
GITHUB_ORG: prometheus
GITHUB_REPO: prometheus
GITHUB_STATUS_TARGET_URL: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}
LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
PROJECT_ID: macro-mile-203600
GKE_PROJECT_ID: macro-mile-203600
PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}
PROVIDER: gke
ZONE: europe-west3-a
steps:
- name: Update status to pending

.github/workflows/prombench.yml

@@ -1,7 +1,9 @@
on: repository_dispatch
on:
repository_dispatch:
types: [prombench_start,prombench_restart,prombench_stop]
name: Prombench Workflow
env:
AUTH_FILE: ${{ secrets.TEST_INFRA_GKE_AUTH }}
AUTH_FILE: ${{ secrets.TEST_INFRA_PROVIDER_AUTH }}
CLUSTER_NAME: test-infra
DOMAIN_NAME: prombench.prometheus.io
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -9,8 +11,9 @@ env:
GITHUB_REPO: prometheus
GITHUB_STATUS_TARGET_URL: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}
LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
PROJECT_ID: macro-mile-203600
GKE_PROJECT_ID: macro-mile-203600
PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}
PROVIDER: gke
RELEASE: ${{ github.event.client_payload.RELEASE }}
ZONE: europe-west3-a
jobs:
@@ -31,7 +34,7 @@ jobs:
uses: docker://prominfra/prombench:master
with:
args: >-
until make all_nodepools_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done;
until make all_nodes_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done;
make deploy;
- name: Update status to failure
if: failure()
@@ -66,7 +69,7 @@ jobs:
uses: docker://prominfra/prombench:master
with:
args: >-
until make all_nodepools_running; do echo "waiting for nodepools to be created"; sleep 10; done;
until make all_nodes_running; do echo "waiting for nodepools to be created"; sleep 10; done;
make clean;
- name: Update status to failure
if: failure()
@@ -101,9 +104,9 @@ jobs:
uses: docker://prominfra/prombench:master
with:
args: >-
until make all_nodepools_running; do echo "waiting for nodepools to be created"; sleep 10; done;
until make all_nodes_running; do echo "waiting for nodepools to be created"; sleep 10; done;
make clean;
until make all_nodepools_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done;
until make all_nodes_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done;
make deploy;
- name: Update status to failure
if: failure()

.gitpod.yml (new file)

@@ -0,0 +1,18 @@
tasks:
- init:
make build
command: |
gp sync-done build
./prometheus --config.file=documentation/examples/prometheus.yml
- command: |
cd web/ui/react-app
gp sync-await build
unset BROWSER
export DANGEROUSLY_DISABLE_HOST_CHECK=true
yarn start
openMode: split-right
ports:
- port: 3000
onOpen: open-preview
- port: 9090
onOpen: ignore

.golangci.yml

@@ -4,6 +4,7 @@ run:
linters:
enable:
- depguard
- golint
issues:
@@ -13,5 +14,12 @@ issues:
- errcheck
linters-settings:
depguard:
list-type: blacklist
include-go-root: true
packages:
- sync/atomic
packages-with-error-message:
- sync/atomic: "Use go.uber.org/atomic instead of sync/atomic"
errcheck:
exclude: scripts/errcheck_excludes.txt
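
The depguard rule above is what drives the `sync/atomic` to `go.uber.org/atomic` swap in `cmd/prometheus/main.go` further down in this commit. A minimal sketch of the pattern the linter now enforces (the `counter` type is illustrative, not from this diff):

```
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

// counter is illustrative: a typed atomic cannot be read or written
// non-atomically by accident, which is what the depguard rule enforces.
type counter struct {
	value atomic.Int64 // before: value int64 + atomic.StoreInt64/LoadInt64
}

func (c *counter) Set(v int64) { c.value.Store(v) }
func (c *counter) Get() int64  { return c.value.Load() }

func main() {
	var c counter
	c.Set(42)
	fmt.Println(c.Get()) // 42
}
```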

.promu.yml

@@ -1,7 +1,7 @@
go:
# Whenever the Go version is updated here,
# .circle/config.yml should also be updated.
version: 1.14
version: 1.15
repository:
path: github.com/prometheus/prometheus
build:
@@ -30,7 +30,6 @@ crossbuild:
- linux/amd64
- linux/386
- darwin/amd64
- darwin/386
- windows/amd64
- windows/386
- freebsd/amd64

CHANGELOG.md

@@ -1,3 +1,65 @@
## 2.22.0-rc.0 / 2020-10-07
As announced in the 2.21.0 release notes, the experimental gRPC API v2 has been
removed.
* [CHANGE] web: Remove APIv2. #7935
* [ENHANCEMENT] React UI: Implement missing TSDB head stats section. #7876
* [ENHANCEMENT] UI: Add Collapse all button. #6957
* [ENHANCEMENT] UI: Clarify alert state toggle via checkbox icon. #7936
* [ENHANCEMENT] Add `rule_group_last_evaluation_samples` and `prometheus_tsdb_data_replay_duration_seconds` metrics. #7737 #7977
* [ENHANCEMENT] Gracefully handle unknown WAL record types. #8004
* [ENHANCEMENT] Issue a warning for 64 bit systems running 32 bit binaries. #8012
* [BUGFIX] Adjust scrape timestamps to align them to the intended schedule, effectively reducing block size. Workaround for a regression in go1.14+. #7976
* [BUGFIX] promtool: Ensure alert rules are marked as restored in unit tests. #7661
* [BUGFIX] Eureka: Fix service discovery when compiled in 32-bit. #7964
* [BUGFIX] Don't do literal regex matching optimisation when case insensitive. #8013
* [BUGFIX] Fix classic UI sometimes running queries for instant query when in range query mode. #7984
## 2.21.0 / 2020-09-11
This release is built with Go 1.15, which deprecates [X.509 CommonName](https://golang.org/doc/go1.15#commonname)
in TLS certificates validation.
In the unlikely case that you use the gRPC API v2 (which is limited to TSDB
admin commands), please note that we will remove this experimental API in the
next minor release 2.22.
* [CHANGE] Disable HTTP/2 because of concerns with the Go HTTP/2 client. #7588 #7701
* [CHANGE] PromQL: `query_log_file` path is now relative to the config file. #7701
* [CHANGE] Promtool: Replace the tsdb command line tool by a promtool tsdb subcommand. #6088
* [CHANGE] Rules: Label `rule_group_iterations` metric with group name. #7823
* [FEATURE] Eureka SD: New service discovery. #3369
* [FEATURE] Hetzner SD: New service discovery. #7822
* [FEATURE] Kubernetes SD: Support Kubernetes EndpointSlices. #6838
* [FEATURE] Scrape: Add per scrape-config targets limit. #7554
* [ENHANCEMENT] Support composite durations in PromQL, config and UI, e.g. 1h30m. #7713 #7833
* [ENHANCEMENT] DNS SD: Add SRV record target and port meta labels. #7678
* [ENHANCEMENT] Docker Swarm SD: Support tasks and service without published ports. #7686
* [ENHANCEMENT] PromQL: Reduce the amount of data queried by remote read when a subquery has an offset. #7667
* [ENHANCEMENT] Promtool: Add `--time` option to query instant command. #7829
* [ENHANCEMENT] UI: Respect the `--web.page-title` parameter in the React UI. #7607
* [ENHANCEMENT] UI: Add duration, labels, annotations to alerts page in the React UI. #7605
* [ENHANCEMENT] UI: Add duration on the React UI rules page, hide annotation and labels if empty. #7606
* [BUGFIX] API: Deduplicate series in /api/v1/series. #7862
* [BUGFIX] PromQL: Drop metric name in bool comparison between two instant vectors. #7819
* [BUGFIX] PromQL: Exit with an error when time parameters can't be parsed. #7505
* [BUGFIX] Remote read: Re-add accidentally removed tracing for remote-read requests. #7916
* [BUGFIX] Rules: Detect extra fields in rule files. #7767
* [BUGFIX] Rules: Disallow overwriting the metric name in the `labels` section of recording rules. #7787
* [BUGFIX] Rules: Keep evaluation timestamp across reloads. #7775
* [BUGFIX] Scrape: Do not stop scrapes in progress during reload. #7752
* [BUGFIX] TSDB: Fix `chunks.HeadReadWriter: maxt of the files are not set` error. #7856
* [BUGFIX] TSDB: Delete blocks atomically to prevent corruption when there is a panic/crash during deletion. #7772
* [BUGFIX] Triton SD: Fix a panic when triton_sd_config is nil. #7671
* [BUGFIX] UI: Fix react UI bug with series going on and off. #7804
* [BUGFIX] UI: Fix styling bug for target labels with special names in React UI. #7902
* [BUGFIX] Web: Stop CMUX and GRPC servers even with stale connections, preventing the server to stop on SIGTERM. #7810
## 2.20.1 / 2020-08-05
* [BUGFIX] SD: Reduce the Consul watch timeout to 2m and adjust the request timeout accordingly. #7724
## 2.20.0 / 2020-07-22
This release changes WAL compression from opt-in to default. WAL compression will prevent a downgrade to v2.10 or earlier without deleting the WAL. Disable WAL compression explicitly by setting the command line flag `--no-storage.tsdb.wal-compression` if you require downgrading to v2.10 or earlier.

CONTRIBUTING.md

@@ -28,6 +28,8 @@ Should you wish to work on an issue, please claim it first by commenting on the
Please check the [`low-hanging-fruit`](https://github.com/prometheus/prometheus/issues?q=is%3Aissue+is%3Aopen+label%3A%22low+hanging+fruit%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community).
You can [spin up a prebuilt dev environment](https://gitpod.io/#https://github.com/prometheus/prometheus) using Gitpod.io.
For complete instructions on how to compile see: [Building From Source](https://github.com/prometheus/prometheus#building-from-source)
For quickly compiling and testing your changes do:
@@ -56,7 +58,7 @@ All our issues are regularly tagged so that you can also filter down the issues
## Dependency management
The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.13 or greater installed.
The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages.
All dependencies are vendored in the `vendor/` directory.

MAINTAINERS.md

@@ -8,7 +8,7 @@
* `prometheus-mixin`: @beorn7
* `storage`
* `remote`: @csmarchbanks, @cstyan, @bwplotka
* `tsdb`: @codesome, @krasi-georgiev
* `tsdb`: @codesome, @krasi-georgiev, @bwplotka
* `web`
* `ui`: @juliusv
* `Makefile` and related build configuration: @simonpasquier, @SuperQ

Makefile

@@ -12,7 +12,7 @@
# limitations under the License.
# Needs to be defined before including Makefile.common to auto-generate targets
DOCKER_ARCHS ?= amd64 armv7 arm64 s390x
DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le s390x
REACT_APP_PATH = web/ui/react-app
REACT_APP_SOURCE_FILES = $(wildcard $(REACT_APP_PATH)/public/* $(REACT_APP_PATH)/src/* $(REACT_APP_PATH)/tsconfig.json)

Makefile.common

@@ -78,7 +78,7 @@ ifneq ($(shell which gotestsum),)
endif
endif
PROMU_VERSION ?= 0.5.0
PROMU_VERSION ?= 0.6.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
GOLANGCI_LINT :=

README.md

@@ -6,6 +6,7 @@
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/prometheus)](https://goreportcard.com/report/github.com/prometheus/prometheus)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/486/badge)](https://bestpractices.coreinfrastructure.org/projects/486)
[![fuzzit](https://app.fuzzit.dev/badge?org_id=prometheus&branch=master)](https://fuzzit.dev)
[![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/prometheus/prometheus)
Visit [prometheus.io](https://prometheus.io) for the full documentation,
examples and guides.
@@ -18,7 +19,7 @@ to be true.
Prometheus's main distinguishing features as compared to other monitoring systems are:
- a **multi-dimensional** data model (timeseries defined by metric name and set of key/value dimensions)
- a **flexible query language** to leverage this dimensionality
- PromQL, a **powerful and flexible query language** to leverage this dimensionality
- no dependency on distributed storage; **single server nodes are autonomous**
- timeseries collection happens via a **pull model** over HTTP
- **pushing timeseries** is supported via an intermediary gateway
@@ -43,8 +44,6 @@ is the recommended way of installing Prometheus.
See the [Installing](https://prometheus.io/docs/introduction/install/)
chapter in the documentation for all the details.
Debian packages [are available](https://packages.debian.org/sid/net/prometheus).
### Docker images
Docker images are available on [Quay.io](https://quay.io/repository/prometheus/prometheus) or [Docker Hub](https://hub.docker.com/r/prom/prometheus/).
@@ -94,6 +93,7 @@ The Makefile provides several targets:
* *format*: format the source code
* *vet*: check the source code for common errors
* *docker*: build a docker container for the current `HEAD`
* *assets*: build the new experimental React UI
## React UI Development

RELEASE.md

@@ -38,7 +38,7 @@ The release shepherd is responsible for the entire release series of a minor rel
* We aim to keep the master branch in a working state at all times. In principle, it should be possible to cut a release from master at any time. In practice, things might not work out as nicely. A few days before the pre-release is scheduled, the shepherd should check the state of master. Following their best judgement, the shepherd should try to expedite bug fixes that are still in progress but should make it into the release. On the other hand, the shepherd may hold back merging last-minute invasive and risky changes that are better suited for the next minor release.
* On the date listed in the table above, the release shepherd cuts the first pre-release (using the suffix `-rc.0`) and creates a new branch called `release-<major>.<minor>` starting at the commit tagged for the pre-release. In general, a pre-release is considered a release candidate (that's what `rc` stands for) and should therefore not contain any known bugs that are planned to be fixed in the final release.
* With the pre-release, the release shepherd is responsible for running and monitoring a benchmark run of the pre-release for 3 days, after which, if successful, the pre-release is promoted to a stable release.
* If regressions or critical bugs are detected, they need to get fixed before cutting a new pre-release (called `-rc.1`, `-rc.2`, etc.).
See the next section for details on cutting an individual release.
@@ -62,13 +62,7 @@ Maintaining the release branches for older minor releases happens on a best effo
### 0. Updating dependencies
A few days before a major or minor release, consider updating the dependencies:
```
make update-go-deps
git add go.mod go.sum vendor
git commit -m "Update dependencies"
```
A few days before a major or minor release, consider updating the dependencies.
Then create a pull request against the master branch.
@@ -81,6 +75,32 @@ you can skip the dependency update or only update select dependencies. In such a
case, you have to create an issue or pull request in the GitHub project for
later follow-up.
#### Updating Go dependencies
```
make update-go-deps
git add go.mod go.sum vendor
git commit -m "Update dependencies"
```
#### Updating React dependencies
Either upgrade the dependencies within their existing version constraints as specified in the `package.json` file (see https://docs.npmjs.com/files/package.json#dependencies):
```
cd web/ui/react-app
yarn upgrade
git add yarn.lock
```
Or alternatively, update all dependencies to their latest major versions. This is potentially more disruptive and will require more follow-up fixes, but should be done from time to time (use your best judgement):
```
cd web/ui/react-app
yarn upgrade --latest
git add package.json yarn.lock
```
### 1. Prepare your release
At the start of a new major or minor release cycle create the corresponding release branch based on the master branch. For example if we're releasing `2.17.0` and the previous stable release is `2.16.0` we need to create a `release-2.17` branch. Note that all releases are handled in protected release branches, see the above `Branch management and versioning` section. Release candidates and patch releases for any given major or minor release happen in the same `release-<major>.<minor>` branch. Do not create `release-<version>` for patch or release candidate releases.
@@ -132,6 +152,4 @@ For release candidate versions (`v2.16.0-rc.0`), run the benchmark for 3 days us
If the release has happened in the latest release branch, merge the changes into master.
To update the docs, a PR needs to be created to `prometheus/docs`. See [this PR](https://github.com/prometheus/docs/pull/952/files) for inspiration (note: only actually merge this for final releases, not for pre-releases like a release candidate).
Once the binaries have been uploaded, announce the release on `prometheus-announce@googlegroups.com`. (Please do not use `prometheus-users@googlegroups.com` for announcements anymore.) Check out previous announcement mails for inspiration.

VERSION

@@ -1 +1 @@
2.20.0
2.22.0-rc.0

cmd/prometheus/main.go

@@ -19,6 +19,7 @@ import (
"fmt"
"io"
"math"
"math/bits"
"net"
"net/http"
_ "net/http/pprof" // Comment this line to disable pprof endpoint.
@@ -30,7 +31,6 @@ import (
"runtime"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
@@ -47,13 +47,14 @@ import (
"github.com/prometheus/common/version"
jcfg "github.com/uber/jaeger-client-go/config"
jprom "github.com/uber/jaeger-lib/metrics/prometheus"
"go.uber.org/atomic"
kingpin "gopkg.in/alecthomas/kingpin.v2"
"k8s.io/klog"
klog "k8s.io/klog"
klogv2 "k8s.io/klog/v2"
promlogflag "github.com/prometheus/common/promlog/flag"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery"
sd_config "github.com/prometheus/prometheus/discovery/config"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/logging"
@@ -67,6 +68,8 @@ import (
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/util/strutil"
"github.com/prometheus/prometheus/web"
_ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations.
)
var (
@@ -238,6 +241,9 @@ func main() {
a.Flag("rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
Default("1m").SetValue(&cfg.resendDelay)
a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to 2ms to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
Hidden().Default("true").BoolVar(&scrape.AlignScrapeTimestamps)
a.Flag("alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications.").
Default("10000").IntVar(&cfg.notifier.QueueCapacity)
@@ -284,6 +290,14 @@ func main() {
level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "err", err)
os.Exit(2)
}
// Now that the validity of the config is established, set the config
// success metrics accordingly, although the config isn't really loaded
// yet. This will happen later (including setting these metrics again),
// but if we don't do it now, the metrics will stay at zero until the
// startup procedure is complete, which might take long enough to
// trigger alerts about an invalid config.
configSuccess.Set(1)
configSuccessTime.SetToCurrentTime()
cfg.web.ReadTimeout = time.Duration(cfg.webTimeout)
// Default -web.route-prefix to path of -web.external-url.
@@ -341,8 +355,14 @@ func main() {
// Above level 6, the k8s client would log bearer tokens in clear-text.
klog.ClampLevel(6)
klog.SetLogger(log.With(logger, "component", "k8s_client_runtime"))
klogv2.ClampLevel(6)
klogv2.SetLogger(log.With(logger, "component", "k8s_client_runtime"))
level.Info(logger).Log("msg", "Starting Prometheus", "version", version.Info())
if bits.UintSize < 64 {
level.Warn(logger).Log("msg", "This Prometheus binary has not been compiled for a 64-bit architecture. Due to virtual memory constraints of 32-bit systems, it is highly recommended to switch to a 64-bit binary of Prometheus.", "GOARCH", runtime.GOARCH)
}
level.Info(logger).Log("build_context", version.BuildContext())
level.Info(logger).Log("host_details", prom_runtime.Uname())
level.Info(logger).Log("fd_limits", prom_runtime.FdLimits())
@@ -436,56 +456,73 @@ func main() {
conntrack.DialWithTracing(),
)
reloaders := []func(cfg *config.Config) error{
remoteStorage.ApplyConfig,
webHandler.ApplyConfig,
func(cfg *config.Config) error {
if cfg.GlobalConfig.QueryLogFile == "" {
queryEngine.SetQueryLogger(nil)
return nil
}
l, err := logging.NewJSONFileLogger(cfg.GlobalConfig.QueryLogFile)
if err != nil {
return err
}
queryEngine.SetQueryLogger(l)
return nil
},
// The Scrape and notifier managers need to reload before the Discovery manager as
// they need to read the most updated config when receiving the new targets list.
scrapeManager.ApplyConfig,
func(cfg *config.Config) error {
c := make(map[string]sd_config.ServiceDiscoveryConfig)
for _, v := range cfg.ScrapeConfigs {
c[v.JobName] = v.ServiceDiscoveryConfig
}
return discoveryManagerScrape.ApplyConfig(c)
},
notifierManager.ApplyConfig,
func(cfg *config.Config) error {
c := make(map[string]sd_config.ServiceDiscoveryConfig)
for k, v := range cfg.AlertingConfig.AlertmanagerConfigs.ToMap() {
c[k] = v.ServiceDiscoveryConfig
}
return discoveryManagerNotify.ApplyConfig(c)
},
func(cfg *config.Config) error {
// Get all rule files matching the configuration paths.
var files []string
for _, pat := range cfg.RuleFiles {
fs, err := filepath.Glob(pat)
if err != nil {
// The only error can be a bad pattern.
return errors.Wrapf(err, "error retrieving rule files for %s", pat)
reloaders := []reloader{
{
name: "remote_storage",
reloader: remoteStorage.ApplyConfig,
}, {
name: "web_handler",
reloader: webHandler.ApplyConfig,
}, {
name: "query_engine",
reloader: func(cfg *config.Config) error {
if cfg.GlobalConfig.QueryLogFile == "" {
queryEngine.SetQueryLogger(nil)
return nil
}
files = append(files, fs...)
}
return ruleManager.Update(
time.Duration(cfg.GlobalConfig.EvaluationInterval),
files,
cfg.GlobalConfig.ExternalLabels,
)
l, err := logging.NewJSONFileLogger(cfg.GlobalConfig.QueryLogFile)
if err != nil {
return err
}
queryEngine.SetQueryLogger(l)
return nil
},
}, {
// The Scrape and notifier managers need to reload before the Discovery manager as
// they need to read the most updated config when receiving the new targets list.
name: "scrape",
reloader: scrapeManager.ApplyConfig,
}, {
name: "scrape_sd",
reloader: func(cfg *config.Config) error {
c := make(map[string]discovery.Configs)
for _, v := range cfg.ScrapeConfigs {
c[v.JobName] = v.ServiceDiscoveryConfigs
}
return discoveryManagerScrape.ApplyConfig(c)
},
}, {
name: "notify",
reloader: notifierManager.ApplyConfig,
}, {
name: "notify_sd",
reloader: func(cfg *config.Config) error {
c := make(map[string]discovery.Configs)
for k, v := range cfg.AlertingConfig.AlertmanagerConfigs.ToMap() {
c[k] = v.ServiceDiscoveryConfigs
}
return discoveryManagerNotify.ApplyConfig(c)
},
}, {
name: "rules",
reloader: func(cfg *config.Config) error {
// Get all rule files matching the configuration paths.
var files []string
for _, pat := range cfg.RuleFiles {
fs, err := filepath.Glob(pat)
if err != nil {
// The only error can be a bad pattern.
return errors.Wrapf(err, "error retrieving rule files for %s", pat)
}
files = append(files, fs...)
}
return ruleManager.Update(
time.Duration(cfg.GlobalConfig.EvaluationInterval),
files,
cfg.GlobalConfig.ExternalLabels,
)
},
},
}
@@ -696,7 +733,13 @@ func main() {
return errors.Wrapf(err, "opening storage failed")
}
level.Info(logger).Log("fs_type", prom_runtime.Statfs(cfg.localStoragePath))
switch fsType := prom_runtime.Statfs(cfg.localStoragePath); fsType {
case "NFS_SUPER_MAGIC":
level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
default:
level.Info(logger).Log("fs_type", fsType)
}
level.Info(logger).Log("msg", "TSDB started")
level.Debug(logger).Log("msg", "TSDB options",
"MinBlockDuration", cfg.tsdb.MinBlockDuration,
@@ -801,21 +844,28 @@ func openDBWithMetrics(dir string, logger log.Logger, reg prometheus.Registerer,
}
type safePromQLNoStepSubqueryInterval struct {
value int64
value atomic.Int64
}
func durationToInt64Millis(d time.Duration) int64 {
return int64(d / time.Millisecond)
}
func (i *safePromQLNoStepSubqueryInterval) Set(ev model.Duration) {
atomic.StoreInt64(&i.value, durationToInt64Millis(time.Duration(ev)))
i.value.Store(durationToInt64Millis(time.Duration(ev)))
}
func (i *safePromQLNoStepSubqueryInterval) Get(int64) int64 {
return atomic.LoadInt64(&i.value)
return i.value.Load()
}
func reloadConfig(filename string, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...func(*config.Config) error) (err error) {
type reloader struct {
name string
reloader func(*config.Config) error
}
func reloadConfig(filename string, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) {
start := time.Now()
timings := []interface{}{}
level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)
defer func() {
@@ -834,17 +884,20 @@ func reloadConfig(filename string, logger log.Logger, noStepSuqueryInterval *saf
failed := false
for _, rl := range rls {
if err := rl(conf); err != nil {
rstart := time.Now()
if err := rl.reloader(conf); err != nil {
level.Error(logger).Log("msg", "Failed to apply configuration", "err", err)
failed = true
}
timings = append(timings, rl.name, time.Since(rstart))
}
if failed {
return errors.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
}
noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
level.Info(logger).Log("msg", "Completed loading of configuration file", "filename", filename)
l := []interface{}{"msg", "Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)}
level.Info(logger).Log(append(l, timings...)...)
return nil
}
@@ -984,9 +1037,9 @@ func (s *readyStorage) ChunkQuerier(ctx context.Context, mint, maxt int64) (stor
}
// Appender implements the Storage interface.
func (s *readyStorage) Appender() storage.Appender {
func (s *readyStorage) Appender(ctx context.Context) storage.Appender {
if x := s.get(); x != nil {
return x.Appender()
return x.Appender(ctx)
}
return notReadyAppender{}
}

cmd/prometheus/main_test.go

@@ -263,7 +263,7 @@ func TestTimeMetrics(t *testing.T) {
"prometheus_tsdb_head_max_time_seconds",
))
app := db.Appender()
app := db.Appender(context.Background())
_, err = app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 1000, 1)
testutil.Ok(t, err)
_, err = app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 2000, 1)
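
The hunk above tracks a signature change that runs through this whole commit: `storage.Appender()` now takes a `context.Context`. A hedged, self-contained sketch of the new call shape (the `appendSample` helper is illustrative, not part of this diff):

```
package example

import (
	"context"

	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendSample shows the post-change call shape: obtain the appender with a
// context, add samples, then commit (or roll back on error).
func appendSample(ctx context.Context, s storage.Storage) error {
	app := s.Appender(ctx)
	if _, err := app.Add(labels.FromStrings("__name__", "a"), 1000, 1); err != nil {
		_ = app.Rollback()
		return err
	}
	return app.Commit()
}
```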

cmd/promtool/archive.go

@@ -21,7 +21,7 @@ import (
"github.com/pkg/errors"
)
const filePerm = 0644
const filePerm = 0666
type tarGzFileWriter struct {
tarWriter *tar.Writer

cmd/promtool/main.go

@@ -41,7 +41,11 @@ import (
"gopkg.in/alecthomas/kingpin.v2"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery/file"
"github.com/prometheus/prometheus/discovery/kubernetes"
"github.com/prometheus/prometheus/pkg/rulefmt"
_ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations.
)
func main() {
@@ -67,9 +71,11 @@ func main() {
queryCmd := app.Command("query", "Run query against a Prometheus server.")
queryCmdFmt := queryCmd.Flag("format", "Output format of the query.").Short('o').Default("promql").Enum("promql", "json")
queryInstantCmd := queryCmd.Command("instant", "Run instant query.")
queryServer := queryInstantCmd.Arg("server", "Prometheus server to query.").Required().String()
queryExpr := queryInstantCmd.Arg("expr", "PromQL query expression.").Required().String()
queryInstantServer := queryInstantCmd.Arg("server", "Prometheus server to query.").Required().String()
queryInstantExpr := queryInstantCmd.Arg("expr", "PromQL query expression.").Required().String()
queryInstantTime := queryInstantCmd.Flag("time", "Query evaluation time (RFC3339 or Unix timestamp).").String()
queryRangeCmd := queryCmd.Command("range", "Run range query.")
queryRangeServer := queryRangeCmd.Arg("server", "Prometheus server to query.").Required().String()
@@ -165,7 +171,7 @@ func main() {
os.Exit(CheckMetrics())
case queryInstantCmd.FullCommand():
os.Exit(QueryInstant(*queryServer, *queryExpr, p))
os.Exit(QueryInstant(*queryInstantServer, *queryInstantExpr, *queryInstantTime, p))
case queryRangeCmd.FullCommand():
os.Exit(QueryRange(*queryRangeServer, *queryRangeHeaders, *queryRangeExpr, *queryRangeBegin, *queryRangeEnd, *queryRangeStep, p))
@@ -282,24 +288,25 @@ func checkConfig(filename string) ([]string, error) {
return nil, err
}
for _, kd := range scfg.ServiceDiscoveryConfig.KubernetesSDConfigs {
if err := checkTLSConfig(kd.HTTPClientConfig.TLSConfig); err != nil {
return nil, err
}
}
for _, filesd := range scfg.ServiceDiscoveryConfig.FileSDConfigs {
for _, file := range filesd.Files {
files, err := filepath.Glob(file)
if err != nil {
for _, c := range scfg.ServiceDiscoveryConfigs {
switch c := c.(type) {
case *kubernetes.SDConfig:
if err := checkTLSConfig(c.HTTPClientConfig.TLSConfig); err != nil {
return nil, err
}
if len(files) != 0 {
// There was at least one match for the glob and we can assume checkFileExists
// for all matches would pass, we can continue the loop.
continue
case *file.SDConfig:
for _, file := range c.Files {
files, err := filepath.Glob(file)
if err != nil {
return nil, err
}
if len(files) != 0 {
// There was at least one match for the glob and we can assume checkFileExists
// for all matches would pass, we can continue the loop.
continue
}
fmt.Printf(" WARNING: file %q for file_sd in scrape job %q does not exist\n", file, scfg.JobName)
}
fmt.Printf(" WARNING: file %q for file_sd in scrape job %q does not exist\n", file, scfg.JobName)
}
}
}
@@ -441,7 +448,7 @@ func CheckMetrics() int {
}
// QueryInstant performs an instant query against a Prometheus server.
func QueryInstant(url, query string, p printer) int {
func QueryInstant(url, query, evalTime string, p printer) int {
config := api.Config{
Address: url,
}
@@ -453,11 +460,20 @@
return 1
}
eTime := time.Now()
if evalTime != "" {
eTime, err = parseTime(evalTime)
if err != nil {
fmt.Fprintln(os.Stderr, "error parsing evaluation time:", err)
return 1
}
}
// Run query against client.
api := v1.NewAPI(c)
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
val, _, err := api.Query(ctx, query, time.Now()) // Ignoring warnings for now.
val, _, err := api.Query(ctx, query, eTime) // Ignoring warnings for now.
cancel()
if err != nil {
fmt.Fprintln(os.Stderr, "query error:", err)

cmd/promtool/main_test.go

@@ -19,6 +19,8 @@ import (
"net/http/httptest"
"testing"
"time"
"github.com/prometheus/prometheus/util/testutil"
)
func TestQueryRange(t *testing.T) {
@@ -27,41 +29,31 @@ func TestQueryRange(t *testing.T) {
p := &promqlPrinter{}
exitCode := QueryRange(s.URL, map[string]string{}, "up", "0", "300", 0, p)
expectedPath := "/api/v1/query_range"
gotPath := getRequest().URL.Path
if gotPath != expectedPath {
t.Errorf("unexpected URL path %s (wanted %s)", gotPath, expectedPath)
}
testutil.Equals(t, "/api/v1/query_range", getRequest().URL.Path)
form := getRequest().Form
actual := form.Get("query")
if actual != "up" {
t.Errorf("unexpected value %s for query", actual)
}
actual = form.Get("step")
if actual != "1" {
t.Errorf("unexpected value %s for step", actual)
}
if exitCode > 0 {
t.Error()
}
testutil.Equals(t, "up", form.Get("query"))
testutil.Equals(t, "1", form.Get("step"))
testutil.Equals(t, 0, exitCode)
exitCode = QueryRange(s.URL, map[string]string{}, "up", "0", "300", 10*time.Millisecond, p)
gotPath = getRequest().URL.Path
if gotPath != expectedPath {
t.Errorf("unexpected URL path %s (wanted %s)", gotPath, expectedPath)
}
testutil.Equals(t, "/api/v1/query_range", getRequest().URL.Path)
form = getRequest().Form
actual = form.Get("query")
if actual != "up" {
t.Errorf("unexpected value %s for query", actual)
}
actual = form.Get("step")
if actual != "0.01" {
t.Errorf("unexpected value %s for step", actual)
}
if exitCode > 0 {
t.Error()
}
testutil.Equals(t, "up", form.Get("query"))
testutil.Equals(t, "0.01", form.Get("step"))
testutil.Equals(t, 0, exitCode)
}
func TestQueryInstant(t *testing.T) {
s, getRequest := mockServer(200, `{"status": "success", "data": {"resultType": "vector", "result": []}}`)
defer s.Close()
p := &promqlPrinter{}
exitCode := QueryInstant(s.URL, "up", "300", p)
testutil.Equals(t, "/api/v1/query", getRequest().URL.Path)
form := getRequest().Form
testutil.Equals(t, "up", form.Get("query"))
testutil.Equals(t, "300", form.Get("time"))
testutil.Equals(t, 0, exitCode)
}
func mockServer(code int, body string) (*httptest.Server, func() *http.Request) {

cmd/promtool/testdata/unittest.yml

@@ -8,6 +8,14 @@ tests:
input_series:
- series: 'up{job="prometheus", instance="localhost:9090"}'
values: "0+0x1440"
promql_expr_test:
- expr: count(ALERTS) by (alertname, alertstate)
eval_time: 4m
exp_samples:
- labels: '{alertname="InstanceDown",alertstate="pending"}'
value: 1
alert_rule_test:
- eval_time: 1d
alertname: InstanceDown

cmd/promtool/tsdb.go

@@ -30,6 +30,7 @@ import (
"text/tabwriter"
"time"
"github.com/alecthomas/units"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
"github.com/prometheus/prometheus/pkg/labels"
@@ -199,7 +200,7 @@ func (b *writeBenchmark) ingestScrapesShard(lbls []labels.Labels, scrapeCount in
total := uint64(0)
for i := 0; i < scrapeCount; i++ {
app := b.storage.Appender()
app := b.storage.Appender(context.TODO())
ts += timeDelta
for _, s := range scrape {
@@ -363,12 +364,12 @@ func printBlocks(blocks []tsdb.BlockReader, humanReadable bool) {
tw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
defer tw.Flush()
fmt.Fprintln(tw, "BLOCK ULID\tMIN TIME\tMAX TIME\tDURATION\tNUM SAMPLES\tNUM CHUNKS\tNUM SERIES")
fmt.Fprintln(tw, "BLOCK ULID\tMIN TIME\tMAX TIME\tDURATION\tNUM SAMPLES\tNUM CHUNKS\tNUM SERIES\tSIZE")
for _, b := range blocks {
meta := b.Meta()
fmt.Fprintf(tw,
"%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
"%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
meta.ULID,
getFormatedTime(meta.MinTime, humanReadable),
getFormatedTime(meta.MaxTime, humanReadable),
@@ -376,6 +377,7 @@ func printBlocks(blocks []tsdb.BlockReader, humanReadable bool) {
meta.Stats.NumSamples,
meta.Stats.NumChunks,
meta.Stats.NumSeries,
getFormatedBytes(b.Size(), humanReadable),
)
}
}
@@ -387,6 +389,13 @@ func getFormatedTime(timestamp int64, humanReadable bool) string {
return strconv.FormatInt(timestamp, 10)
}
func getFormatedBytes(bytes int64, humanReadable bool) string {
if humanReadable {
return units.Base2Bytes(bytes).String()
}
return strconv.FormatInt(bytes, 10)
}
func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) {
db, err := tsdb.OpenDBReadOnly(path, nil)
if err != nil {

cmd/promtool/unittest.go

@@ -221,6 +221,16 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, grou
// Current index in alertEvalTimes what we are looking at.
curr := 0
for _, g := range groups {
for _, r := range g.Rules() {
if alertRule, ok := r.(*rules.AlertingRule); ok {
// Mark alerting rules as restored, to ensure the ALERTS timeseries is
// created when they run.
alertRule.SetRestored(true)
}
}
}
var errs []error
for ts := mint; ts.Before(maxt); ts = ts.Add(evalInterval) {
// Collects the alerts asked for unit testing.

config/config.go

@@ -23,11 +23,11 @@ import (
"time"
"github.com/pkg/errors"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
yaml "gopkg.in/yaml.v2"
sd_config "github.com/prometheus/prometheus/discovery/config"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/relabel"
)
@@ -48,7 +48,6 @@ func Load(s string) (*Config, error) {
if err != nil {
return nil, err
}
cfg.original = s
return cfg, nil
}
@@ -62,7 +61,7 @@ func LoadFile(filename string) (*Config, error) {
if err != nil {
return nil, errors.Wrapf(err, "parsing YAML file %s", filename)
}
resolveFilepaths(filepath.Dir(filename), cfg)
cfg.SetDirectory(filepath.Dir(filename))
return cfg, nil
}
@@ -105,16 +104,16 @@ var (
// DefaultQueueConfig is the default remote queue configuration.
DefaultQueueConfig = QueueConfig{
// With a maximum of 1000 shards, assuming an average of 100ms remote write
// time and 100 samples per batch, we will be able to push 1M samples/s.
MaxShards: 1000,
// With a maximum of 200 shards, assuming an average of 100ms remote write
// time and 500 samples per batch, we will be able to push 1M samples/s.
MaxShards: 200,
MinShards: 1,
MaxSamplesPerSend: 100,
MaxSamplesPerSend: 500,
// Each shard will have a max of 500 samples pending in it's channel, plus the pending
// samples that have been enqueued. Theoretically we should only ever have about 600 samples
// per shard pending. At 1000 shards that's 600k.
Capacity: 500,
// Each shard will have a max of 2500 samples pending in its channel, plus the pending
// samples that have been enqueued. Theoretically we should only ever have about 3000 samples
// per shard pending. At 200 shards that's 600k.
Capacity: 2500,
BatchSendDeadline: model.Duration(5 * time.Second),
// Backoff times for retrying a batch of samples on recoverable errors.
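
The arithmetic behind the updated comment is worth spelling out: sustained throughput is shards × samples-per-send ÷ average send duration, so the new defaults keep the same 1M samples/s ceiling with far fewer shards. A quick back-of-the-envelope check (the 100ms latency is the assumption stated in the comment itself):

```
package main

import "fmt"

func main() {
	const (
		maxShards      = 200
		samplesPerSend = 500
		sendSeconds    = 0.1 // assumed average remote-write round trip
	)
	// 200 shards * 500 samples / 0.1s = 1,000,000 samples/s.
	fmt.Printf("%.0f samples/s\n", maxShards*samplesPerSend/sendSeconds)
	// Pending capacity: 2500 queued per shard plus ~500 in flight is the
	// "about 3000 samples per shard" above; times 200 shards is ~600k.
}
```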
@@ -137,80 +136,23 @@ type Config struct {
RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"`
// original is the input from which the config was parsed.
original string
}
// resolveFilepaths joins all relative paths in a configuration
// with a given base directory.
func resolveFilepaths(baseDir string, cfg *Config) {
join := func(fp string) string {
if len(fp) > 0 && !filepath.IsAbs(fp) {
fp = filepath.Join(baseDir, fp)
}
return fp
// SetDirectory joins any relative file paths with dir.
func (c *Config) SetDirectory(dir string) {
c.GlobalConfig.SetDirectory(dir)
c.AlertingConfig.SetDirectory(dir)
for i, file := range c.RuleFiles {
c.RuleFiles[i] = config.JoinDir(dir, file)
}
for i, rf := range cfg.RuleFiles {
cfg.RuleFiles[i] = join(rf)
for _, c := range c.ScrapeConfigs {
c.SetDirectory(dir)
}
tlsPaths := func(cfg *config_util.TLSConfig) {
cfg.CAFile = join(cfg.CAFile)
cfg.CertFile = join(cfg.CertFile)
cfg.KeyFile = join(cfg.KeyFile)
for _, c := range c.RemoteWriteConfigs {
c.SetDirectory(dir)
}
clientPaths := func(scfg *config_util.HTTPClientConfig) {
if scfg.BasicAuth != nil {
scfg.BasicAuth.PasswordFile = join(scfg.BasicAuth.PasswordFile)
}
scfg.BearerTokenFile = join(scfg.BearerTokenFile)
tlsPaths(&scfg.TLSConfig)
}
sdPaths := func(cfg *sd_config.ServiceDiscoveryConfig) {
for _, kcfg := range cfg.KubernetesSDConfigs {
clientPaths(&kcfg.HTTPClientConfig)
}
for _, mcfg := range cfg.MarathonSDConfigs {
mcfg.AuthTokenFile = join(mcfg.AuthTokenFile)
clientPaths(&mcfg.HTTPClientConfig)
}
for _, consulcfg := range cfg.ConsulSDConfigs {
tlsPaths(&consulcfg.TLSConfig)
}
for _, digitaloceancfg := range cfg.DigitalOceanSDConfigs {
clientPaths(&digitaloceancfg.HTTPClientConfig)
}
for _, dockerswarmcfg := range cfg.DockerSwarmSDConfigs {
clientPaths(&dockerswarmcfg.HTTPClientConfig)
}
for _, cfg := range cfg.OpenstackSDConfigs {
tlsPaths(&cfg.TLSConfig)
}
for _, cfg := range cfg.TritonSDConfigs {
tlsPaths(&cfg.TLSConfig)
}
for _, filecfg := range cfg.FileSDConfigs {
for i, fn := range filecfg.Files {
filecfg.Files[i] = join(fn)
}
}
}
for _, cfg := range cfg.ScrapeConfigs {
clientPaths(&cfg.HTTPClientConfig)
sdPaths(&cfg.ServiceDiscoveryConfig)
}
for _, cfg := range cfg.AlertingConfig.AlertmanagerConfigs {
clientPaths(&cfg.HTTPClientConfig)
sdPaths(&cfg.ServiceDiscoveryConfig)
}
for _, cfg := range cfg.RemoteReadConfigs {
clientPaths(&cfg.HTTPClientConfig)
}
for _, cfg := range cfg.RemoteWriteConfigs {
clientPaths(&cfg.HTTPClientConfig)
for _, c := range c.RemoteReadConfigs {
c.SetDirectory(dir)
}
}
@@ -311,6 +253,11 @@ type GlobalConfig struct {
ExternalLabels labels.Labels `yaml:"external_labels,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
func (c *GlobalConfig) SetDirectory(dir string) {
c.QueryLogFile = config.JoinDir(dir, c.QueryLogFile)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
// Create a clean global config as the previous one was already populated
@@ -379,14 +326,17 @@ type ScrapeConfig struct {
MetricsPath string `yaml:"metrics_path,omitempty"`
// The URL scheme with which to fetch metrics from targets.
Scheme string `yaml:"scheme,omitempty"`
// More than this many samples post metric-relabelling will cause the scrape to fail.
// More than this many samples post metric-relabeling will cause the scrape to fail.
SampleLimit uint `yaml:"sample_limit,omitempty"`
// More than this many targets after the target relabeling will cause the
// scrapes to fail.
TargetLimit uint `yaml:"target_limit,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
ServiceDiscoveryConfig sd_config.ServiceDiscoveryConfig `yaml:",inline"`
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
ServiceDiscoveryConfigs discovery.Configs `yaml:"-"`
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
// List of target relabel configurations.
RelabelConfigs []*relabel.Config `yaml:"relabel_configs,omitempty"`
@@ -394,12 +344,16 @@ type ScrapeConfig struct {
MetricRelabelConfigs []*relabel.Config `yaml:"metric_relabel_configs,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
func (c *ScrapeConfig) SetDirectory(dir string) {
c.ServiceDiscoveryConfigs.SetDirectory(dir)
c.HTTPClientConfig.SetDirectory(dir)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultScrapeConfig
type plain ScrapeConfig
err := unmarshal((*plain)(c))
if err != nil {
if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil {
return err
}
if len(c.JobName) == 0 {
@@ -413,21 +367,10 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
// The UnmarshalYAML method of ServiceDiscoveryConfig is not being called because it's not a pointer.
// We cannot make it a pointer as the parser panics for inlined pointer structs.
// Thus we just do its validation here.
if err := c.ServiceDiscoveryConfig.Validate(); err != nil {
return err
}
// Check for users putting URLs in target groups.
if len(c.RelabelConfigs) == 0 {
for _, tg := range c.ServiceDiscoveryConfig.StaticConfigs {
for _, t := range tg.Targets {
if err := CheckTargetAddress(t[model.AddressLabel]); err != nil {
return err
}
}
if err := checkStaticTargets(c.ServiceDiscoveryConfigs); err != nil {
return err
}
}
@@ -442,21 +385,27 @@
}
}
// Add index to the static config target groups for unique identification
// within scrape pool.
for i, tg := range c.ServiceDiscoveryConfig.StaticConfigs {
tg.Source = fmt.Sprintf("%d", i)
}
return nil
}
// MarshalYAML implements the yaml.Marshaler interface.
func (c *ScrapeConfig) MarshalYAML() (interface{}, error) {
return discovery.MarshalYAMLWithInlineConfigs(c)
}
// AlertingConfig configures alerting and alertmanager related configs.
type AlertingConfig struct {
AlertRelabelConfigs []*relabel.Config `yaml:"alert_relabel_configs,omitempty"`
AlertmanagerConfigs AlertmanagerConfigs `yaml:"alertmanagers,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
func (c *AlertingConfig) SetDirectory(dir string) {
for _, c := range c.AlertmanagerConfigs {
c.SetDirectory(dir)
}
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
// Create a clean global config as the previous one was already populated
@@ -526,8 +475,8 @@ type AlertmanagerConfig struct {
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
ServiceDiscoveryConfig sd_config.ServiceDiscoveryConfig `yaml:",inline"`
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
ServiceDiscoveryConfigs discovery.Configs `yaml:"-"`
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
// The URL scheme to use when talking to Alertmanagers.
Scheme string `yaml:"scheme,omitempty"`
@@ -543,11 +492,16 @@ type AlertmanagerConfig struct {
RelabelConfigs []*relabel.Config `yaml:"relabel_configs,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
func (c *AlertmanagerConfig) SetDirectory(dir string) {
c.ServiceDiscoveryConfigs.SetDirectory(dir)
c.HTTPClientConfig.SetDirectory(dir)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultAlertmanagerConfig
type plain AlertmanagerConfig
if err := unmarshal((*plain)(c)); err != nil {
if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil {
return err
}
@@ -558,21 +512,10 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
return err
}
// The UnmarshalYAML method of ServiceDiscoveryConfig is not being called because it's not a pointer.
// We cannot make it a pointer as the parser panics for inlined pointer structs.
// Thus we just do its validation here.
if err := c.ServiceDiscoveryConfig.Validate(); err != nil {
return err
}
// Check for users putting URLs in target groups.
if len(c.RelabelConfigs) == 0 {
for _, tg := range c.ServiceDiscoveryConfig.StaticConfigs {
for _, t := range tg.Targets {
if err := CheckTargetAddress(t[model.AddressLabel]); err != nil {
return err
}
}
if err := checkStaticTargets(c.ServiceDiscoveryConfigs); err != nil {
return err
}
}
@@ -582,12 +525,28 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
}
}
// Add index to the static config target groups for unique identification
// within scrape pool.
for i, tg := range c.ServiceDiscoveryConfig.StaticConfigs {
tg.Source = fmt.Sprintf("%d", i)
}
return nil
}
// MarshalYAML implements the yaml.Marshaler interface.
func (c *AlertmanagerConfig) MarshalYAML() (interface{}, error) {
return discovery.MarshalYAMLWithInlineConfigs(c)
}
func checkStaticTargets(configs discovery.Configs) error {
for _, cfg := range configs {
sc, ok := cfg.(discovery.StaticConfig)
if !ok {
continue
}
for _, tg := range sc {
for _, t := range tg.Targets {
if err := CheckTargetAddress(t[model.AddressLabel]); err != nil {
return err
}
}
}
}
return nil
}
@@ -600,29 +559,22 @@ func CheckTargetAddress(address model.LabelValue) error {
return nil
}
// ClientCert contains client cert credentials.
type ClientCert struct {
Cert string `yaml:"cert"`
Key config_util.Secret `yaml:"key"`
}
// FileSDConfig is the configuration for file based discovery.
type FileSDConfig struct {
Files []string `yaml:"files"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
}
// RemoteWriteConfig is the configuration for writing to remote storage.
type RemoteWriteConfig struct {
URL *config_util.URL `yaml:"url"`
URL *config.URL `yaml:"url"`
RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"`
WriteRelabelConfigs []*relabel.Config `yaml:"write_relabel_configs,omitempty"`
Name string `yaml:"name,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
QueueConfig QueueConfig `yaml:"queue_config,omitempty"`
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
QueueConfig QueueConfig `yaml:"queue_config,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
func (c *RemoteWriteConfig) SetDirectory(dir string) {
c.HTTPClientConfig.SetDirectory(dir)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -673,20 +625,25 @@ type QueueConfig struct {
// RemoteReadConfig is the configuration for reading from remote storage.
type RemoteReadConfig struct {
URL *config_util.URL `yaml:"url"`
RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"`
ReadRecent bool `yaml:"read_recent,omitempty"`
Name string `yaml:"name,omitempty"`
URL *config.URL `yaml:"url"`
RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"`
ReadRecent bool `yaml:"read_recent,omitempty"`
Name string `yaml:"name,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
// RequiredMatchers is an optional list of equality matchers which have to
// be present in a selector to query the remote read endpoint.
RequiredMatchers model.LabelSet `yaml:"required_matchers,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
func (c *RemoteReadConfig) SetDirectory(dir string) {
c.HTTPClientConfig.SetDirectory(dir)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultRemoteReadConfig

config/config_default_test.go

@@ -24,5 +24,4 @@ var ruleFilesExpectedConf = &Config{
"testdata/rules/second.rules",
"/absolute/third.rules",
},
original: "",
}

config/config_test.go

@@ -23,18 +23,20 @@ import (
"testing"
"time"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/azure"
sd_config "github.com/prometheus/prometheus/discovery/config"
"github.com/prometheus/prometheus/discovery/consul"
"github.com/prometheus/prometheus/discovery/digitalocean"
"github.com/prometheus/prometheus/discovery/dns"
"github.com/prometheus/prometheus/discovery/dockerswarm"
"github.com/prometheus/prometheus/discovery/ec2"
"github.com/prometheus/prometheus/discovery/eureka"
"github.com/prometheus/prometheus/discovery/file"
"github.com/prometheus/prometheus/discovery/hetzner"
"github.com/prometheus/prometheus/discovery/kubernetes"
"github.com/prometheus/prometheus/discovery/marathon"
"github.com/prometheus/prometheus/discovery/openstack"
@@ -46,12 +48,12 @@ import (
"github.com/prometheus/prometheus/util/testutil"
)
func mustParseURL(u string) *config_util.URL {
func mustParseURL(u string) *config.URL {
parsed, err := url.Parse(u)
if err != nil {
panic(err)
}
return &config_util.URL{URL: parsed}
return &config.URL{URL: parsed}
}
var expectedConf = &Config{
@@ -93,8 +95,8 @@ var expectedConf = &Config{
RemoteTimeout: model.Duration(30 * time.Second),
QueueConfig: DefaultQueueConfig,
Name: "rw_tls",
HTTPClientConfig: config_util.HTTPClientConfig{
TLSConfig: config_util.TLSConfig{
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
@@ -115,8 +117,8 @@ var expectedConf = &Config{
ReadRecent: false,
Name: "read_special",
RequiredMatchers: model.LabelSet{"job": "special"},
HTTPClientConfig: config_util.HTTPClientConfig{
TLSConfig: config_util.TLSConfig{
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
@@ -136,12 +138,20 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config_util.HTTPClientConfig{
HTTPClientConfig: config.HTTPClientConfig{
BearerTokenFile: filepath.FromSlash("testdata/valid_token_file"),
},
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
StaticConfigs: []*targetgroup.Group{
ServiceDiscoveryConfigs: discovery.Configs{
&file.SDConfig{
Files: []string{"testdata/foo/*.slow.json", "testdata/foo/*.slow.yml", "testdata/single/file.yml"},
RefreshInterval: model.Duration(10 * time.Minute),
},
&file.SDConfig{
Files: []string{"testdata/bar/*.yaml"},
RefreshInterval: model.Duration(5 * time.Minute),
},
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
@@ -154,17 +164,6 @@ var expectedConf = &Config{
Source: "0",
},
},
FileSDConfigs: []*file.SDConfig{
{
Files: []string{"testdata/foo/*.slow.json", "testdata/foo/*.slow.yml", "testdata/single/file.yml"},
RefreshInterval: model.Duration(10 * time.Minute),
},
{
Files: []string{"testdata/bar/*.yaml"},
RefreshInterval: model.Duration(5 * time.Minute),
},
},
},
RelabelConfigs: []*relabel.Config{
@@ -206,8 +205,8 @@ var expectedConf = &Config{
ScrapeTimeout: model.Duration(5 * time.Second),
SampleLimit: 1000,
HTTPClientConfig: config_util.HTTPClientConfig{
BasicAuth: &config_util.BasicAuth{
HTTPClientConfig: config.HTTPClientConfig{
BasicAuth: &config.BasicAuth{
Username: "admin_name",
Password: "multiline\nmysecret\ntest",
},
@@ -215,23 +214,21 @@ var expectedConf = &Config{
MetricsPath: "/my_path",
Scheme: "https",
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
DNSSDConfigs: []*dns.SDConfig{
{
Names: []string{
"first.dns.address.domain.com",
"second.dns.address.domain.com",
},
RefreshInterval: model.Duration(15 * time.Second),
Type: "SRV",
ServiceDiscoveryConfigs: discovery.Configs{
&dns.SDConfig{
Names: []string{
"first.dns.address.domain.com",
"second.dns.address.domain.com",
},
{
Names: []string{
"first.dns.address.domain.com",
},
RefreshInterval: model.Duration(30 * time.Second),
Type: "SRV",
RefreshInterval: model.Duration(15 * time.Second),
Type: "SRV",
},
&dns.SDConfig{
Names: []string{
"first.dns.address.domain.com",
},
RefreshInterval: model.Duration(30 * time.Second),
Type: "SRV",
},
},
@@ -298,24 +295,22 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
ConsulSDConfigs: []*consul.SDConfig{
{
Server: "localhost:1234",
Token: "mysecret",
Services: []string{"nginx", "cache", "mysql"},
ServiceTags: []string{"canary", "v1"},
NodeMeta: map[string]string{"rack": "123"},
TagSeparator: consul.DefaultSDConfig.TagSeparator,
Scheme: "https",
RefreshInterval: consul.DefaultSDConfig.RefreshInterval,
AllowStale: true,
TLSConfig: config_util.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
CAFile: filepath.FromSlash("testdata/valid_ca_file"),
InsecureSkipVerify: false,
},
ServiceDiscoveryConfigs: discovery.Configs{
&consul.SDConfig{
Server: "localhost:1234",
Token: "mysecret",
Services: []string{"nginx", "cache", "mysql"},
ServiceTags: []string{"canary", "v1"},
NodeMeta: map[string]string{"rack": "123"},
TagSeparator: consul.DefaultSDConfig.TagSeparator,
Scheme: "https",
RefreshInterval: consul.DefaultSDConfig.RefreshInterval,
AllowStale: true,
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
CAFile: filepath.FromSlash("testdata/valid_ca_file"),
InsecureSkipVerify: false,
},
},
},
@@ -341,8 +336,8 @@ var expectedConf = &Config{
MetricsPath: "/metrics",
Scheme: "http",
HTTPClientConfig: config_util.HTTPClientConfig{
TLSConfig: config_util.TLSConfig{
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
@@ -360,23 +355,21 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
KubernetesSDConfigs: []*kubernetes.SDConfig{
{
APIServer: kubernetesSDHostURL(),
Role: kubernetes.RoleEndpoint,
HTTPClientConfig: config_util.HTTPClientConfig{
BasicAuth: &config_util.BasicAuth{
Username: "myusername",
Password: "mysecret",
},
TLSConfig: config_util.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
ServiceDiscoveryConfigs: discovery.Configs{
&kubernetes.SDConfig{
APIServer: kubernetesSDHostURL(),
Role: kubernetes.RoleEndpoint,
HTTPClientConfig: config.HTTPClientConfig{
BasicAuth: &config.BasicAuth{
Username: "myusername",
Password: "mysecret",
},
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
NamespaceDiscovery: kubernetes.NamespaceDiscovery{},
},
NamespaceDiscovery: kubernetes.NamespaceDiscovery{},
},
},
},
@@ -389,22 +382,20 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config_util.HTTPClientConfig{
BasicAuth: &config_util.BasicAuth{
HTTPClientConfig: config.HTTPClientConfig{
BasicAuth: &config.BasicAuth{
Username: "myusername",
PasswordFile: filepath.FromSlash("testdata/valid_password_file"),
},
},
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
KubernetesSDConfigs: []*kubernetes.SDConfig{
{
APIServer: kubernetesSDHostURL(),
Role: kubernetes.RoleEndpoint,
NamespaceDiscovery: kubernetes.NamespaceDiscovery{
Names: []string{
"default",
},
ServiceDiscoveryConfigs: discovery.Configs{
&kubernetes.SDConfig{
APIServer: kubernetesSDHostURL(),
Role: kubernetes.RoleEndpoint,
NamespaceDiscovery: kubernetes.NamespaceDiscovery{
Names: []string{
"default",
},
},
},
@@ -420,19 +411,17 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
MarathonSDConfigs: []*marathon.SDConfig{
{
Servers: []string{
"https://marathon.example.com:443",
},
RefreshInterval: model.Duration(30 * time.Second),
AuthToken: config_util.Secret("mysecret"),
HTTPClientConfig: config_util.HTTPClientConfig{
TLSConfig: config_util.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
ServiceDiscoveryConfigs: discovery.Configs{
&marathon.SDConfig{
Servers: []string{
"https://marathon.example.com:443",
},
RefreshInterval: model.Duration(30 * time.Second),
AuthToken: "mysecret",
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
},
},
@@ -448,24 +437,22 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
EC2SDConfigs: []*ec2.SDConfig{
{
Region: "us-east-1",
AccessKey: "access",
SecretKey: "mysecret",
Profile: "profile",
RefreshInterval: model.Duration(60 * time.Second),
Port: 80,
Filters: []*ec2.Filter{
{
Name: "tag:environment",
Values: []string{"prod"},
},
{
Name: "tag:service",
Values: []string{"web", "db"},
},
ServiceDiscoveryConfigs: discovery.Configs{
&ec2.SDConfig{
Region: "us-east-1",
AccessKey: "access",
SecretKey: "mysecret",
Profile: "profile",
RefreshInterval: model.Duration(60 * time.Second),
Port: 80,
Filters: []*ec2.Filter{
{
Name: "tag:environment",
Values: []string{"prod"},
},
{
Name: "tag:service",
Values: []string{"web", "db"},
},
},
},
@@ -481,18 +468,16 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
AzureSDConfigs: []*azure.SDConfig{
{
Environment: "AzurePublicCloud",
SubscriptionID: "11AAAA11-A11A-111A-A111-1111A1111A11",
TenantID: "BBBB222B-B2B2-2B22-B222-2BB2222BB2B2",
ClientID: "333333CC-3C33-3333-CCC3-33C3CCCCC33C",
ClientSecret: "mysecret",
AuthenticationMethod: "OAuth",
RefreshInterval: model.Duration(5 * time.Minute),
Port: 9100,
},
ServiceDiscoveryConfigs: discovery.Configs{
&azure.SDConfig{
Environment: "AzurePublicCloud",
SubscriptionID: "11AAAA11-A11A-111A-A111-1111A1111A11",
TenantID: "BBBB222B-B2B2-2B22-B222-2BB2222BB2B2",
ClientID: "333333CC-3C33-3333-CCC3-33C3CCCCC33C",
ClientSecret: "mysecret",
AuthenticationMethod: "OAuth",
RefreshInterval: model.Duration(5 * time.Minute),
Port: 9100,
},
},
},
@@ -506,13 +491,11 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
NerveSDConfigs: []*zookeeper.NerveSDConfig{
{
Servers: []string{"localhost"},
Paths: []string{"/monitoring"},
Timeout: model.Duration(10 * time.Second),
},
ServiceDiscoveryConfigs: discovery.Configs{
&zookeeper.NerveSDConfig{
Servers: []string{"localhost"},
Paths: []string{"/monitoring"},
Timeout: model.Duration(10 * time.Second),
},
},
},
@@ -526,8 +509,8 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
StaticConfigs: []*targetgroup.Group{
ServiceDiscoveryConfigs: discovery.Configs{
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
@@ -547,8 +530,8 @@ var expectedConf = &Config{
MetricsPath: "/federate",
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
StaticConfigs: []*targetgroup.Group{
ServiceDiscoveryConfigs: discovery.Configs{
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
@@ -568,8 +551,8 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
StaticConfigs: []*targetgroup.Group{
ServiceDiscoveryConfigs: discovery.Configs{
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:9090"},
@@ -589,20 +572,18 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
TritonSDConfigs: []*triton.SDConfig{
{
Account: "testAccount",
Role: "container",
DNSSuffix: "triton.example.com",
Endpoint: "triton.example.com",
Port: 9163,
RefreshInterval: model.Duration(60 * time.Second),
Version: 1,
TLSConfig: config_util.TLSConfig{
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
},
ServiceDiscoveryConfigs: discovery.Configs{
&triton.SDConfig{
Account: "testAccount",
Role: "container",
DNSSuffix: "triton.example.com",
Endpoint: "triton.example.com",
Port: 9163,
RefreshInterval: model.Duration(60 * time.Second),
Version: 1,
TLSConfig: config.TLSConfig{
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
},
},
},
@@ -617,15 +598,13 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
DigitalOceanSDConfigs: []*digitalocean.SDConfig{
{
HTTPClientConfig: config_util.HTTPClientConfig{
BearerToken: "abcdef",
},
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
ServiceDiscoveryConfigs: discovery.Configs{
&digitalocean.SDConfig{
HTTPClientConfig: config.HTTPClientConfig{
BearerToken: "abcdef",
},
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
},
},
},
@@ -639,14 +618,12 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
DockerSwarmSDConfigs: []*dockerswarm.SDConfig{
{
Host: "http://127.0.0.1:2375",
Role: "nodes",
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
},
ServiceDiscoveryConfigs: discovery.Configs{
&dockerswarm.SDConfig{
Host: "http://127.0.0.1:2375",
Role: "nodes",
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
},
},
},
@@ -660,21 +637,61 @@ var expectedConf = &Config{
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
OpenstackSDConfigs: []*openstack.SDConfig{
{
Role: "instance",
Region: "RegionOne",
Port: 80,
Availability: "public",
RefreshInterval: model.Duration(60 * time.Second),
TLSConfig: config_util.TLSConfig{
CAFile: "testdata/valid_ca_file",
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
},
ServiceDiscoveryConfigs: discovery.Configs{&openstack.SDConfig{
Role: "instance",
Region: "RegionOne",
Port: 80,
Availability: "public",
RefreshInterval: model.Duration(60 * time.Second),
TLSConfig: config.TLSConfig{
CAFile: "testdata/valid_ca_file",
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
}},
},
},
{
JobName: "hetzner",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfigs: discovery.Configs{
&hetzner.SDConfig{
HTTPClientConfig: config.HTTPClientConfig{
BearerToken: "abcdef",
},
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
Role: "hcloud",
},
&hetzner.SDConfig{
HTTPClientConfig: config.HTTPClientConfig{
BasicAuth: &config.BasicAuth{Username: "abcdef", Password: "abcdef"},
},
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
Role: "robot",
},
},
},
{
JobName: "service-eureka",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfigs: discovery.Configs{&eureka.SDConfig{
Server: "http://eureka.example.com:8761/eureka",
RefreshInterval: model.Duration(30 * time.Second),
},
},
},
},
@@ -684,8 +701,8 @@ var expectedConf = &Config{
Scheme: "https",
Timeout: model.Duration(10 * time.Second),
APIVersion: AlertmanagerAPIVersionV1,
ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
StaticConfigs: []*targetgroup.Group{
ServiceDiscoveryConfigs: discovery.Configs{
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{model.AddressLabel: "1.2.3.4:9093"},
@@ -699,7 +716,19 @@
},
},
},
original: "",
}
func TestYAMLRoundtrip(t *testing.T) {
want, err := LoadFile("testdata/roundtrip.good.yml")
testutil.Ok(t, err)
out, err := yaml.Marshal(want)
testutil.Ok(t, err)
got := &Config{}
testutil.Ok(t, yaml.UnmarshalStrict(out, got))
testutil.Equals(t, want, got)
}
func TestLoadConfig(t *testing.T) {
@@ -710,8 +739,6 @@ func TestLoadConfig(t *testing.T) {
c, err := LoadFile("testdata/conf.good.yml")
testutil.Ok(t, err)
expectedConf.original = c.original
testutil.Equals(t, expectedConf, c)
}
@@ -736,7 +763,7 @@ func TestElideSecrets(t *testing.T) {
yamlConfig := string(config)
matches := secretRe.FindAllStringIndex(yamlConfig, -1)
testutil.Assert(t, len(matches) == 8, "wrong number of secret matches found")
testutil.Assert(t, len(matches) == 10, "wrong number of secret matches found")
testutil.Assert(t, !strings.Contains(yamlConfig, "mysecret"),
"yaml marshal reveals authentication credentials.")
}
@@ -745,8 +772,6 @@ func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) {
// Parse a valid file that sets a rule files with an absolute path
c, err := LoadFile(ruleFilesConfigFile)
testutil.Ok(t, err)
ruleFilesExpectedConf.original = c.original
testutil.Equals(t, ruleFilesExpectedConf, c)
}
@@ -834,7 +859,7 @@ var expectedErrors = []struct {
errMsg: "invalid rule file path",
}, {
filename: "unknown_attr.bad.yml",
errMsg: "field consult_sd_configs not found in type config.plain",
errMsg: "field consult_sd_configs not found in type",
}, {
filename: "bearertoken.bad.yml",
errMsg: "at most one of bearer_token & bearer_token_file must be configured",
@@ -984,6 +1009,18 @@ var expectedErrors = []struct {
filename: "empty_static_config.bad.yml",
errMsg: "empty or null section in static_configs",
},
{
filename: "hetzner_role.bad.yml",
errMsg: "unknown role",
},
{
filename: "eureka_no_server.bad.yml",
errMsg: "empty or null eureka server",
},
{
filename: "eureka_invalid_server.bad.yml",
errMsg: "invalid eureka server URL",
},
}
func TestBadConfigs(t *testing.T) {
@@ -1022,11 +1059,10 @@ func TestEmptyGlobalBlock(t *testing.T) {
c, err := Load("global:\n")
testutil.Ok(t, err)
exp := DefaultConfig
exp.original = "global:\n"
testutil.Equals(t, exp, *c)
}
func kubernetesSDHostURL() config_util.URL {
func kubernetesSDHostURL() config.URL {
tURL, _ := url.Parse("https://localhost:1234")
return config_util.URL{URL: tURL}
return config.URL{URL: tURL}
}

View file

@@ -22,5 +22,4 @@ var ruleFilesExpectedConf = &Config{
"testdata\\rules\\second.rules",
"c:\\absolute\\third.rules",
},
original: "",
}

View file

@@ -279,6 +279,19 @@ scrape_configs:
cert_file: valid_cert_file
key_file: valid_key_file
- job_name: hetzner
hetzner_sd_configs:
- role: hcloud
bearer_token: abcdef
- role: robot
basic_auth:
username: abcdef
password: abcdef
- job_name: service-eureka
eureka_sd_configs:
- server: 'http://eureka.example.com:8761/eureka'
alerting:
alertmanagers:
- scheme: https

View file

@@ -0,0 +1,5 @@
scrape_configs:
- job_name: eureka
eureka_sd_configs:
- server: eureka.com

View file

@@ -0,0 +1,5 @@
scrape_configs:
- job_name: eureka
eureka_sd_configs:
- server:

4
config/testdata/hetzner_role.bad.yml vendored Normal file
View file

@@ -0,0 +1,4 @@
scrape_configs:
- hetzner_sd_configs:
- role: invalid

143
config/testdata/roundtrip.good.yml vendored Normal file
View file

@@ -0,0 +1,143 @@
alerting:
alertmanagers:
- scheme: https
file_sd_configs:
- files:
- foo/*.slow.json
- foo/*.slow.yml
refresh_interval: 10m
- files:
- bar/*.yaml
static_configs:
- targets:
- 1.2.3.4:9093
- 1.2.3.5:9093
- 1.2.3.6:9093
scrape_configs:
- job_name: foo
static_configs:
- targets:
- localhost:9090
- localhost:9191
labels:
my: label
your: label
- job_name: bar
azure_sd_configs:
- environment: AzurePublicCloud
authentication_method: OAuth
subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2
client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C
client_secret: <secret>
port: 9100
consul_sd_configs:
- server: localhost:1234
token: <secret>
services: [nginx, cache, mysql]
tags: [canary, v1]
node_meta:
rack: "123"
allow_stale: true
scheme: https
tls_config:
ca_file: valid_ca_file
cert_file: valid_cert_file
key_file: valid_key_file
digitalocean_sd_configs:
- bearer_token: <secret>
dockerswarm_sd_configs:
- host: http://127.0.0.1:2375
role: nodes
dns_sd_configs:
- refresh_interval: 15s
names:
- first.dns.address.domain.com
- second.dns.address.domain.com
- names:
- first.dns.address.domain.com
ec2_sd_configs:
- region: us-east-1
access_key: access
secret_key: <secret>
profile: profile
filters:
- name: tag:environment
values:
- prod
- name: tag:service
values:
- web
- db
file_sd_configs:
- files:
- single/file.yml
kubernetes_sd_configs:
- role: endpoints
api_server: https://localhost:1234
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
basic_auth:
username: username
password: <secret>
- role: endpoints
api_server: https://localhost:1234
namespaces:
names:
- default
basic_auth:
username: username
password_file: valid_password_file
marathon_sd_configs:
- servers:
- https://marathon.example.com:443
auth_token: <secret>
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
nerve_sd_configs:
- servers:
- localhost
paths:
- /monitoring
openstack_sd_configs:
- role: instance
region: RegionOne
port: 80
refresh_interval: 1m
tls_config:
ca_file: valid_ca_file
cert_file: valid_cert_file
key_file: valid_key_file
static_configs:
- targets:
- localhost:9093
triton_sd_configs:
- account: testAccount
dns_suffix: triton.example.com
endpoint: triton.example.com
port: 9163
refresh_interval: 1m
version: 1
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file

View file

@@ -2,15 +2,15 @@
{{/* Load Prometheus console library JS/CSS. Should go in <head> */}}
{{ define "prom_console_head" }}
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/static/vendor/rickshaw/rickshaw.min.css">
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/static/vendor/bootstrap-4.3.1/css/bootstrap.min.css">
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css">
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/static/css/prom_console.css">
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.min.css">
<script src="{{ pathPrefix }}/static/vendor/rickshaw/vendor/d3.v3.js"></script>
<script src="{{ pathPrefix }}/static/vendor/rickshaw/vendor/d3.layout.min.js"></script>
<script src="{{ pathPrefix }}/static/vendor/rickshaw/rickshaw.min.js"></script>
<script src="{{ pathPrefix }}/static/vendor/js/jquery-3.3.1.min.js"></script>
<script src="{{ pathPrefix }}/static/vendor/js/jquery-3.5.1.min.js"></script>
<script src="{{ pathPrefix }}/static/vendor/js/popper.min.js"></script>
<script src="{{ pathPrefix }}/static/vendor/bootstrap-4.3.1/js/bootstrap.min.js"></script>
<script src="{{ pathPrefix }}/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js"></script>
<script>
var PATH_PREFIX = "{{ pathPrefix }}";

View file

@@ -146,85 +146,115 @@ both cases.
For example, if we had a discovery mechanism that retrieves the following groups:
```
```go
[]targetgroup.Group{
{
Targets: []model.LabelSet{
{
"__instance__": "10.11.150.1:7870",
"hostname": "demo-target-1",
"test": "simple-test",
},
{
"__instance__": "10.11.150.4:7870",
"hostname": "demo-target-2",
"test": "simple-test",
},
},
Labels: map[LabelName][LabelValue] {
"job": "mysql",
},
"Source": "file1",
},
{
Targets: []model.LabelSet{
{
"__instance__": "10.11.122.11:6001",
"hostname": "demo-postgres-1",
"test": "simple-test",
},
{
"__instance__": "10.11.122.15:6001",
"hostname": "demo-postgres-2",
"test": "simple-test",
},
},
Labels: map[LabelName][LabelValue] {
"job": "postgres",
},
"Source": "file2",
},
{
Targets: []model.LabelSet{
{
"__instance__": "10.11.150.1:7870",
"hostname": "demo-target-1",
"test": "simple-test",
},
{
"__instance__": "10.11.150.4:7870",
"hostname": "demo-target-2",
"test": "simple-test",
},
},
Labels: model.LabelSet{
"job": "mysql",
},
"Source": "file1",
},
{
Targets: []model.LabelSet{
{
"__instance__": "10.11.122.11:6001",
"hostname": "demo-postgres-1",
"test": "simple-test",
},
{
"__instance__": "10.11.122.15:6001",
"hostname": "demo-postgres-2",
"test": "simple-test",
},
},
Labels: model.LabelSet{
"job": "postgres",
},
"Source": "file2",
},
}
```
Here there are two target groups: one group with source `file1` and another with `file2`. The grouping is implementation-specific and could even be one target per group. But one has to make sure that every target group sent by an SD instance has a `Source` which is unique across all the target groups of that SD instance.
In this case, both target groups are sent down the channel the first time `Run()` is called. Now, for an update, we need to send the whole _changed_ target group down the channel; i.e., if the target with `hostname: demo-postgres-2` goes away, we send:
```
```go
&targetgroup.Group{
Targets: []model.LabelSet{
{
"__instance__": "10.11.122.11:6001",
"hostname": "demo-postgres-1",
"test": "simple-test",
},
},
Labels: map[LabelName][LabelValue] {
"job": "postgres",
},
"Source": "file2",
Targets: []model.LabelSet{
{
"__instance__": "10.11.122.11:6001",
"hostname": "demo-postgres-1",
"test": "simple-test",
},
},
Labels: model.LabelSet{
"job": "postgres",
},
"Source": "file2",
}
```
down the channel.
If all the targets in a group go away, we need to send a target group with empty `Targets` down the channel; i.e., if all targets with `job: postgres` go away, we send:
```
```go
&targetgroup.Group{
Targets: nil,
"Source": "file2",
Targets: nil,
"Source": "file2",
}
```
down the channel.
### The Config interface
Now that your service discovery mechanism is ready to discover targets, you must help
Prometheus discover it. This is done by implementing the `discovery.Config` interface
and registering it with `discovery.RegisterConfig` in an init function of your package.
```go
type Config interface {
// Name returns the name of the discovery mechanism.
Name() string
// NewDiscoverer returns a Discoverer for the Config
// with the given DiscovererOptions.
NewDiscoverer(DiscovererOptions) (Discoverer, error)
}
type DiscovererOptions struct {
Logger log.Logger
}
```
The value returned by `Name()` should be short, descriptive, lowercase, and unique.
It's used to tag the provided `Logger` and as part of the YAML key for your SD
mechanism's list of configs in `scrape_config` and `alertmanager_config`
(e.g. `${NAME}_sd_configs`).
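
As a concrete sketch, registration for a hypothetical `mysd` mechanism could look as
follows (all `mysd` names are illustrative and not part of the code base):

```go
package mysd

import (
	"context"

	"github.com/go-kit/kit/log"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

func init() {
	// Registering the Config makes the mechanism available in YAML as
	// `mysd_sd_configs` in both scrape_config and alertmanager_config.
	discovery.RegisterConfig(&SDConfig{})
}

// SDConfig is the YAML configuration for the hypothetical mechanism.
type SDConfig struct {
	Server string `yaml:"server"`
}

// Name returns the name of the Config.
func (*SDConfig) Name() string { return "mysd" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return &Discovery{logger: opts.Logger, server: c.Server}, nil
}

// Discovery implements the Discoverer interface described above.
type Discovery struct {
	logger log.Logger
	server string
}

// Run sends an initial target group and then blocks until the context is
// canceled; a real implementation would watch d.server and forward every
// changed target group through the channel.
func (d *Discovery) Run(ctx context.Context, up chan<- []*targetgroup.Group) {
	select {
	case up <- []*targetgroup.Group{{Source: d.server}}:
	case <-ctx.Done():
		return
	}
	<-ctx.Done()
}
```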
### New Service Discovery Check List
Here are some non-obvious parts of adding service discoveries that need to be verified:
- Check for `nil` SDConfigs in `discovery/config/config.go`.
- Validate that discovery configs can be DeepEqualled by adding them to
`config/testdata/conf.good.yml` and to the associated tests.
- If there is a TLSConfig or HTTPClientConfig, add them to
`resolveFilepaths` in `config/config.go`.
- If the config contains file paths directly or indirectly (e.g. with a TLSConfig or
  HTTPClientConfig field), then it must implement `config.DirectorySetter`; a minimal
  sketch follows this list.
- Import your SD package from `prometheus/discovery/install`. The install package is
imported from `main` to register all builtin SD mechanisms.
- List the service discovery in both `<scrape_config>` and
`<alertmanager_config>` in `docs/configuration/configuration.md`.
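
For the `config.DirectorySetter` item, a minimal sketch, assuming the config embeds a
`config.HTTPClientConfig` the way the HTTP-based mechanisms in this repository do:

```go
// SetDirectory joins any relative file paths with dir. Prometheus calls this
// after loading the configuration, so that TLS certificates, key files, and
// password files referenced by the HTTP client configuration are resolved
// relative to the directory of the configuration file.
func (c *SDConfig) SetDirectory(dir string) {
	c.HTTPClientConfig.SetDirectory(dir)
}
```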

View file

@@ -33,6 +33,7 @@ import (
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil"
@@ -64,6 +65,10 @@ var DefaultSDConfig = SDConfig{
AuthenticationMethod: authMethodOAuth,
}
func init() {
discovery.RegisterConfig(&SDConfig{})
}
// SDConfig is the configuration for Azure based service discovery.
type SDConfig struct {
Environment string `yaml:"environment,omitempty"`
@@ -76,6 +81,14 @@ type SDConfig struct {
AuthenticationMethod string `yaml:"authentication_method,omitempty"`
}
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "azure" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(c, opts.Logger), nil
}
func validateAuthParam(param, name string) error {
if len(param) == 0 {
return errors.Errorf("azure SD configuration requires a %s", name)

View file

@@ -18,8 +18,13 @@ import (
"testing"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestMapFromVMWithEmptyTags(t *testing.T) {
id := "test"
name := "name"

View file

@@ -1,147 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"github.com/pkg/errors"
"github.com/prometheus/prometheus/discovery/azure"
"github.com/prometheus/prometheus/discovery/consul"
"github.com/prometheus/prometheus/discovery/digitalocean"
"github.com/prometheus/prometheus/discovery/dns"
"github.com/prometheus/prometheus/discovery/dockerswarm"
"github.com/prometheus/prometheus/discovery/ec2"
"github.com/prometheus/prometheus/discovery/file"
"github.com/prometheus/prometheus/discovery/gce"
"github.com/prometheus/prometheus/discovery/kubernetes"
"github.com/prometheus/prometheus/discovery/marathon"
"github.com/prometheus/prometheus/discovery/openstack"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/discovery/triton"
"github.com/prometheus/prometheus/discovery/zookeeper"
)
// ServiceDiscoveryConfig configures lists of different service discovery mechanisms.
type ServiceDiscoveryConfig struct {
// List of labeled target groups for this job.
StaticConfigs []*targetgroup.Group `yaml:"static_configs,omitempty"`
// List of DNS service discovery configurations.
DNSSDConfigs []*dns.SDConfig `yaml:"dns_sd_configs,omitempty"`
// List of file service discovery configurations.
FileSDConfigs []*file.SDConfig `yaml:"file_sd_configs,omitempty"`
// List of Consul service discovery configurations.
ConsulSDConfigs []*consul.SDConfig `yaml:"consul_sd_configs,omitempty"`
// List of DigitalOcean service discovery configurations.
DigitalOceanSDConfigs []*digitalocean.SDConfig `yaml:"digitalocean_sd_configs,omitempty"`
// List of Docker Swarm service discovery configurations.
DockerSwarmSDConfigs []*dockerswarm.SDConfig `yaml:"dockerswarm_sd_configs,omitempty"`
// List of Serverset service discovery configurations.
ServersetSDConfigs []*zookeeper.ServersetSDConfig `yaml:"serverset_sd_configs,omitempty"`
// NerveSDConfigs is a list of Nerve service discovery configurations.
NerveSDConfigs []*zookeeper.NerveSDConfig `yaml:"nerve_sd_configs,omitempty"`
// MarathonSDConfigs is a list of Marathon service discovery configurations.
MarathonSDConfigs []*marathon.SDConfig `yaml:"marathon_sd_configs,omitempty"`
// List of Kubernetes service discovery configurations.
KubernetesSDConfigs []*kubernetes.SDConfig `yaml:"kubernetes_sd_configs,omitempty"`
// List of GCE service discovery configurations.
GCESDConfigs []*gce.SDConfig `yaml:"gce_sd_configs,omitempty"`
// List of EC2 service discovery configurations.
EC2SDConfigs []*ec2.SDConfig `yaml:"ec2_sd_configs,omitempty"`
// List of OpenStack service discovery configurations.
OpenstackSDConfigs []*openstack.SDConfig `yaml:"openstack_sd_configs,omitempty"`
// List of Azure service discovery configurations.
AzureSDConfigs []*azure.SDConfig `yaml:"azure_sd_configs,omitempty"`
// List of Triton service discovery configurations.
TritonSDConfigs []*triton.SDConfig `yaml:"triton_sd_configs,omitempty"`
}
// Validate validates the ServiceDiscoveryConfig.
func (c *ServiceDiscoveryConfig) Validate() error {
for _, cfg := range c.AzureSDConfigs {
if cfg == nil {
return errors.New("empty or null section in azure_sd_configs")
}
}
for _, cfg := range c.ConsulSDConfigs {
if cfg == nil {
return errors.New("empty or null section in consul_sd_configs")
}
}
for _, cfg := range c.DigitalOceanSDConfigs {
if cfg == nil {
return errors.New("empty or null section in digitalocean_sd_configs")
}
}
for _, cfg := range c.DockerSwarmSDConfigs {
if cfg == nil {
return errors.New("empty or null section in dockerswarm_sd_configs")
}
}
for _, cfg := range c.DNSSDConfigs {
if cfg == nil {
return errors.New("empty or null section in dns_sd_configs")
}
}
for _, cfg := range c.EC2SDConfigs {
if cfg == nil {
return errors.New("empty or null section in ec2_sd_configs")
}
}
for _, cfg := range c.FileSDConfigs {
if cfg == nil {
return errors.New("empty or null section in file_sd_configs")
}
}
for _, cfg := range c.GCESDConfigs {
if cfg == nil {
return errors.New("empty or null section in gce_sd_configs")
}
}
for _, cfg := range c.KubernetesSDConfigs {
if cfg == nil {
return errors.New("empty or null section in kubernetes_sd_configs")
}
}
for _, cfg := range c.MarathonSDConfigs {
if cfg == nil {
return errors.New("empty or null section in marathon_sd_configs")
}
}
for _, cfg := range c.NerveSDConfigs {
if cfg == nil {
return errors.New("empty or null section in nerve_sd_configs")
}
}
for _, cfg := range c.OpenstackSDConfigs {
if cfg == nil {
return errors.New("empty or null section in openstack_sd_configs")
}
}
for _, cfg := range c.ServersetSDConfigs {
if cfg == nil {
return errors.New("empty or null section in serverset_sd_configs")
}
}
for _, cfg := range c.StaticConfigs {
if cfg == nil {
return errors.New("empty or null section in static_configs")
}
}
for _, cfg := range c.TritonSDConfigs {
if cfg == nil {
return errors.New("empty or null section in triton_sd_configs")
}
}
return nil
}

View file

@@ -1,58 +0,0 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"fmt"
"reflect"
"strings"
"testing"
"github.com/prometheus/prometheus/util/testutil"
"gopkg.in/yaml.v2"
)
func TestForNilSDConfig(t *testing.T) {
// Get all the yaml fields names of the ServiceDiscoveryConfig struct.
s := reflect.ValueOf(ServiceDiscoveryConfig{})
configType := s.Type()
n := s.NumField()
fieldsSlice := make([]string, n)
for i := 0; i < n; i++ {
field := configType.Field(i)
tag := field.Tag.Get("yaml")
tag = strings.Split(tag, ",")[0]
fieldsSlice = append(fieldsSlice, tag)
}
// Unmarshall all possible yaml keys and validate errors check upon nil
// SD config.
for _, f := range fieldsSlice {
if f == "" {
continue
}
t.Run(f, func(t *testing.T) {
c := &ServiceDiscoveryConfig{}
err := yaml.Unmarshal([]byte(fmt.Sprintf(`
---
%s:
-
`, f)), c)
testutil.Ok(t, err)
err = c.Validate()
testutil.NotOk(t, err)
testutil.Equals(t, fmt.Sprintf("empty or null section in %s", f), err.Error())
})
}
}

View file

@@ -28,15 +28,16 @@ import (
conntrack "github.com/mwitkow/go-conntrack"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil"
)
const (
watchTimeout = 10 * time.Minute
watchTimeout = 2 * time.Minute
retryInterval = 15 * time.Second
// addressLabel is the name for the label containing a target's address.
@@ -99,15 +100,21 @@
}
)
func init() {
discovery.RegisterConfig(&SDConfig{})
prometheus.MustRegister(rpcFailuresCount)
prometheus.MustRegister(rpcDuration)
}
// SDConfig is the configuration for Consul service discovery.
type SDConfig struct {
Server string `yaml:"server,omitempty"`
Token config_util.Secret `yaml:"token,omitempty"`
Datacenter string `yaml:"datacenter,omitempty"`
TagSeparator string `yaml:"tag_separator,omitempty"`
Scheme string `yaml:"scheme,omitempty"`
Username string `yaml:"username,omitempty"`
Password config_util.Secret `yaml:"password,omitempty"`
Server string `yaml:"server,omitempty"`
Token config.Secret `yaml:"token,omitempty"`
Datacenter string `yaml:"datacenter,omitempty"`
TagSeparator string `yaml:"tag_separator,omitempty"`
Scheme string `yaml:"scheme,omitempty"`
Username string `yaml:"username,omitempty"`
Password config.Secret `yaml:"password,omitempty"`
// See https://www.consul.io/docs/internals/consensus.html#consistency-modes,
// stale reads are a lot cheaper and are a necessity if you have >5k targets.
@@ -127,7 +134,20 @@ type SDConfig struct {
// Desired node metadata.
NodeMeta map[string]string `yaml:"node_meta,omitempty"`
TLSConfig config_util.TLSConfig `yaml:"tls_config,omitempty"`
TLSConfig config.TLSConfig `yaml:"tls_config,omitempty"`
}
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "consul" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(c, opts.Logger)
}
// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
c.TLSConfig.SetDirectory(dir)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -144,11 +164,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return nil
}
func init() {
prometheus.MustRegister(rpcFailuresCount)
prometheus.MustRegister(rpcDuration)
}
// Discovery retrieves target information from a Consul server
// and updates them via watches.
type Discovery struct {
@@ -170,7 +185,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
logger = log.NewNopLogger()
}
tls, err := config_util.NewTLSConfig(&conf.TLSConfig)
tls, err := config.NewTLSConfig(&conf.TLSConfig)
if err != nil {
return nil, err
}
@@ -184,7 +199,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
}
wrapper := &http.Client{
Transport: transport,
Timeout: 35 * time.Second,
Timeout: time.Duration(watchTimeout) + 15*time.Second,
}
clientConf := &consul.Config{
@@ -348,13 +363,13 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
catalog := d.client.Catalog()
level.Debug(d.logger).Log("msg", "Watching services", "tags", strings.Join(d.watchedTags, ","))
t0 := time.Now()
opts := &consul.QueryOptions{
WaitIndex: *lastIndex,
WaitTime: watchTimeout,
AllowStale: d.allowStale,
NodeMeta: d.watchedNodeMeta,
}
t0 := time.Now()
srvs, meta, err := catalog.Services(opts.WithContext(ctx))
elapsed := time.Since(t0)
servicesRPCDuration.Observe(elapsed.Seconds())
@@ -441,18 +456,19 @@ func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.G
go func() {
ticker := time.NewTicker(d.refreshInterval)
defer ticker.Stop()
var lastIndex uint64
health := srv.client.Health()
for {
select {
case <-ctx.Done():
ticker.Stop()
return
default:
srv.watch(ctx, ch, health, &lastIndex)
select {
case <-ticker.C:
case <-ctx.Done():
return
}
}
}
@@ -463,7 +479,6 @@ func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.G
func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Group, health *consul.Health, lastIndex *uint64) {
level.Debug(srv.logger).Log("msg", "Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ","))
t0 := time.Now()
opts := &consul.QueryOptions{
WaitIndex: *lastIndex,
WaitTime: watchTimeout,
@@ -471,6 +486,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
NodeMeta: srv.discovery.watchedNodeMeta,
}
t0 := time.Now()
serviceNodes, meta, err := health.ServiceMultipleTags(srv.name, srv.tags, false, opts.WithContext(ctx))
elapsed := time.Since(t0)
serviceRPCDuration.Observe(elapsed.Seconds())

View file

@@ -228,20 +228,20 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {
switch r.URL.String() {
case "/v1/agent/self":
response = AgentAnswer
case "/v1/health/service/test?node-meta=rack_name%3A2304&stale=&tag=tag1&wait=600000ms":
case "/v1/health/service/test?node-meta=rack_name%3A2304&stale=&tag=tag1&wait=120000ms":
response = ServiceTestAnswer
case "/v1/health/service/test?wait=600000ms":
case "/v1/health/service/test?wait=120000ms":
response = ServiceTestAnswer
case "/v1/health/service/other?wait=600000ms":
case "/v1/health/service/other?wait=120000ms":
response = `[]`
case "/v1/catalog/services?node-meta=rack_name%3A2304&stale=&wait=600000ms":
case "/v1/catalog/services?node-meta=rack_name%3A2304&stale=&wait=120000ms":
response = ServicesTestAnswer
case "/v1/catalog/services?wait=600000ms":
case "/v1/catalog/services?wait=120000ms":
response = ServicesTestAnswer
case "/v1/catalog/services?index=1&node-meta=rack_name%3A2304&stale=&wait=600000ms":
case "/v1/catalog/services?index=1&node-meta=rack_name%3A2304&stale=&wait=120000ms":
time.Sleep(5 * time.Second)
response = ServicesTestAnswer
case "/v1/catalog/services?index=1&wait=600000ms":
case "/v1/catalog/services?index=1&wait=120000ms":
time.Sleep(5 * time.Second)
response = ServicesTestAnswer
default:

View file

@@ -24,10 +24,11 @@ import (
"github.com/digitalocean/godo"
"github.com/go-kit/kit/log"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
@@ -54,14 +55,31 @@ var DefaultSDConfig = SDConfig{
RefreshInterval: model.Duration(60 * time.Second),
}
func init() {
discovery.RegisterConfig(&SDConfig{})
}
// SDConfig is the configuration for DigitalOcean based service discovery.
type SDConfig struct {
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
RefreshInterval model.Duration `yaml:"refresh_interval"`
Port int `yaml:"port"`
}
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "digitalocean" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(c, opts.Logger)
}
// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
c.HTTPClientConfig.SetDirectory(dir)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultSDConfig
@@ -87,7 +105,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
port: conf.Port,
}
rt, err := config_util.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd", false)
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd", false, false)
if err != nil {
return nil, err
}

117
discovery/discovery.go Normal file
View file

@@ -0,0 +1,117 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package discovery
import (
"context"
"reflect"
"github.com/go-kit/kit/log"
"github.com/prometheus/common/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
// Discoverer provides information about target groups. It maintains a set
// of sources from which TargetGroups can originate. Whenever a discovery provider
// detects a potential change, it sends the TargetGroup through its channel.
//
// Discoverer does not know if an actual change happened.
// It does guarantee that it sends the new TargetGroup whenever a change happens.
//
// Discoverers should initially send a full set of all discoverable TargetGroups.
type Discoverer interface {
// Run hands a channel to the discovery provider (Consul, DNS, etc.) through which
// it can send updated target groups. It must return when the context is canceled.
// It should not close the update channel on returning.
Run(ctx context.Context, up chan<- []*targetgroup.Group)
}
// DiscovererOptions provides options for a Discoverer.
type DiscovererOptions struct {
Logger log.Logger
}
// A Config provides the configuration and constructor for a Discoverer.
type Config interface {
// Name returns the name of the discovery mechanism.
Name() string
// NewDiscoverer returns a Discoverer for the Config
// with the given DiscovererOptions.
NewDiscoverer(DiscovererOptions) (Discoverer, error)
}
// Configs is a slice of Config values that uses custom YAML marshaling and unmarshaling
// to represent itself as a mapping of the Config values grouped by their types.
type Configs []Config
// SetDirectory joins any relative file paths with dir.
func (c *Configs) SetDirectory(dir string) {
for _, c := range *c {
if v, ok := c.(config.DirectorySetter); ok {
v.SetDirectory(dir)
}
}
}
// UnmarshalYAML implements yaml.Unmarshaler.
func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {
cfgTyp := getConfigType(configsType)
cfgPtr := reflect.New(cfgTyp)
cfgVal := cfgPtr.Elem()
if err := unmarshal(cfgPtr.Interface()); err != nil {
return replaceYAMLTypeError(err, cfgTyp, configsType)
}
var err error
*c, err = readConfigs(cfgVal, 0)
return err
}
// MarshalYAML implements yaml.Marshaler.
func (c Configs) MarshalYAML() (interface{}, error) {
cfgTyp := getConfigType(configsType)
cfgPtr := reflect.New(cfgTyp)
cfgVal := cfgPtr.Elem()
if err := writeConfigs(cfgVal, c); err != nil {
return nil, err
}
return cfgPtr.Interface(), nil
}
// A StaticConfig is a Config that provides a static list of targets.
type StaticConfig []*targetgroup.Group
// Name returns the name of the service discovery mechanism.
func (StaticConfig) Name() string { return "static" }
// NewDiscoverer returns a Discoverer for the Config.
func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
return staticDiscoverer(c), nil
}
type staticDiscoverer []*targetgroup.Group
func (c staticDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.Group) {
// TODO: existing implementation closes up chan, but documentation explicitly forbids it...?
defer close(up)
select {
case <-ctx.Done():
case up <- c:
}
}

View file

@@ -28,6 +28,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
@@ -35,7 +36,10 @@ import (
const (
resolvConf = "/etc/resolv.conf"
dnsNameLabel = model.MetaLabelPrefix + "dns_name"
dnsNameLabel = model.MetaLabelPrefix + "dns_name"
dnsSrvRecordPrefix = model.MetaLabelPrefix + "dns_srv_record_"
dnsSrvRecordTargetLabel = dnsSrvRecordPrefix + "target"
dnsSrvRecordPortLabel = dnsSrvRecordPrefix + "port"
// Constants for instrumentation.
namespace = "prometheus"
@@ -62,6 +66,12 @@ var (
}
)
func init() {
discovery.RegisterConfig(&SDConfig{})
prometheus.MustRegister(dnsSDLookupFailuresCount)
prometheus.MustRegister(dnsSDLookupsCount)
}
// SDConfig is the configuration for DNS based service discovery.
type SDConfig struct {
Names []string `yaml:"names"`
@@ -70,6 +80,14 @@ type SDConfig struct {
Port int `yaml:"port"` // Ignored for SRV records
}
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "dns" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(*c, opts.Logger), nil
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultSDConfig
@@ -93,11 +111,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return nil
}
func init() {
prometheus.MustRegister(dnsSDLookupFailuresCount)
prometheus.MustRegister(dnsSDLookupsCount)
}
// Discovery periodically performs DNS-SD requests. It implements
// the Discoverer interface.
type Discovery struct {
@@ -183,9 +196,13 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
}
for _, record := range response.Answer {
var target model.LabelValue
var target, dnsSrvRecordTarget, dnsSrvRecordPort model.LabelValue
switch addr := record.(type) {
case *dns.SRV:
dnsSrvRecordTarget = model.LabelValue(addr.Target)
dnsSrvRecordPort = model.LabelValue(fmt.Sprintf("%d", addr.Port))
// Remove the final dot from rooted DNS names to make them look more usual.
addr.Target = strings.TrimRight(addr.Target, ".")
@@ -199,8 +216,10 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
continue
}
tg.Targets = append(tg.Targets, model.LabelSet{
model.AddressLabel: target,
dnsNameLabel: model.LabelValue(name),
model.AddressLabel: target,
dnsNameLabel: model.LabelValue(name),
dnsSrvRecordTargetLabel: dnsSrvRecordTarget,
dnsSrvRecordPortLabel: dnsSrvRecordPort,
})
}

View file

@@ -22,6 +22,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/miekg/dns"
"go.uber.org/goleak"
"gopkg.in/yaml.v2"
"github.com/prometheus/common/model"
@@ -29,6 +30,10 @@ import (
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestDNS(t *testing.T) {
testCases := []struct {
name string
@@ -70,7 +75,12 @@ func TestDNS(t *testing.T) {
{
Source: "web.example.com.",
Targets: []model.LabelSet{
{"__address__": "192.0.2.2:80", "__meta_dns_name": "web.example.com."},
{
"__address__": "192.0.2.2:80",
"__meta_dns_name": "web.example.com.",
"__meta_dns_srv_record_target": "",
"__meta_dns_srv_record_port": "",
},
},
},
},
@@ -95,7 +105,12 @@ func TestDNS(t *testing.T) {
{
Source: "web.example.com.",
Targets: []model.LabelSet{
{"__address__": "[::1]:80", "__meta_dns_name": "web.example.com."},
{
"__address__": "[::1]:80",
"__meta_dns_name": "web.example.com.",
"__meta_dns_srv_record_target": "",
"__meta_dns_srv_record_port": "",
},
},
},
},
@@ -120,8 +135,18 @@ func TestDNS(t *testing.T) {
{
Source: "_mysql._tcp.db.example.com.",
Targets: []model.LabelSet{
{"__address__": "db1.example.com:3306", "__meta_dns_name": "_mysql._tcp.db.example.com."},
{"__address__": "db2.example.com:3306", "__meta_dns_name": "_mysql._tcp.db.example.com."},
{
"__address__": "db1.example.com:3306",
"__meta_dns_name": "_mysql._tcp.db.example.com.",
"__meta_dns_srv_record_target": "db1.example.com.",
"__meta_dns_srv_record_port": "3306",
},
{
"__address__": "db2.example.com:3306",
"__meta_dns_name": "_mysql._tcp.db.example.com.",
"__meta_dns_srv_record_target": "db2.example.com.",
"__meta_dns_srv_record_port": "3306",
},
},
},
},
@@ -145,7 +170,12 @@ func TestDNS(t *testing.T) {
{
Source: "_mysql._tcp.db.example.com.",
Targets: []model.LabelSet{
{"__address__": "db1.example.com:3306", "__meta_dns_name": "_mysql._tcp.db.example.com."},
{
"__address__": "db1.example.com:3306",
"__meta_dns_name": "_mysql._tcp.db.example.com.",
"__meta_dns_srv_record_target": "db1.example.com.",
"__meta_dns_srv_record_port": "3306",
},
},
},
},

View file

@@ -22,9 +22,11 @@ import (
"github.com/docker/docker/client"
"github.com/go-kit/kit/log"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
@@ -33,15 +35,21 @@ const (
swarmLabel = model.MetaLabelPrefix + "dockerswarm_"
)
var userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
// DefaultSDConfig is the default Docker Swarm SD configuration.
var DefaultSDConfig = SDConfig{
RefreshInterval: model.Duration(60 * time.Second),
Port: 80,
}
func init() {
discovery.RegisterConfig(&SDConfig{})
}
// SDConfig is the configuration for Docker Swarm based service discovery.
type SDConfig struct {
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
Host string `yaml:"host"`
Role string `yaml:"role"`
@@ -50,6 +58,19 @@ type SDConfig struct {
RefreshInterval model.Duration `yaml:"refresh_interval"`
}
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "dockerswarm" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(c, opts.Logger)
}
// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
c.HTTPClientConfig.SetDirectory(dir)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultSDConfig
@@ -106,7 +127,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
// unix, which are not supported by the HTTP client. Passing HTTP client
// options to the Docker client makes those non-HTTP requests fail.
if hostURL.Scheme == "http" || hostURL.Scheme == "https" {
rt, err := config_util.NewRoundTripperFromConfig(conf.HTTPClientConfig, "dockerswarm_sd", false)
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "dockerswarm_sd", false, false)
if err != nil {
return nil, err
}
@@ -116,6 +137,9 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
Timeout: time.Duration(conf.RefreshInterval),
}),
client.WithScheme(hostURL.Scheme),
client.WithHTTPHeaders(map[string]string{
"User-Agent": userAgent,
}),
)
}

View file

@@ -57,42 +57,68 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group,
}
for _, s := range services {
for _, e := range s.Endpoint.Ports {
if e.Protocol != swarm.PortConfigProtocolTCP {
continue
commonLabels := map[string]string{
swarmLabelServiceID: s.ID,
swarmLabelServiceName: s.Spec.Name,
swarmLabelServiceTaskContainerHostname: s.Spec.TaskTemplate.ContainerSpec.Hostname,
swarmLabelServiceTaskContainerImage: s.Spec.TaskTemplate.ContainerSpec.Image,
}
commonLabels[swarmLabelServiceMode] = getServiceValueMode(s)
if s.UpdateStatus != nil {
commonLabels[swarmLabelServiceUpdatingStatus] = string(s.UpdateStatus.State)
}
for k, v := range s.Spec.Labels {
ln := strutil.SanitizeLabelName(k)
commonLabels[swarmLabelServiceLabelPrefix+ln] = v
}
for _, p := range s.Endpoint.VirtualIPs {
var added bool
ip, _, err := net.ParseCIDR(p.Addr)
if err != nil {
return nil, fmt.Errorf("error while parsing address %s: %w", p.Addr, err)
}
for _, p := range s.Endpoint.VirtualIPs {
for _, e := range s.Endpoint.Ports {
if e.Protocol != swarm.PortConfigProtocolTCP {
continue
}
labels := model.LabelSet{
swarmLabelServiceEndpointPortName: model.LabelValue(e.Name),
swarmLabelServiceEndpointPortPublishMode: model.LabelValue(e.PublishMode),
swarmLabelServiceID: model.LabelValue(s.ID),
swarmLabelServiceName: model.LabelValue(s.Spec.Name),
swarmLabelServiceTaskContainerHostname: model.LabelValue(s.Spec.TaskTemplate.ContainerSpec.Hostname),
swarmLabelServiceTaskContainerImage: model.LabelValue(s.Spec.TaskTemplate.ContainerSpec.Image),
}
labels[swarmLabelServiceMode] = model.LabelValue(getServiceValueMode(s))
if s.UpdateStatus != nil {
labels[swarmLabelServiceUpdatingStatus] = model.LabelValue(s.UpdateStatus.State)
for k, v := range commonLabels {
labels[model.LabelName(k)] = model.LabelValue(v)
}
for k, v := range s.Spec.Labels {
ln := strutil.SanitizeLabelName(k)
labels[model.LabelName(swarmLabelServiceLabelPrefix+ln)] = model.LabelValue(v)
}
ip, _, err := net.ParseCIDR(p.Addr)
if err != nil {
return nil, fmt.Errorf("error while parsing address %s: %w", p.Addr, err)
}
addr := net.JoinHostPort(ip.String(), strconv.FormatUint(uint64(e.PublishedPort), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
for k, v := range networkLabels[p.NetworkID] {
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(ip.String(), strconv.FormatUint(uint64(e.PublishedPort), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)
added = true
}
if !added {
labels := model.LabelSet{}
for k, v := range commonLabels {
labels[model.LabelName(k)] = model.LabelValue(v)
}
for k, v := range networkLabels[p.NetworkID] {
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)
}
}

View file

@@ -50,7 +50,7 @@ host: %s
tg := tgs[0]
testutil.Assert(t, tg != nil, "tg should not be nil")
testutil.Assert(t, tg.Targets != nil, "tg.targets should not be nil")
testutil.Equals(t, 10, len(tg.Targets))
testutil.Equals(t, 15, len(tg.Targets))
for i, lbls := range []model.LabelSet{
{
@@ -65,8 +65,8 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0@sha256:1d2518ec0501dd848e718bf008df37852b1448c862d6f256f2d39244975758d6"),
},
@@ -83,11 +83,75 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0@sha256:1d2518ec0501dd848e718bf008df37852b1448c862d6f256f2d39244975758d6"),
},
{
"__address__": model.LabelValue("10.0.1.34:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_id": model.LabelValue("9bbq7j55tzzz85k2gg52x73rg"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("cloudflare/unsee:v0.8.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_unsee"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("cloudflare/unsee:v0.8.0@sha256:28398f47f63feb1647887999701fa730da351fc6d3cc7228e5da44b40a663ada"),
},
{
"__address__": model.LabelValue("10.0.1.13:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_id": model.LabelValue("hv645udwaaewyw7wuc6nspm68"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-prometheus:v2.5.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_prometheus"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-prometheus:v2.5.0@sha256:f1a3781c4785637ba088dcf54889f77d9b6b69f21b2c0a167c1347473f4e2587"),
},
{
"__address__": model.LabelValue("10.0.1.23:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_id": model.LabelValue("iml8vdd2dudvq457sbirpf66z"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("google/cadvisor"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_cadvisor"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("google/cadvisor:latest@sha256:815386ebbe9a3490f38785ab11bda34ec8dacf4634af77b8912832d4f85dca04"),
},
{
"__address__": model.LabelValue("10.0.1.31:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_id": model.LabelValue("rl4jhdws3r4lkfvq8kx6c4xnr"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-alertmanager:v0.14.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_alertmanager"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-alertmanager:v0.14.0@sha256:7069b656bd5df0606ff1db81a7553a25dc72be51d3aca6a7e08d776566cefbd8"),
},
{
"__address__": model.LabelValue("10.0.0.13:9090"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
@@ -100,8 +164,42 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
{
"__address__": model.LabelValue("10.0.0.13:9093"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
"__meta_dockerswarm_network_ingress": model.LabelValue("true"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_name": model.LabelValue("ingress"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""),
"__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"),
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
{
"__address__": model.LabelValue("10.0.0.13:9094"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
"__meta_dockerswarm_network_ingress": model.LabelValue("true"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_name": model.LabelValue("ingress"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""),
"__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"),
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
@@ -118,25 +216,8 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
@@ -153,25 +234,8 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
@@ -188,8 +252,8 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
@@ -205,8 +269,8 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("uk9su5tb9ykfzew3qtmp14uzh"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-grafana:5.3.4"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_grafana"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_grafana"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-grafana:5.3.4@sha256:2aca8aa5716e6e0eed3fcdc88fec256a0a1828c491a8cf240486ae7cc473092d"),
},
@@ -223,11 +287,27 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("uk9su5tb9ykfzew3qtmp14uzh"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-grafana:5.3.4"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_grafana"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_grafana"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-grafana:5.3.4@sha256:2aca8aa5716e6e0eed3fcdc88fec256a0a1828c491a8cf240486ae7cc473092d"),
},
{
"__address__": model.LabelValue("10.0.1.17:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_id": model.LabelValue("ul5qawv4s7f7msm7dtyfarw80"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_dockerd-exporter"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
testutil.Equals(t, lbls, tg.Targets[i])

View file

@@ -106,12 +106,19 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
tg.Targets = append(tg.Targets, labels)
}
for _, network := range s.NetworksAttachments {
for _, address := range network.Addresses {
var added bool
ip, _, err := net.ParseCIDR(address)
if err != nil {
return nil, fmt.Errorf("error while parsing address %s: %w", address, err)
}
for _, p := range servicePorts[s.ServiceID] {
if p.Protocol != swarm.PortConfigProtocolTCP {
continue
}
labels := model.LabelSet{
swarmLabelTaskPortMode: model.LabelValue(p.PublishMode),
}
@@ -124,13 +131,26 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(ip.String(), strconv.FormatUint(uint64(p.PublishedPort), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)
added = true
}
if !added {
labels := model.LabelSet{}
for k, v := range commonLabels {
labels[model.LabelName(k)] = model.LabelValue(v)
}
for k, v := range networkLabels[network.Network.ID] {
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)
}
}
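// Illustration (not part of the change set): the `added` fallback above in
// isolation. portConfig is a hypothetical stand-in for swarm.PortConfig, and
// defaultPort for the discovery config's port (d.port).
package main

import "fmt"

type portConfig struct {
	Protocol      string
	PublishedPort uint32
}

// targetsForAddress emits one target per published TCP port, or a single
// target on defaultPort when no TCP port matched.
func targetsForAddress(ip string, ports []portConfig, defaultPort int) []string {
	var targets []string
	var added bool
	for _, p := range ports {
		if p.Protocol != "tcp" {
			continue
		}
		targets = append(targets, fmt.Sprintf("%s:%d", ip, p.PublishedPort))
		added = true
	}
	if !added {
		targets = append(targets, fmt.Sprintf("%s:%d", ip, defaultPort))
	}
	return targets
}

func main() {
	// No TCP ports published: the task still gets one target on the default port.
	fmt.Println(targetsForAddress("10.0.1.88", []portConfig{{Protocol: "udp", PublishedPort: 53}}, 80))
	// Two published TCP ports: one target each.
	fmt.Println(targetsForAddress("10.0.0.19", []portConfig{{Protocol: "tcp", PublishedPort: 9090}, {Protocol: "tcp", PublishedPort: 9093}}, 80))
}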

View file

@@ -50,7 +50,7 @@ host: %s
tg := tgs[0]
testutil.Assert(t, tg != nil, "tg should not be nil")
testutil.Assert(t, tg.Targets != nil, "tg.targets should not be nil")
testutil.Equals(t, 27, len(tg.Targets))
for i, lbls := range []model.LabelSet{
{
@@ -108,6 +108,33 @@ host: %s
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.88:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.2"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("master-2"),
"__meta_dockerswarm_node_id": model.LabelValue("i9woemzxymn1n98o9ufebclgm"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("iml8vdd2dudvq457sbirpf66z"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("google/cadvisor"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_cadvisor"),
"__meta_dockerswarm_task_container_id": model.LabelValue(""),
"__meta_dockerswarm_task_desired_state": model.LabelValue("ready"),
"__meta_dockerswarm_task_id": model.LabelValue("7ogolpkgw2d2amnht1fbtm9oq"),
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("rejected"),
},
{
"__address__": model.LabelValue("10.0.0.12:9100"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
@@ -273,6 +300,168 @@ host: %s
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.35:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.4"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("worker-1"),
"__meta_dockerswarm_node_id": model.LabelValue("jkc2xd7p3xdazf0u6sh1m9dmr"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("worker"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("9bbq7j55tzzz85k2gg52x73rg"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("cloudflare/unsee:v0.8.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_unsee"),
"__meta_dockerswarm_task_container_id": model.LabelValue("a0734f9421e710b654ca9e67010cbb55c1d5f92c17cc9e6590ab61130a3587e0"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("i9skiw2n5jkjoq7gix2t9uzhy"),
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.14:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.1"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("oxygen"),
"__meta_dockerswarm_node_id": model.LabelValue("d3cw2msquo0d71yn42qrnb0tu"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("hv645udwaaewyw7wuc6nspm68"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-prometheus:v2.5.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_prometheus"),
"__meta_dockerswarm_task_container_id": model.LabelValue("f35e071f1251951a09566a2231cb318a1a29e158a3c3d15ad68fd937184c56b6"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("irj35fr4j59f1kvi0xdin9oly"),
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.20:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.1"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("oxygen"),
"__meta_dockerswarm_node_id": model.LabelValue("d3cw2msquo0d71yn42qrnb0tu"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("ul5qawv4s7f7msm7dtyfarw80"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_dockerd-exporter"),
"__meta_dockerswarm_task_container_id": model.LabelValue("3cee48a5b2b363576b255207aae2d1bd0d8872aa61c3ff3b6d180d78d672a943"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("k8x2806pevvxigagsxovjmur1"),
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.19:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.5"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("worker-2"),
"__meta_dockerswarm_node_id": model.LabelValue("ldawcom10uqi6owysgi28n4ve"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("worker"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("ul5qawv4s7f7msm7dtyfarw80"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_dockerd-exporter"),
"__meta_dockerswarm_task_container_id": model.LabelValue("d5fd335a33c1fca8fffa1ec17bb5010a6bb7fba7aa72fd2209c25be8f5d1b975"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("mita4y262u04g4c7zz7ns032a"),
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.18:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.4"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("worker-1"),
"__meta_dockerswarm_node_id": model.LabelValue("jkc2xd7p3xdazf0u6sh1m9dmr"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("worker"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("ul5qawv4s7f7msm7dtyfarw80"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_dockerd-exporter"),
"__meta_dockerswarm_task_container_id": model.LabelValue("ca72094e1ec0547e84aba227dd44446588172fa9ee273de9bf4e6040ff6937bd"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("nm3v4jifzhfkw3rz33gmnaq2z"),
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.75:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.5"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("worker-2"),
"__meta_dockerswarm_node_id": model.LabelValue("ldawcom10uqi6owysgi28n4ve"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("worker"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("iml8vdd2dudvq457sbirpf66z"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("google/cadvisor"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_cadvisor"),
"__meta_dockerswarm_task_container_id": model.LabelValue(""),
"__meta_dockerswarm_task_desired_state": model.LabelValue("shutdown"),
"__meta_dockerswarm_task_id": model.LabelValue("pxdv57mpvquv2zbt7faz3xux9"),
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("rejected"),
},
{
"__address__": model.LabelValue("10.0.0.19:9090"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
@@ -300,6 +489,60 @@ host: %s
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("starting"),
},
{
"__address__": model.LabelValue("10.0.0.19:9093"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
"__meta_dockerswarm_network_ingress": model.LabelValue("true"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_name": model.LabelValue("ingress"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.2"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("master-2"),
"__meta_dockerswarm_node_id": model.LabelValue("i9woemzxymn1n98o9ufebclgm"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_task_container_id": model.LabelValue("5d00e145527f14d1bb2b13601000f1de9c95fc8ec1ccc1ff8291cadc31232957"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("qtr16op0cz5pdbx4rylm7up4g"),
"__meta_dockerswarm_task_port_publish_mode": model.LabelValue("ingress"),
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("starting"),
},
{
"__address__": model.LabelValue("10.0.0.19:9094"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
"__meta_dockerswarm_network_ingress": model.LabelValue("true"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_name": model.LabelValue("ingress"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.2"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("master-2"),
"__meta_dockerswarm_node_id": model.LabelValue("i9woemzxymn1n98o9ufebclgm"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_task_container_id": model.LabelValue("5d00e145527f14d1bb2b13601000f1de9c95fc8ec1ccc1ff8291cadc31232957"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("qtr16op0cz5pdbx4rylm7up4g"),
"__meta_dockerswarm_task_port_publish_mode": model.LabelValue("ingress"),
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("starting"),
},
{
"__address__": model.LabelValue("10.0.1.81:9090"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
@@ -328,33 +571,6 @@ host: %s
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("starting"),
},
{
"__address__": model.LabelValue("10.0.1.81:9093"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
@@ -383,33 +599,6 @@ host: %s
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("starting"),
},
{
"__address__": model.LabelValue("10.0.1.81:9094"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
@@ -438,6 +627,60 @@ host: %s
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("starting"),
},
{
"__address__": model.LabelValue("10.0.1.24:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.1"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("oxygen"),
"__meta_dockerswarm_node_id": model.LabelValue("d3cw2msquo0d71yn42qrnb0tu"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("iml8vdd2dudvq457sbirpf66z"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("google/cadvisor"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_cadvisor"),
"__meta_dockerswarm_task_container_id": model.LabelValue("81dd017b6d9f36d779a3217021111ffc77dcdbd5f26da86c2ae8fada86f33d17"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("s0rh3k9l7ii9vb62lsfp4wc93"),
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.32:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.3"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("master-3"),
"__meta_dockerswarm_node_id": model.LabelValue("bvtjl7pnrtg0k88ywialsldpd"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("rl4jhdws3r4lkfvq8kx6c4xnr"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-alertmanager:v0.14.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_alertmanager"),
"__meta_dockerswarm_task_container_id": model.LabelValue("2c753737f2ac7b01b577324513d9bd2a1754e65ab2a44714090eb5d83d2961b2"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("sxwkumr37wjaqzir89uepm4nc"),
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.0.16:3000"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
@@ -515,6 +758,33 @@ host: %s
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.22:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.3"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("master-3"),
"__meta_dockerswarm_node_id": model.LabelValue("bvtjl7pnrtg0k88ywialsldpd"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("ul5qawv4s7f7msm7dtyfarw80"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_dockerd-exporter"),
"__meta_dockerswarm_task_container_id": model.LabelValue("dd50c428ebc7a7bd786c7b0db873bf53ed5cbe5312995a76eb8fd325dcf16032"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("zhoo9feqxfyi9zg6acnt1eam2"),
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
testutil.Equals(t, lbls, tg.Targets[i])

View file

@@ -28,9 +28,10 @@ import (
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil"
@@ -64,6 +65,10 @@ var DefaultSDConfig = SDConfig{
RefreshInterval: model.Duration(60 * time.Second),
}
func init() {
discovery.RegisterConfig(&SDConfig{})
}
// Filter is the configuration for filtering EC2 instances.
type Filter struct {
Name string `yaml:"name"`
@@ -72,15 +77,23 @@ type Filter struct {
// SDConfig is the configuration for EC2 based service discovery.
type SDConfig struct {
Endpoint string `yaml:"endpoint"`
Region string `yaml:"region"`
AccessKey string `yaml:"access_key,omitempty"`
SecretKey config.Secret `yaml:"secret_key,omitempty"`
Profile string `yaml:"profile,omitempty"`
RoleARN string `yaml:"role_arn,omitempty"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
Port int `yaml:"port"`
Filters []*Filter `yaml:"filters"`
}
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "ec2" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(c, opts.Logger), nil
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
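// Illustration (not part of the change set): the Name/NewDiscoverer pair is
// the surface discovery.RegisterConfig hooks into; a hypothetical "example"
// mechanism would register itself the same way.
package example

import (
	"errors"

	"github.com/prometheus/prometheus/discovery"
)

type exampleSDConfig struct{}

func init() {
	// Registration makes the config type known to the discovery registry.
	discovery.RegisterConfig(&exampleSDConfig{})
}

// Name returns the section name used in the configuration file.
func (*exampleSDConfig) Name() string { return "example" }

// NewDiscoverer constructs the Discoverer that produces this mechanism's targets.
func (c *exampleSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return nil, errors.New("example: not implemented")
}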

112
discovery/eureka/client.go Normal file
View file

@@ -0,0 +1,112 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package eureka
import (
"context"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/pkg/errors"
"github.com/prometheus/common/version"
)
var userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
type Applications struct {
VersionsDelta int `xml:"versions__delta"`
AppsHashcode string `xml:"apps__hashcode"`
Applications []Application `xml:"application"`
}
type Application struct {
Name string `xml:"name"`
Instances []Instance `xml:"instance"`
}
type Port struct {
Port int `xml:",chardata"`
Enabled bool `xml:"enabled,attr"`
}
type Instance struct {
HostName string `xml:"hostName"`
HomePageURL string `xml:"homePageUrl"`
StatusPageURL string `xml:"statusPageUrl"`
HealthCheckURL string `xml:"healthCheckUrl"`
App string `xml:"app"`
IPAddr string `xml:"ipAddr"`
VipAddress string `xml:"vipAddress"`
SecureVipAddress string `xml:"secureVipAddress"`
Status string `xml:"status"`
Port *Port `xml:"port"`
SecurePort *Port `xml:"securePort"`
DataCenterInfo *DataCenterInfo `xml:"dataCenterInfo"`
Metadata *MetaData `xml:"metadata"`
IsCoordinatingDiscoveryServer bool `xml:"isCoordinatingDiscoveryServer"`
ActionType string `xml:"actionType"`
CountryID int `xml:"countryId"`
InstanceID string `xml:"instanceId"`
}
type MetaData struct {
Items []Tag `xml:",any"`
}
type Tag struct {
XMLName xml.Name
Content string `xml:",innerxml"`
}
type DataCenterInfo struct {
Name string `xml:"name"`
Class string `xml:"class,attr"`
Metadata *MetaData `xml:"metadata"`
}
const appListPath string = "/apps"
func fetchApps(ctx context.Context, server string, client *http.Client) (*Applications, error) {
url := fmt.Sprintf("%s%s", server, appListPath)
request, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
request = request.WithContext(ctx)
request.Header.Add("User-Agent", userAgent)
resp, err := client.Do(request)
if err != nil {
return nil, err
}
defer func() {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()
if resp.StatusCode/100 != 2 {
return nil, errors.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode)
}
var apps Applications
err = xml.NewDecoder(resp.Body).Decode(&apps)
if err != nil {
return nil, errors.Wrapf(err, "%q", url)
}
return &apps, nil
}
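// Illustration (not part of the change set): how the Port struct above maps
// Eureka's XML. The element text binds via ",chardata" and the attribute via
// "enabled,attr"; encoding/xml converts the character data to int.
package main

import (
	"encoding/xml"
	"fmt"
)

type Port struct {
	Port    int  `xml:",chardata"`
	Enabled bool `xml:"enabled,attr"`
}

func main() {
	var p Port
	if err := xml.Unmarshal([]byte(`<port enabled="true">8080</port>`), &p); err != nil {
		panic(err)
	}
	fmt.Printf("port=%d enabled=%v\n", p.Port, p.Enabled) // port=8080 enabled=true
}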

View file

@@ -0,0 +1,213 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package eureka
import (
"context"
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/prometheus/prometheus/util/testutil"
)
func TestFetchApps(t *testing.T) {
appsXML := `<applications>
<versions__delta>1</versions__delta>
<apps__hashcode>UP_4_</apps__hashcode>
<application>
<name>CONFIG-SERVICE</name>
<instance>
<instanceId>config-service001.test.com:config-service:8080</instanceId>
<hostName>config-service001.test.com</hostName>
<app>CONFIG-SERVICE</app>
<ipAddr>192.133.83.31</ipAddr>
<status>UP</status>
<overriddenstatus>UNKNOWN</overriddenstatus>
<port enabled="true">8080</port>
<securePort enabled="false">8080</securePort>
<countryId>1</countryId>
<dataCenterInfo class="com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo">
<name>MyOwn</name>
</dataCenterInfo>
<leaseInfo>
<renewalIntervalInSecs>30</renewalIntervalInSecs>
<durationInSecs>90</durationInSecs>
<registrationTimestamp>1596003469304</registrationTimestamp>
<lastRenewalTimestamp>1596110179310</lastRenewalTimestamp>
<evictionTimestamp>0</evictionTimestamp>
<serviceUpTimestamp>1547190033103</serviceUpTimestamp>
</leaseInfo>
<metadata>
<instanceId>config-service001.test.com:config-service:8080</instanceId>
</metadata>
<homePageUrl>http://config-service001.test.com:8080/</homePageUrl>
<statusPageUrl>http://config-service001.test.com:8080/info</statusPageUrl>
<healthCheckUrl>http://config-service001.test.com:8080/health</healthCheckUrl>
<vipAddress>config-service</vipAddress>
<isCoordinatingDiscoveryServer>false</isCoordinatingDiscoveryServer>
<lastUpdatedTimestamp>1596003469304</lastUpdatedTimestamp>
<lastDirtyTimestamp>1596003469304</lastDirtyTimestamp>
<actionType>ADDED</actionType>
</instance>
<instance>
<instanceId>config-service002.test.com:config-service:8080</instanceId>
<hostName>config-service002.test.com</hostName>
<app>CONFIG-SERVICE</app>
<ipAddr>192.133.83.31</ipAddr>
<status>UP</status>
<overriddenstatus>UNKNOWN</overriddenstatus>
<port enabled="true">8080</port>
<securePort enabled="false">8080</securePort>
<countryId>1</countryId>
<dataCenterInfo class="com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo">
<name>MyOwn</name>
</dataCenterInfo>
<leaseInfo>
<renewalIntervalInSecs>30</renewalIntervalInSecs>
<durationInSecs>90</durationInSecs>
<registrationTimestamp>1596003469304</registrationTimestamp>
<lastRenewalTimestamp>1596110179310</lastRenewalTimestamp>
<evictionTimestamp>0</evictionTimestamp>
<serviceUpTimestamp>1547190033103</serviceUpTimestamp>
</leaseInfo>
<metadata>
<instanceId>config-service002.test.com:config-service:8080</instanceId>
</metadata>
<homePageUrl>http://config-service002.test.com:8080/</homePageUrl>
<statusPageUrl>http://config-service002.test.com:8080/info</statusPageUrl>
<healthCheckUrl>http://config-service002.test.com:8080/health</healthCheckUrl>
<vipAddress>config-service</vipAddress>
<isCoordinatingDiscoveryServer>false</isCoordinatingDiscoveryServer>
<lastUpdatedTimestamp>1596003469304</lastUpdatedTimestamp>
<lastDirtyTimestamp>1596003469304</lastDirtyTimestamp>
<actionType>ADDED</actionType>
</instance>
</application>
<application>
<name>META-SERVICE</name>
<instance>
<instanceId>meta-service002.test.com:meta-service:8080</instanceId>
<hostName>meta-service002.test.com</hostName>
<app>META-SERVICE</app>
<ipAddr>192.133.87.237</ipAddr>
<status>UP</status>
<overriddenstatus>UNKNOWN</overriddenstatus>
<port enabled="true">8080</port>
<securePort enabled="false">443</securePort>
<countryId>1</countryId>
<dataCenterInfo class="com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo">
<name>MyOwn</name>
</dataCenterInfo>
<leaseInfo>
<renewalIntervalInSecs>30</renewalIntervalInSecs>
<durationInSecs>90</durationInSecs>
<registrationTimestamp>1535444352472</registrationTimestamp>
<lastRenewalTimestamp>1596110168846</lastRenewalTimestamp>
<evictionTimestamp>0</evictionTimestamp>
<serviceUpTimestamp>1535444352472</serviceUpTimestamp>
</leaseInfo>
<metadata>
<project>meta-service</project>
<management.port>8090</management.port>
</metadata>
<homePageUrl>http://meta-service002.test.com:8080/</homePageUrl>
<statusPageUrl>http://meta-service002.test.com:8080/info</statusPageUrl>
<healthCheckUrl>http://meta-service002.test.com:8080/health</healthCheckUrl>
<vipAddress>meta-service</vipAddress>
<secureVipAddress>meta-service</secureVipAddress>
<isCoordinatingDiscoveryServer>false</isCoordinatingDiscoveryServer>
<lastUpdatedTimestamp>1535444352472</lastUpdatedTimestamp>
<lastDirtyTimestamp>1535444352398</lastDirtyTimestamp>
<actionType>ADDED</actionType>
</instance>
<instance>
<instanceId>meta-service001.test.com:meta-service:8080</instanceId>
<hostName>meta-service001.test.com</hostName>
<app>META-SERVICE</app>
<ipAddr>192.133.87.236</ipAddr>
<status>UP</status>
<overriddenstatus>UNKNOWN</overriddenstatus>
<port enabled="true">8080</port>
<securePort enabled="false">443</securePort>
<countryId>1</countryId>
<dataCenterInfo class="com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo">
<name>MyOwn</name>
</dataCenterInfo>
<leaseInfo>
<renewalIntervalInSecs>30</renewalIntervalInSecs>
<durationInSecs>90</durationInSecs>
<registrationTimestamp>1535444352472</registrationTimestamp>
<lastRenewalTimestamp>1596110168846</lastRenewalTimestamp>
<evictionTimestamp>0</evictionTimestamp>
<serviceUpTimestamp>1535444352472</serviceUpTimestamp>
</leaseInfo>
<metadata>
<project>meta-service</project>
<management.port>8090</management.port>
</metadata>
<homePageUrl>http://meta-service001.test.com:8080/</homePageUrl>
<statusPageUrl>http://meta-service001.test.com:8080/info</statusPageUrl>
<healthCheckUrl>http://meta-service001.test.com:8080/health</healthCheckUrl>
<vipAddress>meta-service</vipAddress>
<secureVipAddress>meta-service</secureVipAddress>
<isCoordinatingDiscoveryServer>false</isCoordinatingDiscoveryServer>
<lastUpdatedTimestamp>1535444352472</lastUpdatedTimestamp>
<lastDirtyTimestamp>1535444352398</lastDirtyTimestamp>
<actionType>ADDED</actionType>
</instance>
</application>
</applications>`
// Simulate apps with a valid XML response.
respHandler := func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/xml")
w.WriteHeader(http.StatusOK)
io.WriteString(w, appsXML)
}
// Create a test server with mock HTTP handler.
ts := httptest.NewServer(http.HandlerFunc(respHandler))
defer ts.Close()
apps, err := fetchApps(context.TODO(), ts.URL, &http.Client{})
testutil.Ok(t, err)
testutil.Equals(t, len(apps.Applications), 2)
testutil.Equals(t, apps.Applications[0].Name, "CONFIG-SERVICE")
testutil.Equals(t, apps.Applications[1].Name, "META-SERVICE")
testutil.Equals(t, len(apps.Applications[1].Instances), 2)
testutil.Equals(t, apps.Applications[1].Instances[0].InstanceID, "meta-service002.test.com:meta-service:8080")
testutil.Equals(t, apps.Applications[1].Instances[0].Metadata.Items[0].XMLName.Local, "project")
testutil.Equals(t, apps.Applications[1].Instances[0].Metadata.Items[0].Content, "meta-service")
testutil.Equals(t, apps.Applications[1].Instances[0].Metadata.Items[1].XMLName.Local, "management.port")
testutil.Equals(t, apps.Applications[1].Instances[0].Metadata.Items[1].Content, "8090")
testutil.Equals(t, apps.Applications[1].Instances[1].InstanceID, "meta-service001.test.com:meta-service:8080")
}
func Test500ErrorHttpResponse(t *testing.T) {
// Simulate 500 error.
respHandler := func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/xml")
w.WriteHeader(http.StatusInternalServerError)
io.WriteString(w, ``)
}
// Create a test server with mock HTTP handler.
ts := httptest.NewServer(http.HandlerFunc(respHandler))
defer ts.Close()
_, err := fetchApps(context.TODO(), ts.URL, &http.Client{})
testutil.NotOk(t, err, "5xx HTTP response")
}

discovery/eureka/eureka.go (new file, 220 lines)

@@ -0,0 +1,220 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package eureka
import (
"context"
"net"
"net/http"
"net/url"
"strconv"
"time"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil"
)
const (
// metaLabelPrefix is the meta prefix used for all meta labels
// in this discovery.
metaLabelPrefix = model.MetaLabelPrefix + "eureka_"
metaAppInstanceLabelPrefix = metaLabelPrefix + "app_instance_"
appNameLabel = metaLabelPrefix + "app_name"
appInstanceHostNameLabel = metaAppInstanceLabelPrefix + "hostname"
appInstanceHomePageURLLabel = metaAppInstanceLabelPrefix + "homepage_url"
appInstanceStatusPageURLLabel = metaAppInstanceLabelPrefix + "statuspage_url"
appInstanceHealthCheckURLLabel = metaAppInstanceLabelPrefix + "healthcheck_url"
appInstanceIPAddrLabel = metaAppInstanceLabelPrefix + "ip_addr"
appInstanceVipAddressLabel = metaAppInstanceLabelPrefix + "vip_address"
appInstanceSecureVipAddressLabel = metaAppInstanceLabelPrefix + "secure_vip_address"
appInstanceStatusLabel = metaAppInstanceLabelPrefix + "status"
appInstancePortLabel = metaAppInstanceLabelPrefix + "port"
appInstancePortEnabledLabel = metaAppInstanceLabelPrefix + "port_enabled"
appInstanceSecurePortLabel = metaAppInstanceLabelPrefix + "secure_port"
appInstanceSecurePortEnabledLabel = metaAppInstanceLabelPrefix + "secure_port_enabled"
appInstanceDataCenterInfoNameLabel = metaAppInstanceLabelPrefix + "datacenterinfo_name"
appInstanceDataCenterInfoMetadataPrefix = metaAppInstanceLabelPrefix + "datacenterinfo_metadata_"
appInstanceCountryIDLabel = metaAppInstanceLabelPrefix + "country_id"
appInstanceIDLabel = metaAppInstanceLabelPrefix + "id"
appInstanceMetadataPrefix = metaAppInstanceLabelPrefix + "metadata_"
)
// DefaultSDConfig is the default Eureka SD configuration.
var DefaultSDConfig = SDConfig{
RefreshInterval: model.Duration(30 * time.Second),
}
func init() {
discovery.RegisterConfig(&SDConfig{})
}
// SDConfig is the configuration for applications running on Eureka.
type SDConfig struct {
Server string `yaml:"server,omitempty"`
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
}
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "eureka" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(c, opts.Logger)
}
// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
c.HTTPClientConfig.SetDirectory(dir)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))
if err != nil {
return err
}
if len(c.Server) == 0 {
return errors.New("eureka_sd: empty or null eureka server")
}
parsedURL, err := url.Parse(c.Server)
if err != nil {
return err
}
if len(parsedURL.Scheme) == 0 || len(parsedURL.Host) == 0 {
return errors.New("eureka_sd: invalid eureka server URL")
}
return c.HTTPClientConfig.Validate()
}
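// A minimal sketch of a matching scrape configuration (the endpoint URL is
// an assumption for illustration, not taken from this change):
//
//   scrape_configs:
//     - job_name: eureka
//       eureka_sd_configs:
//         - server: http://localhost:8761/eureka
//           refresh_interval: 30s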
// Discovery provides service discovery based on a Eureka instance.
type Discovery struct {
*refresh.Discovery
client *http.Client
server string
}
// NewDiscovery creates a new Eureka discovery.
func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd", false, false)
if err != nil {
return nil, err
}
d := &Discovery{
client: &http.Client{Transport: rt},
server: conf.Server,
}
d.Discovery = refresh.NewDiscovery(
logger,
"eureka",
time.Duration(conf.RefreshInterval),
d.refresh,
)
return d, nil
}
func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
apps, err := fetchApps(ctx, d.server, d.client)
if err != nil {
return nil, err
}
tg := &targetgroup.Group{
Source: "eureka",
}
for _, app := range apps.Applications {
targets := targetsForApp(&app)
tg.Targets = append(tg.Targets, targets...)
}
return []*targetgroup.Group{tg}, nil
}
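// Note that refresh returns a single target group with Source "eureka";
// individual applications are distinguished per target via the
// __meta_eureka_app_name label rather than by separate groups.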
func targetsForApp(app *Application) []model.LabelSet {
targets := make([]model.LabelSet, 0, len(app.Instances))
// Gather info about the app's 'instances'. Each instance is a potential scrape target.
for _, t := range app.Instances {
var targetAddress string
if t.Port != nil {
targetAddress = net.JoinHostPort(t.HostName, strconv.Itoa(t.Port.Port))
} else {
targetAddress = net.JoinHostPort(t.HostName, "80")
}
target := model.LabelSet{
model.AddressLabel: lv(targetAddress),
model.InstanceLabel: lv(t.InstanceID),
appNameLabel: lv(app.Name),
appInstanceHostNameLabel: lv(t.HostName),
appInstanceHomePageURLLabel: lv(t.HomePageURL),
appInstanceStatusPageURLLabel: lv(t.StatusPageURL),
appInstanceHealthCheckURLLabel: lv(t.HealthCheckURL),
appInstanceIPAddrLabel: lv(t.IPAddr),
appInstanceVipAddressLabel: lv(t.VipAddress),
appInstanceSecureVipAddressLabel: lv(t.SecureVipAddress),
appInstanceStatusLabel: lv(t.Status),
appInstanceCountryIDLabel: lv(strconv.Itoa(t.CountryID)),
appInstanceIDLabel: lv(t.InstanceID),
}
if t.Port != nil {
target[appInstancePortLabel] = lv(strconv.Itoa(t.Port.Port))
target[appInstancePortEnabledLabel] = lv(strconv.FormatBool(t.Port.Enabled))
}
if t.SecurePort != nil {
target[appInstanceSecurePortLabel] = lv(strconv.Itoa(t.SecurePort.Port))
target[appInstanceSecurePortEnabledLabel] = lv(strconv.FormatBool(t.SecurePort.Enabled))
}
if t.DataCenterInfo != nil {
target[appInstanceDataCenterInfoNameLabel] = lv(t.DataCenterInfo.Name)
if t.DataCenterInfo.Metadata != nil {
for _, m := range t.DataCenterInfo.Metadata.Items {
ln := strutil.SanitizeLabelName(m.XMLName.Local)
target[model.LabelName(appInstanceDataCenterInfoMetadataPrefix+ln)] = lv(m.Content)
}
}
}
if t.Metadata != nil {
for _, m := range t.Metadata.Items {
ln := strutil.SanitizeLabelName(m.XMLName.Local)
target[model.LabelName(appInstanceMetadataPrefix+ln)] = lv(m.Content)
}
}
targets = append(targets, target)
}
return targets
}
func lv(s string) model.LabelValue {
return model.LabelValue(s)
}
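// Usage sketch (not part of this file): the meta labels above are typically
// consumed through relabel_configs, e.g. keeping only instances reported UP
// and copying the application name into a target label:
//
//   relabel_configs:
//     - source_labels: [__meta_eureka_app_instance_status]
//       regex: UP
//       action: keep
//     - source_labels: [__meta_eureka_app_name]
//       target_label: eureka_app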


@@ -0,0 +1,246 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package eureka
import (
"context"
"github.com/prometheus/prometheus/util/testutil"
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, error) {
// Create a test server with mock HTTP handler.
ts := httptest.NewServer(respHandler)
defer ts.Close()
conf := SDConfig{
Server: ts.URL,
}
md, err := NewDiscovery(&conf, nil)
if err != nil {
return nil, err
}
return md.refresh(context.Background())
}
func TestEurekaSDHandleError(t *testing.T) {
var (
errTesting = errors.Errorf("non 2xx status '%d' response during eureka service discovery", http.StatusInternalServerError)
respHandler = func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/xml")
w.WriteHeader(http.StatusInternalServerError)
io.WriteString(w, ``)
}
)
tgs, err := testUpdateServices(respHandler)
testutil.ErrorEqual(t, err, errTesting)
testutil.Equals(t, len(tgs), 0)
}
func TestEurekaSDEmptyList(t *testing.T) {
var (
appsXML = `<applications>
<versions__delta>1</versions__delta>
<apps__hashcode/>
</applications>`
respHandler = func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/xml")
w.WriteHeader(http.StatusOK)
io.WriteString(w, appsXML)
}
)
tgs, err := testUpdateServices(respHandler)
testutil.Ok(t, err)
testutil.Equals(t, len(tgs), 1)
}
func TestEurekaSDSendGroup(t *testing.T) {
var (
appsXML = `<applications>
<versions__delta>1</versions__delta>
<apps__hashcode>UP_4_</apps__hashcode>
<application>
<name>CONFIG-SERVICE</name>
<instance>
<instanceId>config-service001.test.com:config-service:8080</instanceId>
<hostName>config-service001.test.com</hostName>
<app>CONFIG-SERVICE</app>
<ipAddr>192.133.83.31</ipAddr>
<status>UP</status>
<overriddenstatus>UNKNOWN</overriddenstatus>
<port enabled="true">8080</port>
<securePort enabled="false">8080</securePort>
<countryId>1</countryId>
<dataCenterInfo class="com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo">
<name>MyOwn</name>
</dataCenterInfo>
<leaseInfo>
<renewalIntervalInSecs>30</renewalIntervalInSecs>
<durationInSecs>90</durationInSecs>
<registrationTimestamp>1596003469304</registrationTimestamp>
<lastRenewalTimestamp>1596110179310</lastRenewalTimestamp>
<evictionTimestamp>0</evictionTimestamp>
<serviceUpTimestamp>1547190033103</serviceUpTimestamp>
</leaseInfo>
<metadata>
<instanceId>config-service001.test.com:config-service:8080</instanceId>
</metadata>
<homePageUrl>http://config-service001.test.com:8080/</homePageUrl>
<statusPageUrl>http://config-service001.test.com:8080/info</statusPageUrl>
<healthCheckUrl>http://config-service001.test.com:8080/health</healthCheckUrl>
<vipAddress>config-service</vipAddress>
<isCoordinatingDiscoveryServer>false</isCoordinatingDiscoveryServer>
<lastUpdatedTimestamp>1596003469304</lastUpdatedTimestamp>
<lastDirtyTimestamp>1596003469304</lastDirtyTimestamp>
<actionType>ADDED</actionType>
</instance>
<instance>
<instanceId>config-service002.test.com:config-service:8080</instanceId>
<hostName>config-service002.test.com</hostName>
<app>CONFIG-SERVICE</app>
<ipAddr>192.133.83.31</ipAddr>
<status>UP</status>
<overriddenstatus>UNKNOWN</overriddenstatus>
<port enabled="true">8080</port>
<securePort enabled="false">8080</securePort>
<countryId>1</countryId>
<dataCenterInfo class="com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo">
<name>MyOwn</name>
</dataCenterInfo>
<leaseInfo>
<renewalIntervalInSecs>30</renewalIntervalInSecs>
<durationInSecs>90</durationInSecs>
<registrationTimestamp>1596003469304</registrationTimestamp>
<lastRenewalTimestamp>1596110179310</lastRenewalTimestamp>
<evictionTimestamp>0</evictionTimestamp>
<serviceUpTimestamp>1547190033103</serviceUpTimestamp>
</leaseInfo>
<metadata>
<instanceId>config-service002.test.com:config-service:8080</instanceId>
</metadata>
<homePageUrl>http://config-service002.test.com:8080/</homePageUrl>
<statusPageUrl>http://config-service002.test.com:8080/info</statusPageUrl>
<healthCheckUrl>http://config-service002.test.com:8080/health</healthCheckUrl>
<vipAddress>config-service</vipAddress>
<isCoordinatingDiscoveryServer>false</isCoordinatingDiscoveryServer>
<lastUpdatedTimestamp>1596003469304</lastUpdatedTimestamp>
<lastDirtyTimestamp>1596003469304</lastDirtyTimestamp>
<actionType>ADDED</actionType>
</instance>
</application>
<application>
<name>META-SERVICE</name>
<instance>
<instanceId>meta-service002.test.com:meta-service:8080</instanceId>
<hostName>meta-service002.test.com</hostName>
<app>META-SERVICE</app>
<ipAddr>192.133.87.237</ipAddr>
<status>UP</status>
<overriddenstatus>UNKNOWN</overriddenstatus>
<port enabled="true">8080</port>
<securePort enabled="false">443</securePort>
<countryId>1</countryId>
<dataCenterInfo class="com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo">
<name>MyOwn</name>
</dataCenterInfo>
<leaseInfo>
<renewalIntervalInSecs>30</renewalIntervalInSecs>
<durationInSecs>90</durationInSecs>
<registrationTimestamp>1535444352472</registrationTimestamp>
<lastRenewalTimestamp>1596110168846</lastRenewalTimestamp>
<evictionTimestamp>0</evictionTimestamp>
<serviceUpTimestamp>1535444352472</serviceUpTimestamp>
</leaseInfo>
<metadata>
<project>meta-service</project>
<management.port>8090</management.port>
</metadata>
<homePageUrl>http://meta-service002.test.com:8080/</homePageUrl>
<statusPageUrl>http://meta-service002.test.com:8080/info</statusPageUrl>
<healthCheckUrl>http://meta-service002.test.com:8080/health</healthCheckUrl>
<vipAddress>meta-service</vipAddress>
<secureVipAddress>meta-service</secureVipAddress>
<isCoordinatingDiscoveryServer>false</isCoordinatingDiscoveryServer>
<lastUpdatedTimestamp>1535444352472</lastUpdatedTimestamp>
<lastDirtyTimestamp>1535444352398</lastDirtyTimestamp>
<actionType>ADDED</actionType>
</instance>
<instance>
<instanceId>meta-service001.test.com:meta-service:8080</instanceId>
<hostName>meta-service001.test.com</hostName>
<app>META-SERVICE</app>
<ipAddr>192.133.87.236</ipAddr>
<status>UP</status>
<overriddenstatus>UNKNOWN</overriddenstatus>
<port enabled="true">8080</port>
<securePort enabled="false">443</securePort>
<countryId>1</countryId>
<dataCenterInfo class="com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo">
<name>MyOwn</name>
</dataCenterInfo>
<leaseInfo>
<renewalIntervalInSecs>30</renewalIntervalInSecs>
<durationInSecs>90</durationInSecs>
<registrationTimestamp>1535444352472</registrationTimestamp>
<lastRenewalTimestamp>1596110168846</lastRenewalTimestamp>
<evictionTimestamp>0</evictionTimestamp>
<serviceUpTimestamp>1535444352472</serviceUpTimestamp>
</leaseInfo>
<metadata>
<project>meta-service</project>
<management.port>8090</management.port>
</metadata>
<homePageUrl>http://meta-service001.test.com:8080/</homePageUrl>
<statusPageUrl>http://meta-service001.test.com:8080/info</statusPageUrl>
<healthCheckUrl>http://meta-service001.test.com:8080/health</healthCheckUrl>
<vipAddress>meta-service</vipAddress>
<secureVipAddress>meta-service</secureVipAddress>
<isCoordinatingDiscoveryServer>false</isCoordinatingDiscoveryServer>
<lastUpdatedTimestamp>1535444352472</lastUpdatedTimestamp>
<lastDirtyTimestamp>1535444352398</lastDirtyTimestamp>
<actionType>ADDED</actionType>
</instance>
</application>
</applications>`
respHandler = func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/xml")
w.WriteHeader(http.StatusOK)
io.WriteString(w, appsXML)
}
)
tgs, err := testUpdateServices(respHandler)
testutil.Ok(t, err)
testutil.Equals(t, len(tgs), 1)
tg := tgs[0]
testutil.Equals(t, tg.Source, "eureka")
testutil.Equals(t, len(tg.Targets), 4)
tgt := tg.Targets[0]
testutil.Equals(t, tgt[model.AddressLabel], model.LabelValue("config-service001.test.com:8080"))
tgt = tg.Targets[2]
testutil.Equals(t, tgt[model.AddressLabel], model.LabelValue("meta-service002.test.com:8080"))
}


@@ -29,10 +29,12 @@ import (
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
fsnotify "gopkg.in/fsnotify/fsnotify.v1"
yaml "gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
@@ -45,12 +47,31 @@ var (
}
)
func init() {
discovery.RegisterConfig(&SDConfig{})
}
// SDConfig is the configuration for file based discovery.
type SDConfig struct {
Files []string `yaml:"files"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
}
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "file" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(c, opts.Logger), nil
}
// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
for i, file := range c.Files {
c.Files[i] = config.JoinDir(dir, file)
}
}
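// For example (illustrative values), with dir "/etc/prometheus" a relative
// entry "targets/*.yml" becomes "/etc/prometheus/targets/*.yml"; absolute
// paths are returned unchanged by config.JoinDir.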
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultSDConfig


@@ -26,11 +26,16 @@ import (
"time"
"github.com/prometheus/common/model"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
const defaultWait = time.Second
type testRunner struct {


@@ -28,6 +28,7 @@ import (
compute "google.golang.org/api/compute/v1"
"google.golang.org/api/option"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil"
@@ -57,6 +58,10 @@ var DefaultSDConfig = SDConfig{
RefreshInterval: model.Duration(60 * time.Second),
}
func init() {
discovery.RegisterConfig(&SDConfig{})
}
// SDConfig is the configuration for GCE based service discovery.
type SDConfig struct {
// Project: The Google Cloud Project ID
@@ -76,6 +81,14 @@ type SDConfig struct {
TagSeparator string `yaml:"tag_separator,omitempty"`
}
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "gce" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(*c, opts.Logger)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultSDConfig

discovery/hetzner/hcloud.go (new file, 132 lines)

@@ -0,0 +1,132 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hetzner
import (
"context"
"fmt"
"net"
"net/http"
"strconv"
"time"
"github.com/go-kit/kit/log"
"github.com/hetznercloud/hcloud-go/hcloud"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil"
)
const (
hetznerHcloudLabelPrefix = hetznerLabelPrefix + "hcloud_"
hetznerLabelHcloudImageName = hetznerHcloudLabelPrefix + "image_name"
hetznerLabelHcloudImageDescription = hetznerHcloudLabelPrefix + "image_description"
hetznerLabelHcloudImageOSVersion = hetznerHcloudLabelPrefix + "image_os_version"
hetznerLabelHcloudImageOSFlavor = hetznerHcloudLabelPrefix + "image_os_flavor"
hetznerLabelHcloudPrivateIPv4 = hetznerHcloudLabelPrefix + "private_ipv4_"
hetznerLabelHcloudDatacenterLocation = hetznerHcloudLabelPrefix + "datacenter_location"
hetznerLabelHcloudDatacenterLocationNetworkZone = hetznerHcloudLabelPrefix + "datacenter_location_network_zone"
hetznerLabelHcloudCPUCores = hetznerHcloudLabelPrefix + "cpu_cores"
hetznerLabelHcloudCPUType = hetznerHcloudLabelPrefix + "cpu_type"
hetznerLabelHcloudMemoryGB = hetznerHcloudLabelPrefix + "memory_size_gb"
hetznerLabelHcloudDiskGB = hetznerHcloudLabelPrefix + "disk_size_gb"
hetznerLabelHcloudType = hetznerHcloudLabelPrefix + "server_type"
hetznerLabelHcloudLabel = hetznerHcloudLabelPrefix + "label_"
)
// hcloudDiscovery periodically performs Hetzner Cloud requests. It implements
// the Discoverer interface.
type hcloudDiscovery struct {
*refresh.Discovery
client *hcloud.Client
port int
}
// newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, error) {
d := &hcloudDiscovery{
port: conf.Port,
}
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd", false, false)
if err != nil {
return nil, err
}
d.client = hcloud.NewClient(
hcloud.WithApplication("Prometheus", version.Version),
hcloud.WithHTTPClient(&http.Client{
Transport: rt,
Timeout: time.Duration(conf.RefreshInterval),
}),
hcloud.WithEndpoint(conf.hcloudEndpoint),
)
return d, nil
}
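// Note that the refresh interval above doubles as the HTTP client timeout,
// so a single Hetzner Cloud API call can never outlive its refresh cycle.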
func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
servers, err := d.client.Server.All(ctx)
if err != nil {
return nil, err
}
networks, err := d.client.Network.All(ctx)
if err != nil {
return nil, err
}
targets := make([]model.LabelSet, len(servers))
for i, server := range servers {
labels := model.LabelSet{
hetznerLabelRole: model.LabelValue(hetznerRoleHcloud),
hetznerLabelServerID: model.LabelValue(fmt.Sprintf("%d", server.ID)),
hetznerLabelServerName: model.LabelValue(server.Name),
hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name),
hetznerLabelPublicIPv4: model.LabelValue(server.PublicNet.IPv4.IP.String()),
hetznerLabelPublicIPv6Network: model.LabelValue(server.PublicNet.IPv6.Network.String()),
hetznerLabelServerStatus: model.LabelValue(server.Status),
hetznerLabelHcloudDatacenterLocation: model.LabelValue(server.Datacenter.Location.Name),
hetznerLabelHcloudDatacenterLocationNetworkZone: model.LabelValue(server.Datacenter.Location.NetworkZone),
hetznerLabelHcloudType: model.LabelValue(server.ServerType.Name),
hetznerLabelHcloudCPUCores: model.LabelValue(fmt.Sprintf("%d", server.ServerType.Cores)),
hetznerLabelHcloudCPUType: model.LabelValue(server.ServerType.CPUType),
hetznerLabelHcloudMemoryGB: model.LabelValue(fmt.Sprintf("%d", int(server.ServerType.Memory))),
hetznerLabelHcloudDiskGB: model.LabelValue(fmt.Sprintf("%d", server.ServerType.Disk)),
model.AddressLabel: model.LabelValue(net.JoinHostPort(server.PublicNet.IPv4.IP.String(), strconv.FormatUint(uint64(d.port), 10))),
}
if server.Image != nil {
labels[hetznerLabelHcloudImageName] = model.LabelValue(server.Image.Name)
labels[hetznerLabelHcloudImageDescription] = model.LabelValue(server.Image.Description)
labels[hetznerLabelHcloudImageOSVersion] = model.LabelValue(server.Image.OSVersion)
labels[hetznerLabelHcloudImageOSFlavor] = model.LabelValue(server.Image.OSFlavor)
}
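// The server object references private networks only by ID, so resolve each
// attached network against the list fetched above to label the private IP
// with the network's name.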
for _, privateNet := range server.PrivateNet {
for _, network := range networks {
if privateNet.Network.ID == network.ID {
networkLabel := model.LabelName(hetznerLabelHcloudPrivateIPv4 + strutil.SanitizeLabelName(network.Name))
labels[networkLabel] = model.LabelValue(privateNet.IP.String())
}
}
}
for labelKey, labelValue := range server.Labels {
label := model.LabelName(hetznerLabelHcloudLabel + strutil.SanitizeLabelName(labelKey))
labels[label] = model.LabelValue(labelValue)
}
targets[i] = labels
}
return []*targetgroup.Group{{Source: "hetzner", Targets: targets}}, nil
}


@@ -0,0 +1,124 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hetzner
import (
"context"
"fmt"
"github.com/go-kit/kit/log"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/util/testutil"
"testing"
)
type hcloudSDTestSuite struct {
Mock *SDMock
}
func (s *hcloudSDTestSuite) SetupTest(t *testing.T) {
s.Mock = NewSDMock(t)
s.Mock.Setup()
s.Mock.HandleHcloudServers()
s.Mock.HandleHcloudNetworks()
}
func TestHCloudSDRefresh(t *testing.T) {
suite := &hcloudSDTestSuite{}
suite.SetupTest(t)
cfg := DefaultSDConfig
cfg.HTTPClientConfig.BearerToken = hcloudTestToken
cfg.hcloudEndpoint = suite.Mock.Endpoint()
d, err := newHcloudDiscovery(&cfg, log.NewNopLogger())
testutil.Ok(t, err)
targetGroups, err := d.refresh(context.Background())
testutil.Ok(t, err)
testutil.Equals(t, 1, len(targetGroups))
targetGroup := targetGroups[0]
testutil.Assert(t, targetGroup != nil, "targetGroup should not be nil")
testutil.Assert(t, targetGroup.Targets != nil, "targetGroup.targets should not be nil")
testutil.Equals(t, 3, len(targetGroup.Targets))
for i, labelSet := range []model.LabelSet{
{
"__address__": model.LabelValue("1.2.3.4:80"),
"__meta_hetzner_role": model.LabelValue("hcloud"),
"__meta_hetzner_server_id": model.LabelValue("42"),
"__meta_hetzner_server_name": model.LabelValue("my-server"),
"__meta_hetzner_server_status": model.LabelValue("running"),
"__meta_hetzner_public_ipv4": model.LabelValue("1.2.3.4"),
"__meta_hetzner_public_ipv6_network": model.LabelValue("2001:db8::/64"),
"__meta_hetzner_datacenter": model.LabelValue("fsn1-dc8"),
"__meta_hetzner_hcloud_image_name": model.LabelValue("ubuntu-20.04"),
"__meta_hetzner_hcloud_image_description": model.LabelValue("Ubuntu 20.04 Standard 64 bit"),
"__meta_hetzner_hcloud_image_os_flavor": model.LabelValue("ubuntu"),
"__meta_hetzner_hcloud_image_os_version": model.LabelValue("20.04"),
"__meta_hetzner_hcloud_datacenter_location": model.LabelValue("fsn1"),
"__meta_hetzner_hcloud_datacenter_location_network_zone": model.LabelValue("eu-central"),
"__meta_hetzner_hcloud_cpu_cores": model.LabelValue("1"),
"__meta_hetzner_hcloud_cpu_type": model.LabelValue("shared"),
"__meta_hetzner_hcloud_memory_size_gb": model.LabelValue("1"),
"__meta_hetzner_hcloud_disk_size_gb": model.LabelValue("25"),
"__meta_hetzner_hcloud_server_type": model.LabelValue("cx11"),
"__meta_hetzner_hcloud_private_ipv4_mynet": model.LabelValue("10.0.0.2"),
"__meta_hetzner_hcloud_label_my_key": model.LabelValue("my-value"),
},
{
"__address__": model.LabelValue("1.2.3.5:80"),
"__meta_hetzner_role": model.LabelValue("hcloud"),
"__meta_hetzner_server_id": model.LabelValue("44"),
"__meta_hetzner_server_name": model.LabelValue("another-server"),
"__meta_hetzner_server_status": model.LabelValue("stopped"),
"__meta_hetzner_datacenter": model.LabelValue("fsn1-dc14"),
"__meta_hetzner_public_ipv4": model.LabelValue("1.2.3.5"),
"__meta_hetzner_public_ipv6_network": model.LabelValue("2001:db9::/64"),
"__meta_hetzner_hcloud_image_name": model.LabelValue("ubuntu-20.04"),
"__meta_hetzner_hcloud_image_description": model.LabelValue("Ubuntu 20.04 Standard 64 bit"),
"__meta_hetzner_hcloud_image_os_flavor": model.LabelValue("ubuntu"),
"__meta_hetzner_hcloud_image_os_version": model.LabelValue("20.04"),
"__meta_hetzner_hcloud_datacenter_location": model.LabelValue("fsn1"),
"__meta_hetzner_hcloud_datacenter_location_network_zone": model.LabelValue("eu-central"),
"__meta_hetzner_hcloud_cpu_cores": model.LabelValue("2"),
"__meta_hetzner_hcloud_cpu_type": model.LabelValue("shared"),
"__meta_hetzner_hcloud_memory_size_gb": model.LabelValue("1"),
"__meta_hetzner_hcloud_disk_size_gb": model.LabelValue("50"),
"__meta_hetzner_hcloud_server_type": model.LabelValue("cpx11"),
},
{
"__address__": model.LabelValue("1.2.3.6:80"),
"__meta_hetzner_role": model.LabelValue("hcloud"),
"__meta_hetzner_server_id": model.LabelValue("36"),
"__meta_hetzner_server_name": model.LabelValue("deleted-image-server"),
"__meta_hetzner_server_status": model.LabelValue("stopped"),
"__meta_hetzner_datacenter": model.LabelValue("fsn1-dc14"),
"__meta_hetzner_public_ipv4": model.LabelValue("1.2.3.6"),
"__meta_hetzner_public_ipv6_network": model.LabelValue("2001:db7::/64"),
"__meta_hetzner_hcloud_datacenter_location": model.LabelValue("fsn1"),
"__meta_hetzner_hcloud_datacenter_location_network_zone": model.LabelValue("eu-central"),
"__meta_hetzner_hcloud_cpu_cores": model.LabelValue("2"),
"__meta_hetzner_hcloud_cpu_type": model.LabelValue("shared"),
"__meta_hetzner_hcloud_memory_size_gb": model.LabelValue("1"),
"__meta_hetzner_hcloud_disk_size_gb": model.LabelValue("50"),
"__meta_hetzner_hcloud_server_type": model.LabelValue("cpx11"),
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
testutil.Equals(t, labelSet, targetGroup.Targets[i])
})
}
}


@@ -0,0 +1,150 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hetzner
import (
"context"
"github.com/pkg/errors"
"time"
"github.com/go-kit/kit/log"
"github.com/hetznercloud/hcloud-go/hcloud"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
const (
hetznerLabelPrefix = model.MetaLabelPrefix + "hetzner_"
hetznerLabelRole = hetznerLabelPrefix + "role"
hetznerLabelServerID = hetznerLabelPrefix + "server_id"
hetznerLabelServerName = hetznerLabelPrefix + "server_name"
hetznerLabelServerStatus = hetznerLabelPrefix + "server_status"
hetznerLabelDatacenter = hetznerLabelPrefix + "datacenter"
hetznerLabelPublicIPv4 = hetznerLabelPrefix + "public_ipv4"
hetznerLabelPublicIPv6Network = hetznerLabelPrefix + "public_ipv6_network"
)
// DefaultSDConfig is the default Hetzner SD configuration.
var DefaultSDConfig = SDConfig{
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
}
func init() {
discovery.RegisterConfig(&SDConfig{})
}
// SDConfig is the configuration for Hetzner based service discovery.
type SDConfig struct {
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
RefreshInterval model.Duration `yaml:"refresh_interval"`
Port int `yaml:"port"`
Role role `yaml:"role"`
hcloudEndpoint string // For tests only.
robotEndpoint string // For tests only.
}
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "hetzner" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(c, opts.Logger)
}
type refresher interface {
refresh(context.Context) ([]*targetgroup.Group, error)
}
// role is the role of the target within the Hetzner ecosystem.
type role string
// The valid options for role.
const (
// Hetzner Robot Role (Dedicated Server)
// https://robot.hetzner.com
hetznerRoleRobot role = "robot"
// Hetzner Cloud Role
// https://console.hetzner.cloud
hetznerRoleHcloud role = "hcloud"
)
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *role) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err := unmarshal((*string)(c)); err != nil {
return err
}
switch *c {
case hetznerRoleRobot, hetznerRoleHcloud:
return nil
default:
return errors.Errorf("unknown role %q", *c)
}
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))
if err != nil {
return err
}
if c.Role == "" {
return errors.New("role missing (one of: robot, hcloud)")
}
return nil
}
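// An illustrative sketch of the two roles in a configuration (credential
// values are placeholders, not taken from this change):
//
//   hetzner_sd_configs:
//     - role: hcloud
//       bearer_token: "<api token>"
//     - role: robot
//       basic_auth:
//         username: "<webservice user>"
//         password: "<webservice password>"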
// Discovery periodically performs Hetzner requests. It implements
// the Discoverer interface.
type Discovery struct {
*refresh.Discovery
}
// NewDiscovery returns a new Discovery which periodically refreshes its targets.
func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) {
r, err := newRefresher(conf, logger)
if err != nil {
return nil, err
}
return refresh.NewDiscovery(
logger,
"hetzner",
time.Duration(conf.RefreshInterval),
r.refresh,
), nil
}
func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {
switch conf.Role {
case hetznerRoleHcloud:
if conf.hcloudEndpoint == "" {
conf.hcloudEndpoint = hcloud.Endpoint
}
return newHcloudDiscovery(conf, l)
case hetznerRoleRobot:
if conf.robotEndpoint == "" {
conf.robotEndpoint = "https://robot-ws.your-server.de"
}
return newRobotDiscovery(conf, l)
}
return nil, errors.New("unknown Hetzner discovery role")
}


@@ -0,0 +1,552 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hetzner
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
)
// SDMock is a mock of the Hetzner Cloud and Robot APIs.
type SDMock struct {
t *testing.T
Server *httptest.Server
Mux *http.ServeMux
}
// NewSDMock returns a new SDMock.
func NewSDMock(t *testing.T) *SDMock {
return &SDMock{
t: t,
}
}
// Endpoint returns the URL of the mock server.
func (m *SDMock) Endpoint() string {
return m.Server.URL + "/"
}
// Setup creates the mock server
func (m *SDMock) Setup() {
m.Mux = http.NewServeMux()
m.Server = httptest.NewServer(m.Mux)
m.t.Cleanup(m.Server.Close)
}
// ShutdownServer shuts down the mock server.
func (m *SDMock) ShutdownServer() {
m.Server.Close()
}
const hcloudTestToken = "LRK9DAWQ1ZAEFSrCNEEzLCUwhYX1U3g7wMg4dTlkkDC96fyDuyJ39nVbVjCKSDfj"
// HandleHcloudServers mocks the cloud servers list endpoint.
func (m *SDMock) HandleHcloudServers() {
m.Mux.HandleFunc("/servers", func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", hcloudTestToken) {
w.WriteHeader(http.StatusUnauthorized)
return
}
w.Header().Add("content-type", "application/json; charset=utf-8")
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, `
{
"servers": [
{
"id": 42,
"name": "my-server",
"status": "running",
"created": "2016-01-30T23:50:00+00:00",
"public_net": {
"ipv4": {
"ip": "1.2.3.4",
"blocked": false,
"dns_ptr": "server01.example.com"
},
"ipv6": {
"ip": "2001:db8::/64",
"blocked": false,
"dns_ptr": [
{
"ip": "2001:db8::1",
"dns_ptr": "server.example.com"
}
]
},
"floating_ips": [
478
]
},
"private_net": [
{
"network": 4711,
"ip": "10.0.0.2",
"alias_ips": [],
"mac_address": "86:00:ff:2a:7d:e1"
}
],
"server_type": {
"id": 1,
"name": "cx11",
"description": "CX11",
"cores": 1,
"memory": 1,
"disk": 25,
"deprecated": false,
"prices": [
{
"location": "fsn1",
"price_hourly": {
"net": "1.0000000000",
"gross": "1.1900000000000000"
},
"price_monthly": {
"net": "1.0000000000",
"gross": "1.1900000000000000"
}
}
],
"storage_type": "local",
"cpu_type": "shared"
},
"datacenter": {
"id": 1,
"name": "fsn1-dc8",
"description": "Falkenstein 1 DC 8",
"location": {
"id": 1,
"name": "fsn1",
"description": "Falkenstein DC Park 1",
"country": "DE",
"city": "Falkenstein",
"latitude": 50.47612,
"longitude": 12.370071,
"network_zone": "eu-central"
},
"server_types": {
"supported": [
1,
2,
3
],
"available": [
1,
2,
3
],
"available_for_migration": [
1,
2,
3
]
}
},
"image": {
"id": 4711,
"type": "system",
"status": "available",
"name": "ubuntu-20.04",
"description": "Ubuntu 20.04 Standard 64 bit",
"image_size": 2.3,
"disk_size": 10,
"created": "2016-01-30T23:50:00+00:00",
"created_from": {
"id": 1,
"name": "Server"
},
"bound_to": null,
"os_flavor": "ubuntu",
"os_version": "20.04",
"rapid_deploy": false,
"protection": {
"delete": false
},
"deprecated": "2018-02-28T00:00:00+00:00",
"labels": {}
},
"iso": null,
"rescue_enabled": false,
"locked": false,
"backup_window": "22-02",
"outgoing_traffic": 123456,
"ingoing_traffic": 123456,
"included_traffic": 654321,
"protection": {
"delete": false,
"rebuild": false
},
"labels": {
"my-key": "my-value"
},
"volumes": [],
"load_balancers": []
},
{
"id": 44,
"name": "another-server",
"status": "stopped",
"created": "2016-01-30T23:50:00+00:00",
"public_net": {
"ipv4": {
"ip": "1.2.3.5",
"blocked": false,
"dns_ptr": "server01.example.org"
},
"ipv6": {
"ip": "2001:db9::/64",
"blocked": false,
"dns_ptr": [
{
"ip": "2001:db9::1",
"dns_ptr": "server01.example.org"
}
]
},
"floating_ips": []
},
"private_net": [],
"server_type": {
"id": 2,
"name": "cpx11",
"description": "CPX11",
"cores": 2,
"memory": 1,
"disk": 50,
"deprecated": false,
"prices": [
{
"location": "fsn1",
"price_hourly": {
"net": "1.0000000000",
"gross": "1.1900000000000000"
},
"price_monthly": {
"net": "1.0000000000",
"gross": "1.1900000000000000"
}
}
],
"storage_type": "local",
"cpu_type": "shared"
},
"datacenter": {
"id": 2,
"name": "fsn1-dc14",
"description": "Falkenstein 1 DC 14",
"location": {
"id": 1,
"name": "fsn1",
"description": "Falkenstein DC Park 1",
"country": "DE",
"city": "Falkenstein",
"latitude": 50.47612,
"longitude": 12.370071,
"network_zone": "eu-central"
},
"server_types": {
"supported": [
1,
2,
3
],
"available": [
1,
2,
3
],
"available_for_migration": [
1,
2,
3
]
}
},
"image": {
"id": 4711,
"type": "system",
"status": "available",
"name": "ubuntu-20.04",
"description": "Ubuntu 20.04 Standard 64 bit",
"image_size": 2.3,
"disk_size": 10,
"created": "2016-01-30T23:50:00+00:00",
"created_from": {
"id": 1,
"name": "Server"
},
"bound_to": null,
"os_flavor": "ubuntu",
"os_version": "20.04",
"rapid_deploy": false,
"protection": {
"delete": false
},
"deprecated": "2018-02-28T00:00:00+00:00",
"labels": {}
},
"iso": null,
"rescue_enabled": false,
"locked": false,
"backup_window": "22-02",
"outgoing_traffic": 123456,
"ingoing_traffic": 123456,
"included_traffic": 654321,
"protection": {
"delete": false,
"rebuild": false
},
"labels": {},
"volumes": [],
"load_balancers": []
},
{
"id": 36,
"name": "deleted-image-server",
"status": "stopped",
"created": "2016-01-30T23:50:00+00:00",
"public_net": {
"ipv4": {
"ip": "1.2.3.6",
"blocked": false,
"dns_ptr": "server01.example.org"
},
"ipv6": {
"ip": "2001:db7::/64",
"blocked": false,
"dns_ptr": [
{
"ip": "2001:db7::1",
"dns_ptr": "server01.example.org"
}
]
},
"floating_ips": []
},
"private_net": [],
"server_type": {
"id": 2,
"name": "cpx11",
"description": "CPX11",
"cores": 2,
"memory": 1,
"disk": 50,
"deprecated": false,
"prices": [
{
"location": "fsn1",
"price_hourly": {
"net": "1.0000000000",
"gross": "1.1900000000000000"
},
"price_monthly": {
"net": "1.0000000000",
"gross": "1.1900000000000000"
}
}
],
"storage_type": "local",
"cpu_type": "shared"
},
"datacenter": {
"id": 2,
"name": "fsn1-dc14",
"description": "Falkenstein 1 DC 14",
"location": {
"id": 1,
"name": "fsn1",
"description": "Falkenstein DC Park 1",
"country": "DE",
"city": "Falkenstein",
"latitude": 50.47612,
"longitude": 12.370071,
"network_zone": "eu-central"
},
"server_types": {
"supported": [
1,
2,
3
],
"available": [
1,
2,
3
],
"available_for_migration": [
1,
2,
3
]
}
},
"image": null,
"iso": null,
"rescue_enabled": false,
"locked": false,
"backup_window": "22-02",
"outgoing_traffic": 123456,
"ingoing_traffic": 123456,
"included_traffic": 654321,
"protection": {
"delete": false,
"rebuild": false
},
"labels": {},
"volumes": [],
"load_balancers": []
}
],
"meta": {
"pagination": {
"page": 1,
"per_page": 25,
"previous_page": null,
"next_page": null,
"last_page": 1,
"total_entries": 2
}
}
}`,
)
})
}
// HandleHcloudNetworks mocks the cloud networks list endpoint.
func (m *SDMock) HandleHcloudNetworks() {
m.Mux.HandleFunc("/networks", func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", hcloudTestToken) {
w.WriteHeader(http.StatusUnauthorized)
return
}
w.Header().Add("content-type", "application/json; charset=utf-8")
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, `
{
"networks": [
{
"id": 4711,
"name": "mynet",
"ip_range": "10.0.0.0/16",
"subnets": [
{
"type": "cloud",
"ip_range": "10.0.1.0/24",
"network_zone": "eu-central",
"gateway": "10.0.0.1"
}
],
"routes": [
{
"destination": "10.100.1.0/24",
"gateway": "10.0.1.1"
}
],
"servers": [
42
],
"load_balancers": [
42
],
"protection": {
"delete": false
},
"labels": {},
"created": "2016-01-30T23:50:00+00:00"
}
],
"meta": {
"pagination": {
"page": 1,
"per_page": 25,
"previous_page": null,
"next_page": null,
"last_page": 1,
"total_entries": 1
}
}
}`,
)
})
}
const robotTestUsername = "my-hetzner"
const robotTestPassword = "my-password"
// HandleRobotServers mocks the robot servers list endpoint.
func (m *SDMock) HandleRobotServers() {
m.Mux.HandleFunc("/server", func(w http.ResponseWriter, r *http.Request) {
username, password, ok := r.BasicAuth()
if !ok || username != robotTestUsername || password != robotTestPassword {
w.WriteHeader(http.StatusUnauthorized)
return
}
w.Header().Add("content-type", "application/json; charset=utf-8")
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, `
[
{
"server":{
"server_ip":"123.123.123.123",
"server_number":321,
"server_name":"server1",
"product":"DS 3000",
"dc":"NBG1-DC1",
"traffic":"5 TB",
"flatrate":true,
"status":"ready",
"throttled":true,
"cancelled":false,
"paid_until":"2010-09-02",
"ip":[
"123.123.123.123"
],
"subnet":[
{
"ip":"2a01:4f8:111:4221::",
"mask":"64"
}
]
}
},
{
"server":{
"server_ip":"123.123.123.124",
"server_number":421,
"server_name":"server2",
"product":"X5",
"dc":"FSN1-DC10",
"traffic":"2 TB",
"flatrate":true,
"status":"in process",
"throttled":false,
"cancelled":true,
"paid_until":"2010-06-11",
"ip":[
"123.123.123.124"
],
"subnet":null
}
}
]`,
)
})
}

discovery/hetzner/robot.go (new file, 141 lines)

@@ -0,0 +1,141 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hetzner
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"strconv"
"strings"
"time"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
const (
hetznerRobotLabelPrefix = hetznerLabelPrefix + "robot_"
hetznerLabelRobotProduct = hetznerRobotLabelPrefix + "product"
hetznerLabelRobotCancelled = hetznerRobotLabelPrefix + "cancelled"
)
var userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
// robotDiscovery periodically performs Hetzner Robot requests. It implements
// the Discoverer interface.
type robotDiscovery struct {
*refresh.Discovery
client *http.Client
port int
endpoint string
}
// newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets.
func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, error) {
d := &robotDiscovery{
port: conf.Port,
endpoint: conf.robotEndpoint,
}
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd", false, false)
if err != nil {
return nil, err
}
d.client = &http.Client{
Transport: rt,
Timeout: time.Duration(conf.RefreshInterval),
}
return d, nil
}
func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
req, err := http.NewRequestWithContext(ctx, "GET", d.endpoint+"/server", nil)
if err != nil {
return nil, err
}
req.Header.Add("User-Agent", userAgent)
resp, err := d.client.Do(req)
if err != nil {
return nil, err
}
defer func() {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()
if resp.StatusCode/100 != 2 {
return nil, errors.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode)
}
var servers serversList
err = json.NewDecoder(resp.Body).Decode(&servers)
if err != nil {
return nil, err
}
targets := make([]model.LabelSet, len(servers))
for i, server := range servers {
labels := model.LabelSet{
hetznerLabelRole: model.LabelValue(hetznerRoleRobot),
hetznerLabelServerID: model.LabelValue(strconv.Itoa(server.Server.ServerNumber)),
hetznerLabelServerName: model.LabelValue(server.Server.ServerName),
hetznerLabelDatacenter: model.LabelValue(strings.ToLower(server.Server.Dc)),
hetznerLabelPublicIPv4: model.LabelValue(server.Server.ServerIP),
hetznerLabelServerStatus: model.LabelValue(server.Server.Status),
hetznerLabelRobotProduct: model.LabelValue(server.Server.Product),
hetznerLabelRobotCancelled: model.LabelValue(fmt.Sprintf("%t", server.Server.Canceled)),
model.AddressLabel: model.LabelValue(net.JoinHostPort(server.Server.ServerIP, strconv.FormatUint(uint64(d.port), 10))),
}
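// Use the first IPv6 subnet, if any, as the public IPv6 network; IPv4
// subnets are skipped.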
for _, subnet := range server.Server.Subnet {
ip := net.ParseIP(subnet.IP)
if ip.To4() == nil {
labels[hetznerLabelPublicIPv6Network] = model.LabelValue(fmt.Sprintf("%s/%s", subnet.IP, subnet.Mask))
break
}
}
targets[i] = labels
}
return []*targetgroup.Group{{Source: "hetzner", Targets: targets}}, nil
}
type serversList []struct {
Server struct {
ServerIP string `json:"server_ip"`
ServerNumber int `json:"server_number"`
ServerName string `json:"server_name"`
Dc string `json:"dc"`
Status string `json:"status"`
Product string `json:"product"`
Canceled bool `json:"cancelled"`
Subnet []struct {
IP string `json:"ip"`
Mask string `json:"mask"`
} `json:"subnet"`
} `json:"server"`
}


@@ -0,0 +1,101 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hetzner
import (
"context"
"fmt"
"github.com/go-kit/kit/log"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/util/testutil"
"testing"
)
type robotSDTestSuite struct {
Mock *SDMock
}
func (s *robotSDTestSuite) SetupTest(t *testing.T) {
s.Mock = NewSDMock(t)
s.Mock.Setup()
s.Mock.HandleRobotServers()
}
func TestRobotSDRefresh(t *testing.T) {
suite := &robotSDTestSuite{}
suite.SetupTest(t)
cfg := DefaultSDConfig
cfg.HTTPClientConfig.BasicAuth = &config.BasicAuth{Username: robotTestUsername, Password: robotTestPassword}
cfg.robotEndpoint = suite.Mock.Endpoint()
d, err := newRobotDiscovery(&cfg, log.NewNopLogger())
testutil.Ok(t, err)
targetGroups, err := d.refresh(context.Background())
testutil.Ok(t, err)
testutil.Equals(t, 1, len(targetGroups))
targetGroup := targetGroups[0]
testutil.Assert(t, targetGroup != nil, "targetGroup should not be nil")
testutil.Assert(t, targetGroup.Targets != nil, "targetGroup.targets should not be nil")
testutil.Equals(t, 2, len(targetGroup.Targets))
for i, labelSet := range []model.LabelSet{
{
"__address__": model.LabelValue("123.123.123.123:80"),
"__meta_hetzner_role": model.LabelValue("robot"),
"__meta_hetzner_server_id": model.LabelValue("321"),
"__meta_hetzner_server_name": model.LabelValue("server1"),
"__meta_hetzner_server_status": model.LabelValue("ready"),
"__meta_hetzner_public_ipv4": model.LabelValue("123.123.123.123"),
"__meta_hetzner_public_ipv6_network": model.LabelValue("2a01:4f8:111:4221::/64"),
"__meta_hetzner_datacenter": model.LabelValue("nbg1-dc1"),
"__meta_hetzner_robot_product": model.LabelValue("DS 3000"),
"__meta_hetzner_robot_cancelled": model.LabelValue("false"),
},
{
"__address__": model.LabelValue("123.123.123.124:80"),
"__meta_hetzner_role": model.LabelValue("robot"),
"__meta_hetzner_server_id": model.LabelValue("421"),
"__meta_hetzner_server_name": model.LabelValue("server2"),
"__meta_hetzner_server_status": model.LabelValue("in process"),
"__meta_hetzner_public_ipv4": model.LabelValue("123.123.123.124"),
"__meta_hetzner_datacenter": model.LabelValue("fsn1-dc10"),
"__meta_hetzner_robot_product": model.LabelValue("X5"),
"__meta_hetzner_robot_cancelled": model.LabelValue("true"),
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
testutil.Equals(t, labelSet, targetGroup.Targets[i])
})
}
}
func TestRobotSDRefreshHandleError(t *testing.T) {
suite := &robotSDTestSuite{}
suite.SetupTest(t)
cfg := DefaultSDConfig
cfg.robotEndpoint = suite.Mock.Endpoint()
d, err := newRobotDiscovery(&cfg, log.NewNopLogger())
testutil.Ok(t, err)
targetGroups, err := d.refresh(context.Background())
testutil.NotOk(t, err)
testutil.Equals(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error())
testutil.Equals(t, 0, len(targetGroups))
}


@@ -0,0 +1,34 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package install has the side-effect of registering all builtin
// service discovery config types.
package install
import (
_ "github.com/prometheus/prometheus/discovery/azure" // register azure
_ "github.com/prometheus/prometheus/discovery/consul" // register consul
_ "github.com/prometheus/prometheus/discovery/digitalocean" // register digitalocean
_ "github.com/prometheus/prometheus/discovery/dns" // register dns
_ "github.com/prometheus/prometheus/discovery/dockerswarm" // register dockerswarm
_ "github.com/prometheus/prometheus/discovery/ec2" // register ec2
_ "github.com/prometheus/prometheus/discovery/eureka" // register eureka
_ "github.com/prometheus/prometheus/discovery/file" // register file
_ "github.com/prometheus/prometheus/discovery/gce" // register gce
_ "github.com/prometheus/prometheus/discovery/hetzner" // register hetzner
_ "github.com/prometheus/prometheus/discovery/kubernetes" // register kubernetes
_ "github.com/prometheus/prometheus/discovery/marathon" // register marathon
_ "github.com/prometheus/prometheus/discovery/openstack" // register openstack
_ "github.com/prometheus/prometheus/discovery/triton" // register triton
_ "github.com/prometheus/prometheus/discovery/zookeeper" // register zookeeper
)
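// A typical entry point imports this package purely for these side effects,
// e.g. (usage sketch):
//
//   import _ "github.com/prometheus/prometheus/discovery/install"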


@@ -0,0 +1,407 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"context"
"net"
"strconv"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
apiv1 "k8s.io/api/core/v1"
disv1beta1 "k8s.io/api/discovery/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil"
)
var (
epslAddCount = eventCount.WithLabelValues("endpointslice", "add")
epslUpdateCount = eventCount.WithLabelValues("endpointslice", "update")
epslDeleteCount = eventCount.WithLabelValues("endpointslice", "delete")
)
// EndpointSlice discovers new endpoint targets.
type EndpointSlice struct {
logger log.Logger
endpointSliceInf cache.SharedInformer
serviceInf cache.SharedInformer
podInf cache.SharedInformer
podStore cache.Store
endpointSliceStore cache.Store
serviceStore cache.Store
queue *workqueue.Type
}
// NewEndpointSlice returns a new endpointslice discovery.
func NewEndpointSlice(l log.Logger, svc, eps, pod cache.SharedInformer) *EndpointSlice {
if l == nil {
l = log.NewNopLogger()
}
e := &EndpointSlice{
logger: l,
endpointSliceInf: eps,
endpointSliceStore: eps.GetStore(),
serviceInf: svc,
serviceStore: svc.GetStore(),
podInf: pod,
podStore: pod.GetStore(),
queue: workqueue.NewNamed("endpointSlice"),
}
e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
epslAddCount.Inc()
e.enqueue(o)
},
UpdateFunc: func(_, o interface{}) {
epslUpdateCount.Inc()
e.enqueue(o)
},
DeleteFunc: func(o interface{}) {
epslDeleteCount.Inc()
e.enqueue(o)
},
})
serviceUpdate := func(o interface{}) {
svc, err := convertToService(o)
if err != nil {
level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err)
return
}
// TODO(brancz): use cache.Indexer to index endpoints by
// disv1beta1.LabelServiceName so this operation doesn't have to
// iterate over all endpoint objects.
for _, obj := range e.endpointSliceStore.List() {
ep := obj.(*disv1beta1.EndpointSlice)
if lv, exists := ep.Labels[disv1beta1.LabelServiceName]; exists && lv == svc.Name {
e.enqueue(ep)
}
}
}
e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
svcAddCount.Inc()
serviceUpdate(o)
},
UpdateFunc: func(_, o interface{}) {
svcUpdateCount.Inc()
serviceUpdate(o)
},
DeleteFunc: func(o interface{}) {
svcDeleteCount.Inc()
serviceUpdate(o)
},
})
return e
}
func (e *EndpointSlice) enqueue(obj interface{}) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
return
}
e.queue.Add(key)
}
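// Only the object's key is queued; process re-reads the latest state from
// the store, so bursts of updates to one EndpointSlice collapse into a
// single unit of work.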
// Run implements the Discoverer interface.
func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
defer e.queue.ShutDown()
if !cache.WaitForCacheSync(ctx.Done(), e.endpointSliceInf.HasSynced, e.serviceInf.HasSynced, e.podInf.HasSynced) {
if ctx.Err() != context.Canceled {
level.Error(e.logger).Log("msg", "endpointslice informer unable to sync cache")
}
return
}
go func() {
for e.process(ctx, ch) {
}
}()
// Block until the target provider is explicitly canceled.
<-ctx.Done()
}
func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {
keyObj, quit := e.queue.Get()
if quit {
return false
}
defer e.queue.Done(keyObj)
key := keyObj.(string)
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
level.Error(e.logger).Log("msg", "splitting key failed", "key", key)
return true
}
o, exists, err := e.endpointSliceStore.GetByKey(key)
if err != nil {
level.Error(e.logger).Log("msg", "getting object from store failed", "key", key)
return true
}
if !exists {
send(ctx, ch, &targetgroup.Group{Source: endpointSliceSourceFromNamespaceAndName(namespace, name)})
return true
}
eps, err := convertToEndpointSlice(o)
if err != nil {
level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err)
return true
}
send(ctx, ch, e.buildEndpointSlice(eps))
return true
}
func convertToEndpointSlice(o interface{}) (*disv1beta1.EndpointSlice, error) {
endpoints, ok := o.(*disv1beta1.EndpointSlice)
if ok {
return endpoints, nil
}
return nil, errors.Errorf("received unexpected object: %v", o)
}
func endpointSliceSource(ep *disv1beta1.EndpointSlice) string {
return endpointSliceSourceFromNamespaceAndName(ep.Namespace, ep.Name)
}
func endpointSliceSourceFromNamespaceAndName(namespace, name string) string {
return "endpointslice/" + namespace + "/" + name
}
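// For example, endpointSliceSourceFromNamespaceAndName("default", "testendpoints")
// yields "endpointslice/default/testendpoints", matching the Source fields
// asserted in the tests below.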
const (
endpointSliceNameLabel = metaLabelPrefix + "endpointslice_name"
endpointSliceAddressTypeLabel = metaLabelPrefix + "endpointslice_address_type"
endpointSlicePortNameLabel = metaLabelPrefix + "endpointslice_port_name"
endpointSlicePortProtocolLabel = metaLabelPrefix + "endpointslice_port_protocol"
endpointSlicePortLabel = metaLabelPrefix + "endpointslice_port"
endpointSlicePortAppProtocol = metaLabelPrefix + "endpointslice_port_app_protocol"
endpointSliceEndpointConditionsReadyLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_ready"
endpointSliceEndpointHostnameLabel = metaLabelPrefix + "endpointslice_endpoint_hostname"
endpointSliceAddressTargetKindLabel = metaLabelPrefix + "endpointslice_address_target_kind"
endpointSliceAddressTargetNameLabel = metaLabelPrefix + "endpointslice_address_target_name"
endpointSliceEndpointTopologyLabelPrefix = metaLabelPrefix + "endpointslice_endpoint_topology_"
endpointSliceEndpointTopologyLabelPresentPrefix = metaLabelPrefix + "endpointslice_endpoint_topology_present_"
)
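// As a sketch of the topology prefixes above: for a topology key such as
// "kubernetes.io/hostname", strutil.SanitizeLabelName replaces the invalid
// characters with underscores, so a target would carry
// __meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname
// alongside ..._endpoint_topology_present_kubernetes_io_hostname="true".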
func (e *EndpointSlice) buildEndpointSlice(eps *disv1beta1.EndpointSlice) *targetgroup.Group {
tg := &targetgroup.Group{
Source: endpointSliceSource(eps),
}
tg.Labels = model.LabelSet{
namespaceLabel: lv(eps.Namespace),
endpointSliceNameLabel: lv(eps.Name),
endpointSliceAddressTypeLabel: lv(string(eps.AddressType)),
}
e.addServiceLabels(eps, tg)
type podEntry struct {
pod *apiv1.Pod
servicePorts []disv1beta1.EndpointPort
}
seenPods := map[string]*podEntry{}
add := func(addr string, ep disv1beta1.Endpoint, port disv1beta1.EndpointPort) {
a := addr
if port.Port != nil {
a = net.JoinHostPort(addr, strconv.FormatUint(uint64(*port.Port), 10))
}
target := model.LabelSet{
model.AddressLabel: lv(a),
}
if port.Name != nil {
target[endpointSlicePortNameLabel] = lv(*port.Name)
}
if port.Protocol != nil {
target[endpointSlicePortProtocolLabel] = lv(string(*port.Protocol))
}
if port.Port != nil {
target[endpointSlicePortLabel] = lv(strconv.FormatUint(uint64(*port.Port), 10))
}
if port.AppProtocol != nil {
target[endpointSlicePortAppProtocol] = lv(*port.AppProtocol)
}
if ep.Conditions.Ready != nil {
target[endpointSliceEndpointConditionsReadyLabel] = lv(strconv.FormatBool(*ep.Conditions.Ready))
}
if ep.Hostname != nil {
target[endpointSliceEndpointHostnameLabel] = lv(*ep.Hostname)
}
if ep.TargetRef != nil {
target[model.LabelName(endpointSliceAddressTargetKindLabel)] = lv(ep.TargetRef.Kind)
target[model.LabelName(endpointSliceAddressTargetNameLabel)] = lv(ep.TargetRef.Name)
}
for k, v := range ep.Topology {
ln := strutil.SanitizeLabelName(k)
target[model.LabelName(endpointSliceEndpointTopologyLabelPrefix+ln)] = lv(v)
target[model.LabelName(endpointSliceEndpointTopologyLabelPresentPrefix+ln)] = presentValue
}
pod := e.resolvePodRef(ep.TargetRef)
if pod == nil {
		// This target is not a Pod, so don't continue with Pod-specific logic.
tg.Targets = append(tg.Targets, target)
return
}
s := pod.Namespace + "/" + pod.Name
sp, ok := seenPods[s]
if !ok {
sp = &podEntry{pod: pod}
seenPods[s] = sp
}
// Attach standard pod labels.
target = target.Merge(podLabels(pod))
// Attach potential container port labels matching the endpoint port.
for _, c := range pod.Spec.Containers {
for _, cport := range c.Ports {
if port.Port == nil {
continue
}
if *port.Port == cport.ContainerPort {
ports := strconv.FormatUint(uint64(*port.Port), 10)
target[podContainerNameLabel] = lv(c.Name)
target[podContainerPortNameLabel] = lv(cport.Name)
target[podContainerPortNumberLabel] = lv(ports)
target[podContainerPortProtocolLabel] = lv(string(cport.Protocol))
break
}
}
}
		// Add the service port so we know that we have already generated a target
// for it.
sp.servicePorts = append(sp.servicePorts, port)
tg.Targets = append(tg.Targets, target)
}
for _, ep := range eps.Endpoints {
for _, port := range eps.Ports {
for _, addr := range ep.Addresses {
add(addr, ep, port)
}
}
}
// For all seen pods, check all container ports. If they were not covered
// by one of the service endpoints, generate targets for them.
for _, pe := range seenPods {
for _, c := range pe.pod.Spec.Containers {
for _, cport := range c.Ports {
hasSeenPort := func() bool {
for _, eport := range pe.servicePorts {
if eport.Port == nil {
continue
}
if cport.ContainerPort == *eport.Port {
return true
}
}
return false
}
if hasSeenPort() {
continue
}
a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
target := model.LabelSet{
model.AddressLabel: lv(a),
podContainerNameLabel: lv(c.Name),
podContainerPortNameLabel: lv(cport.Name),
podContainerPortNumberLabel: lv(ports),
podContainerPortProtocolLabel: lv(string(cport.Protocol)),
}
tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
}
}
}
return tg
}
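// Worked example, grounded in the tests below: a slice with port 9000
// ("testport", TCP) and endpoint address 1.2.3.4 yields a target with
// __address__="1.2.3.4:9000" plus the port and condition labels above, while
// container ports of seen pods that no slice port covers fall back to targets
// built from Status.PodIP and the container port.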
func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
if ref == nil || ref.Kind != "Pod" {
return nil
}
p := &apiv1.Pod{}
p.Namespace = ref.Namespace
p.Name = ref.Name
obj, exists, err := e.podStore.Get(p)
if err != nil {
level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err)
return nil
}
if !exists {
return nil
}
return obj.(*apiv1.Pod)
}
func (e *EndpointSlice) addServiceLabels(eps *disv1beta1.EndpointSlice, tg *targetgroup.Group) {
var (
svc = &apiv1.Service{}
found bool
)
svc.Namespace = eps.Namespace
	// Every EndpointSlice object has the Service it belongs to in the
	// kubernetes.io/service-name label.
svc.Name, found = eps.Labels[disv1beta1.LabelServiceName]
if !found {
return
}
obj, exists, err := e.serviceStore.Get(svc)
if err != nil {
level.Error(e.logger).Log("msg", "retrieving service failed", "err", err)
return
}
if !exists {
return
}
svc = obj.(*apiv1.Service)
tg.Labels = tg.Labels.Merge(serviceLabels(svc))
}

View file

@ -0,0 +1,631 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"context"
"testing"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery/targetgroup"
v1 "k8s.io/api/core/v1"
disv1beta1 "k8s.io/api/discovery/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
)
func strptr(str string) *string {
return &str
}
func boolptr(b bool) *bool {
return &b
}
func int32ptr(i int32) *int32 {
return &i
}
func protocolptr(p v1.Protocol) *v1.Protocol {
return &p
}
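// makeEndpointSlice returns a test fixture: a single TCP port (9000,
// "testport") and three endpoints in different readiness states, from which
// the discovery tests below derive their expected targets.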
func makeEndpointSlice() *disv1beta1.EndpointSlice {
return &disv1beta1.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
Labels: map[string]string{
disv1beta1.LabelServiceName: "testendpoints",
},
},
AddressType: disv1beta1.AddressTypeIPv4,
Ports: []disv1beta1.EndpointPort{
{
Name: strptr("testport"),
Port: int32ptr(9000),
Protocol: protocolptr(v1.ProtocolTCP),
},
},
Endpoints: []disv1beta1.Endpoint{
{
Addresses: []string{"1.2.3.4"},
Hostname: strptr("testendpoint1"),
}, {
Addresses: []string{"2.3.4.5"},
Conditions: disv1beta1.EndpointConditions{
Ready: boolptr(true),
},
}, {
Addresses: []string{"3.4.5.6"},
Conditions: disv1beta1.EndpointConditions{
Ready: boolptr(false),
},
},
},
}
}
func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{})
k8sDiscoveryTest{
discovery: n,
beforeRun: func() {
obj := makeEndpointSlice()
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
expectedRes: map[string]*targetgroup.Group{
"endpointslice/default/testendpoints": {
Targets: []model.LabelSet{
{
"__address__": "1.2.3.4:9000",
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "2.3.4.5:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "3.4.5.6:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_namespace": "default",
"__meta_kubernetes_endpointslice_name": "testendpoints",
},
Source: "endpointslice/default/testendpoints",
},
},
}.Run(t)
}
func TestEndpointSliceDiscoveryAdd(t *testing.T) {
obj := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
Namespace: "default",
UID: types.UID("deadbeef"),
},
Spec: v1.PodSpec{
NodeName: "testnode",
Containers: []v1.Container{
{
Name: "c1",
Ports: []v1.ContainerPort{
{
Name: "mainport",
ContainerPort: 9000,
Protocol: v1.ProtocolTCP,
},
},
},
{
Name: "c2",
Ports: []v1.ContainerPort{
{
Name: "sideport",
ContainerPort: 9001,
Protocol: v1.ProtocolTCP,
},
},
},
},
},
Status: v1.PodStatus{
HostIP: "2.3.4.5",
PodIP: "1.2.3.4",
},
}
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{}, obj)
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := &disv1beta1.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
},
AddressType: disv1beta1.AddressTypeIPv4,
Ports: []disv1beta1.EndpointPort{
{
Name: strptr("testport"),
Port: int32ptr(9000),
Protocol: protocolptr(v1.ProtocolTCP),
},
},
Endpoints: []disv1beta1.Endpoint{
{
Addresses: []string{"4.3.2.1"},
TargetRef: &v1.ObjectReference{
Kind: "Pod",
Name: "testpod",
Namespace: "default",
},
Conditions: disv1beta1.EndpointConditions{
Ready: boolptr(false),
},
},
},
}
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
expectedRes: map[string]*targetgroup.Group{
"endpointslice/default/testendpoints": {
Targets: []model.LabelSet{
{
"__address__": "4.3.2.1:9000",
"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
"__meta_kubernetes_endpointslice_address_target_name": "testpod",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
"__meta_kubernetes_pod_container_name": "c1",
"__meta_kubernetes_pod_container_port_name": "mainport",
"__meta_kubernetes_pod_container_port_number": "9000",
"__meta_kubernetes_pod_container_port_protocol": "TCP",
"__meta_kubernetes_pod_host_ip": "2.3.4.5",
"__meta_kubernetes_pod_ip": "1.2.3.4",
"__meta_kubernetes_pod_name": "testpod",
"__meta_kubernetes_pod_node_name": "testnode",
"__meta_kubernetes_pod_phase": "",
"__meta_kubernetes_pod_ready": "unknown",
"__meta_kubernetes_pod_uid": "deadbeef",
},
{
"__address__": "1.2.3.4:9001",
"__meta_kubernetes_pod_container_name": "c2",
"__meta_kubernetes_pod_container_port_name": "sideport",
"__meta_kubernetes_pod_container_port_number": "9001",
"__meta_kubernetes_pod_container_port_protocol": "TCP",
"__meta_kubernetes_pod_host_ip": "2.3.4.5",
"__meta_kubernetes_pod_ip": "1.2.3.4",
"__meta_kubernetes_pod_name": "testpod",
"__meta_kubernetes_pod_node_name": "testnode",
"__meta_kubernetes_pod_phase": "",
"__meta_kubernetes_pod_ready": "unknown",
"__meta_kubernetes_pod_uid": "deadbeef",
},
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_name": "testendpoints",
"__meta_kubernetes_namespace": "default",
},
Source: "endpointslice/default/testendpoints",
},
},
}.Run(t)
}
func TestEndpointSliceDiscoveryDelete(t *testing.T) {
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{}, makeEndpointSlice())
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := makeEndpointSlice()
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{})
},
expectedMaxItems: 2,
expectedRes: map[string]*targetgroup.Group{
"endpointslice/default/testendpoints": {
Source: "endpointslice/default/testendpoints",
},
},
}.Run(t)
}
func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{}, makeEndpointSlice())
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := &disv1beta1.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
},
AddressType: disv1beta1.AddressTypeIPv4,
Ports: []disv1beta1.EndpointPort{
{
Name: strptr("testport"),
Port: int32ptr(9000),
Protocol: protocolptr(v1.ProtocolTCP),
},
},
Endpoints: []disv1beta1.Endpoint{
{
Addresses: []string{"1.2.3.4"},
Hostname: strptr("testendpoint1"),
}, {
Addresses: []string{"2.3.4.5"},
Conditions: disv1beta1.EndpointConditions{
Ready: boolptr(true),
},
},
},
}
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
},
expectedMaxItems: 2,
expectedRes: map[string]*targetgroup.Group{
"endpointslice/default/testendpoints": {
Targets: []model.LabelSet{
{
"__address__": "1.2.3.4:9000",
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "2.3.4.5:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_name": "testendpoints",
"__meta_kubernetes_namespace": "default",
},
Source: "endpointslice/default/testendpoints",
},
},
}.Run(t)
}
func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{}, makeEndpointSlice())
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := &disv1beta1.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
},
AddressType: disv1beta1.AddressTypeIPv4,
Ports: []disv1beta1.EndpointPort{
{
Name: strptr("testport"),
Port: int32ptr(9000),
Protocol: protocolptr(v1.ProtocolTCP),
},
},
Endpoints: []disv1beta1.Endpoint{},
}
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
},
expectedMaxItems: 2,
expectedRes: map[string]*targetgroup.Group{
"endpointslice/default/testendpoints": {
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_name": "testendpoints",
"__meta_kubernetes_namespace": "default",
},
Source: "endpointslice/default/testendpoints",
},
},
}.Run(t)
}
func TestEndpointSliceDiscoveryWithService(t *testing.T) {
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{}, makeEndpointSlice())
k8sDiscoveryTest{
discovery: n,
beforeRun: func() {
obj := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
Labels: map[string]string{
"app/name": "test",
},
},
}
c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
expectedRes: map[string]*targetgroup.Group{
"endpointslice/default/testendpoints": {
Targets: []model.LabelSet{
{
"__address__": "1.2.3.4:9000",
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "2.3.4.5:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "3.4.5.6:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_name": "testendpoints",
"__meta_kubernetes_namespace": "default",
"__meta_kubernetes_service_label_app_name": "test",
"__meta_kubernetes_service_labelpresent_app_name": "true",
"__meta_kubernetes_service_name": "testendpoints",
},
Source: "endpointslice/default/testendpoints",
},
},
}.Run(t)
}
func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{}, makeEndpointSlice())
k8sDiscoveryTest{
discovery: n,
beforeRun: func() {
obj := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
Labels: map[string]string{
"app/name": "test",
},
},
}
c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
},
afterStart: func() {
obj := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
Labels: map[string]string{
"app/name": "svc",
"component": "testing",
},
},
}
c.CoreV1().Services(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
},
expectedMaxItems: 2,
expectedRes: map[string]*targetgroup.Group{
"endpointslice/default/testendpoints": {
Targets: []model.LabelSet{
{
"__address__": "1.2.3.4:9000",
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "2.3.4.5:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "3.4.5.6:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_name": "testendpoints",
"__meta_kubernetes_namespace": "default",
"__meta_kubernetes_service_label_app_name": "svc",
"__meta_kubernetes_service_label_component": "testing",
"__meta_kubernetes_service_labelpresent_app_name": "true",
"__meta_kubernetes_service_labelpresent_component": "true",
"__meta_kubernetes_service_name": "testendpoints",
},
Source: "endpointslice/default/testendpoints",
},
},
}.Run(t)
}
func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
epOne := makeEndpointSlice()
epOne.Namespace = "ns1"
objs := []runtime.Object{
epOne,
&disv1beta1.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "ns2",
},
AddressType: disv1beta1.AddressTypeIPv4,
Ports: []disv1beta1.EndpointPort{
{
Name: strptr("testport"),
Port: int32ptr(9000),
Protocol: protocolptr(v1.ProtocolTCP),
},
},
Endpoints: []disv1beta1.Endpoint{
{
Addresses: []string{"4.3.2.1"},
TargetRef: &v1.ObjectReference{
Kind: "Pod",
Name: "testpod",
Namespace: "ns2",
},
},
},
},
&v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "ns1",
Labels: map[string]string{
"app": "app1",
},
},
},
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
Namespace: "ns2",
UID: types.UID("deadbeef"),
},
Spec: v1.PodSpec{
NodeName: "testnode",
Containers: []v1.Container{
{
Name: "c1",
Ports: []v1.ContainerPort{
{
Name: "mainport",
ContainerPort: 9000,
Protocol: v1.ProtocolTCP,
},
},
},
},
},
Status: v1.PodStatus{
HostIP: "2.3.4.5",
PodIP: "4.3.2.1",
},
},
}
n, _ := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}, objs...)
k8sDiscoveryTest{
discovery: n,
expectedMaxItems: 2,
expectedRes: map[string]*targetgroup.Group{
"endpointslice/ns1/testendpoints": {
Targets: []model.LabelSet{
{
"__address__": "1.2.3.4:9000",
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "2.3.4.5:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "3.4.5.6:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_name": "testendpoints",
"__meta_kubernetes_namespace": "ns1",
"__meta_kubernetes_service_label_app": "app1",
"__meta_kubernetes_service_labelpresent_app": "true",
"__meta_kubernetes_service_name": "testendpoints",
},
Source: "endpointslice/ns1/testendpoints",
},
"endpointslice/ns2/testendpoints": {
Targets: []model.LabelSet{
{
"__address__": "4.3.2.1:9000",
"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
"__meta_kubernetes_endpointslice_address_target_name": "testpod",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
"__meta_kubernetes_pod_container_name": "c1",
"__meta_kubernetes_pod_container_port_name": "mainport",
"__meta_kubernetes_pod_container_port_number": "9000",
"__meta_kubernetes_pod_container_port_protocol": "TCP",
"__meta_kubernetes_pod_host_ip": "2.3.4.5",
"__meta_kubernetes_pod_ip": "4.3.2.1",
"__meta_kubernetes_pod_name": "testpod",
"__meta_kubernetes_pod_node_name": "testnode",
"__meta_kubernetes_pod_phase": "",
"__meta_kubernetes_pod_ready": "unknown",
"__meta_kubernetes_pod_uid": "deadbeef",
},
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_name": "testendpoints",
"__meta_kubernetes_namespace": "ns2",
},
Source: "endpointslice/ns2/testendpoints",
},
},
}.Run(t)
}

View file

@ -15,6 +15,7 @@ package kubernetes
import (
"context"
"fmt"
"reflect"
"strings"
"sync"
@ -24,9 +25,11 @@ import (
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
apiv1 "k8s.io/api/core/v1"
disv1beta1 "k8s.io/api/discovery/v1beta1"
"k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@ -36,6 +39,7 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
@ -49,6 +53,8 @@ const (
)
var (
	// HTTP header.
userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
// Custom events metric
eventCount = prometheus.NewCounterVec(
prometheus.CounterOpts{
@ -62,16 +68,30 @@ var (
DefaultSDConfig = SDConfig{}
)
func init() {
discovery.RegisterConfig(&SDConfig{})
prometheus.MustRegister(eventCount)
// Initialize metric vectors.
for _, role := range []string{"endpointslice", "endpoints", "node", "pod", "service", "ingress"} {
for _, evt := range []string{"add", "delete", "update"} {
eventCount.WithLabelValues(role, evt)
}
}
(&clientGoRequestMetricAdapter{}).Register(prometheus.DefaultRegisterer)
(&clientGoWorkqueueMetricsProvider{}).Register(prometheus.DefaultRegisterer)
}
// Role is role of the service in Kubernetes.
type Role string
// The valid options for Role.
const (
RoleNode Role = "node"
RolePod Role = "pod"
RoleService Role = "service"
RoleEndpoint Role = "endpoints"
RoleIngress Role = "ingress"
RoleNode Role = "node"
RolePod Role = "pod"
RoleService Role = "service"
RoleEndpoint Role = "endpoints"
RoleEndpointSlice Role = "endpointslice"
RoleIngress Role = "ingress"
)
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -80,7 +100,7 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
switch *c {
case RoleNode, RolePod, RoleService, RoleEndpoint, RoleIngress:
case RoleNode, RolePod, RoleService, RoleEndpoint, RoleEndpointSlice, RoleIngress:
return nil
default:
return errors.Errorf("unknown Kubernetes SD role %q", *c)
@ -89,19 +109,33 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
// SDConfig is the configuration for Kubernetes service discovery.
type SDConfig struct {
APIServer config_util.URL `yaml:"api_server,omitempty"`
Role Role `yaml:"role"`
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
NamespaceDiscovery NamespaceDiscovery `yaml:"namespaces,omitempty"`
Selectors []SelectorConfig `yaml:"selectors,omitempty"`
APIServer config.URL `yaml:"api_server,omitempty"`
Role Role `yaml:"role"`
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
NamespaceDiscovery NamespaceDiscovery `yaml:"namespaces,omitempty"`
Selectors []SelectorConfig `yaml:"selectors,omitempty"`
}
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "kubernetes" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return New(opts.Logger, c)
}
// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
c.HTTPClientConfig.SetDirectory(dir)
}
type roleSelector struct {
node resourceSelector
pod resourceSelector
service resourceSelector
endpoints resourceSelector
ingress resourceSelector
node resourceSelector
pod resourceSelector
service resourceSelector
endpoints resourceSelector
endpointslice resourceSelector
ingress resourceSelector
}
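// A hedged sketch of selecting resources for the new endpointslice role in
// Go (field names follow the mapSelector handling below; the selector values
// are hypothetical):
//
//	cfg := SDConfig{
//		Role: RoleEndpointSlice,
//		Selectors: []SelectorConfig{
//			{Role: RoleEndpointSlice, Label: "app=web", Field: "metadata.namespace!=kube-system"},
//		},
//	}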
type SelectorConfig struct {
@ -124,23 +158,24 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
if c.Role == "" {
return errors.Errorf("role missing (one of: pod, service, endpoints, node, ingress)")
return errors.Errorf("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
}
err = c.HTTPClientConfig.Validate()
if err != nil {
return err
}
if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config_util.HTTPClientConfig{}) {
if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.HTTPClientConfig{}) {
return errors.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
}
foundSelectorRoles := make(map[Role]struct{})
allowedSelectors := map[Role][]string{
RolePod: {string(RolePod)},
RoleService: {string(RoleService)},
RoleEndpoint: {string(RolePod), string(RoleService), string(RoleEndpoint)},
RoleNode: {string(RoleNode)},
RoleIngress: {string(RoleIngress)},
RolePod: {string(RolePod)},
RoleService: {string(RoleService)},
RoleEndpointSlice: {string(RolePod), string(RoleService), string(RoleEndpointSlice)},
RoleEndpoint: {string(RolePod), string(RoleService), string(RoleEndpoint)},
RoleNode: {string(RoleNode)},
RoleIngress: {string(RoleIngress)},
}
for _, selector := range c.Selectors {
@ -150,7 +185,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
foundSelectorRoles[selector.Role] = struct{}{}
if _, ok := allowedSelectors[c.Role]; !ok {
return errors.Errorf("invalid role: %q, expecting one of: pod, service, endpoints, node or ingress", c.Role)
return errors.Errorf("invalid role: %q, expecting one of: pod, service, endpoints, endpointslice, node or ingress", c.Role)
}
var allowed bool
for _, role := range allowedSelectors[c.Role] {
@ -189,31 +224,6 @@ func (c *NamespaceDiscovery) UnmarshalYAML(unmarshal func(interface{}) error) er
return unmarshal((*plain)(c))
}
func init() {
prometheus.MustRegister(eventCount)
// Initialize metric vectors.
for _, role := range []string{"endpoints", "node", "pod", "service", "ingress"} {
for _, evt := range []string{"add", "delete", "update"} {
eventCount.WithLabelValues(role, evt)
}
}
var (
clientGoRequestMetricAdapterInstance = clientGoRequestMetricAdapter{}
clientGoWorkqueueMetricsProviderInstance = clientGoWorkqueueMetricsProvider{}
)
clientGoRequestMetricAdapterInstance.Register(prometheus.DefaultRegisterer)
clientGoWorkqueueMetricsProviderInstance.Register(prometheus.DefaultRegisterer)
}
// This is only for internal use.
type discoverer interface {
Run(ctx context.Context, up chan<- []*targetgroup.Group)
}
// Discovery implements the discoverer interface for discovering
// targets from Kubernetes.
type Discovery struct {
@ -222,7 +232,7 @@ type Discovery struct {
role Role
logger log.Logger
namespaceDiscovery *NamespaceDiscovery
discoverers []discoverer
discoverers []discovery.Discoverer
selectors roleSelector
}
@ -252,7 +262,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
}
level.Info(l).Log("msg", "Using pod service account via in-cluster config")
} else {
rt, err := config_util.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd", false)
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd", false, false)
if err != nil {
return nil, err
}
@ -262,7 +272,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
}
}
kcfg.UserAgent = "Prometheus/discovery"
kcfg.UserAgent = userAgent
c, err := kubernetes.NewForConfig(kcfg)
if err != nil {
@ -273,7 +283,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
logger: l,
role: conf.Role,
namespaceDiscovery: &conf.NamespaceDiscovery,
discoverers: make([]discoverer, 0),
discoverers: make([]discovery.Discoverer, 0),
selectors: mapSelector(conf.Selectors),
}, nil
}
@ -282,6 +292,9 @@ func mapSelector(rawSelector []SelectorConfig) roleSelector {
rs := roleSelector{}
for _, resourceSelectorRaw := range rawSelector {
switch resourceSelectorRaw.Role {
case RoleEndpointSlice:
rs.endpointslice.field = resourceSelectorRaw.Field
rs.endpointslice.label = resourceSelectorRaw.Label
case RoleEndpoint:
rs.endpoints.field = resourceSelectorRaw.Field
rs.endpoints.label = resourceSelectorRaw.Label
@ -310,6 +323,58 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
namespaces := d.getNamespaces()
switch d.role {
case RoleEndpointSlice:
for _, namespace := range namespaces {
e := d.client.DiscoveryV1beta1().EndpointSlices(namespace)
elw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.endpointslice.field
options.LabelSelector = d.selectors.endpointslice.label
return e.List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.endpointslice.field
options.LabelSelector = d.selectors.endpointslice.label
return e.Watch(ctx, options)
},
}
s := d.client.CoreV1().Services(namespace)
slw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.service.field
options.LabelSelector = d.selectors.service.label
return s.List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.service.field
options.LabelSelector = d.selectors.service.label
return s.Watch(ctx, options)
},
}
p := d.client.CoreV1().Pods(namespace)
plw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.pod.field
options.LabelSelector = d.selectors.pod.label
return p.List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.pod.field
options.LabelSelector = d.selectors.pod.label
return p.Watch(ctx, options)
},
}
eps := NewEndpointSlice(
log.With(d.logger, "role", "endpointslice"),
cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod),
cache.NewSharedInformer(elw, &disv1beta1.EndpointSlice{}, resyncPeriod),
cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod),
)
d.discoverers = append(d.discoverers, eps)
go eps.endpointSliceInf.Run(ctx.Done())
go eps.serviceInf.Run(ctx.Done())
go eps.podInf.Run(ctx.Done())
}
case RoleEndpoint:
for _, namespace := range namespaces {
e := d.client.CoreV1().Endpoints(namespace)
@ -454,7 +519,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
var wg sync.WaitGroup
for _, dd := range d.discoverers {
wg.Add(1)
go func(d discoverer) {
go func(d discovery.Discoverer) {
defer wg.Done()
d.Run(ctx, ch)
}(dd)

View file

@ -25,10 +25,15 @@ import (
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
testutil.TolerantVerifyLeak(m)
}
// makeDiscovery creates a kubernetes.Discovery instance for testing.
func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime.Object) (*Discovery, kubernetes.Interface) {
clientset := fake.NewSimpleClientset(objects...)
@ -43,7 +48,7 @@ func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime
type k8sDiscoveryTest struct {
	// discovery is an instance of discovery.Discoverer
discovery discoverer
discovery discovery.Discoverer
// beforeRun runs before discoverer run
beforeRun func()
// afterStart runs after discoverer has synced
@ -158,6 +163,7 @@ type hasSynced interface {
var _ hasSynced = &Discovery{}
var _ hasSynced = &Node{}
var _ hasSynced = &Endpoints{}
var _ hasSynced = &EndpointSlice{}
var _ hasSynced = &Ingress{}
var _ hasSynced = &Pod{}
var _ hasSynced = &Service{}
@ -183,6 +189,10 @@ func (e *Endpoints) hasSynced() bool {
return e.endpointsInf.HasSynced() && e.serviceInf.HasSynced() && e.podInf.HasSynced()
}
func (e *EndpointSlice) hasSynced() bool {
return e.endpointSliceInf.HasSynced() && e.serviceInf.HasSynced() && e.podInf.HasSynced()
}
func (i *Ingress) hasSynced() bool {
return i.informer.HasSynced()
}

View file

@ -24,22 +24,7 @@ import (
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
sd_config "github.com/prometheus/prometheus/discovery/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/discovery/azure"
"github.com/prometheus/prometheus/discovery/consul"
"github.com/prometheus/prometheus/discovery/digitalocean"
"github.com/prometheus/prometheus/discovery/dns"
"github.com/prometheus/prometheus/discovery/dockerswarm"
"github.com/prometheus/prometheus/discovery/ec2"
"github.com/prometheus/prometheus/discovery/file"
"github.com/prometheus/prometheus/discovery/gce"
"github.com/prometheus/prometheus/discovery/kubernetes"
"github.com/prometheus/prometheus/discovery/marathon"
"github.com/prometheus/prometheus/discovery/openstack"
"github.com/prometheus/prometheus/discovery/triton"
"github.com/prometheus/prometheus/discovery/zookeeper"
)
var (
@ -84,22 +69,6 @@ func init() {
prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates)
}
// Discoverer provides information about target groups. It maintains a set
// of sources from which TargetGroups can originate. Whenever a discovery provider
// detects a potential change, it sends the TargetGroup through its channel.
//
// Discoverer does not know if an actual change happened.
// It does guarantee that it sends the new TargetGroup whenever a change happens.
//
// Discoverers should initially send a full set of all discoverable TargetGroups.
type Discoverer interface {
// Run hands a channel to the discovery provider (Consul, DNS etc) through which it can send
// updated target groups.
// Must return when the context gets canceled. It should not close the update
// channel on returning.
Run(ctx context.Context, up chan<- []*targetgroup.Group)
}
type poolKey struct {
setName string
provider string
@ -183,7 +152,7 @@ func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group {
}
// ApplyConfig removes all running discovery providers and starts new ones using the provided config.
func (m *Manager) ApplyConfig(cfg map[string]sd_config.ServiceDiscoveryConfig) error {
func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
m.mtx.Lock()
defer m.mtx.Unlock()
@ -324,13 +293,12 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group {
}
// registerProviders returns the number of failed SD configs.
func (m *Manager) registerProviders(cfg sd_config.ServiceDiscoveryConfig, setName string) int {
func (m *Manager) registerProviders(cfgs Configs, setName string) int {
var (
failedCount int
added bool
failed int
added bool
)
add := func(cfg interface{}, newDiscoverer func() (Discoverer, error)) {
t := reflect.TypeOf(cfg).String()
add := func(cfg Config) {
for _, p := range m.providers {
if reflect.DeepEqual(cfg, p.config) {
p.subs = append(p.subs, setName)
@ -338,98 +306,25 @@ func (m *Manager) registerProviders(cfg sd_config.ServiceDiscoveryConfig, setNam
return
}
}
d, err := newDiscoverer()
typ := cfg.Name()
d, err := cfg.NewDiscoverer(DiscovererOptions{
Logger: log.With(m.logger, "discovery", typ),
})
if err != nil {
level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", t)
failedCount++
level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ)
failed++
return
}
provider := provider{
name: fmt.Sprintf("%s/%d", t, len(m.providers)),
m.providers = append(m.providers, &provider{
name: fmt.Sprintf("%s/%d", typ, len(m.providers)),
d: d,
config: cfg,
subs: []string{setName},
}
m.providers = append(m.providers, &provider)
})
added = true
}
for _, c := range cfg.DNSSDConfigs {
add(c, func() (Discoverer, error) {
return dns.NewDiscovery(*c, log.With(m.logger, "discovery", "dns")), nil
})
}
for _, c := range cfg.FileSDConfigs {
add(c, func() (Discoverer, error) {
return file.NewDiscovery(c, log.With(m.logger, "discovery", "file")), nil
})
}
for _, c := range cfg.ConsulSDConfigs {
add(c, func() (Discoverer, error) {
return consul.NewDiscovery(c, log.With(m.logger, "discovery", "consul"))
})
}
for _, c := range cfg.DigitalOceanSDConfigs {
add(c, func() (Discoverer, error) {
return digitalocean.NewDiscovery(c, log.With(m.logger, "discovery", "digitalocean"))
})
}
for _, c := range cfg.DockerSwarmSDConfigs {
add(c, func() (Discoverer, error) {
return dockerswarm.NewDiscovery(c, log.With(m.logger, "discovery", "dockerswarm"))
})
}
for _, c := range cfg.MarathonSDConfigs {
add(c, func() (Discoverer, error) {
return marathon.NewDiscovery(*c, log.With(m.logger, "discovery", "marathon"))
})
}
for _, c := range cfg.KubernetesSDConfigs {
add(c, func() (Discoverer, error) {
return kubernetes.New(log.With(m.logger, "discovery", "k8s"), c)
})
}
for _, c := range cfg.ServersetSDConfigs {
add(c, func() (Discoverer, error) {
return zookeeper.NewServersetDiscovery(c, log.With(m.logger, "discovery", "zookeeper"))
})
}
for _, c := range cfg.NerveSDConfigs {
add(c, func() (Discoverer, error) {
return zookeeper.NewNerveDiscovery(c, log.With(m.logger, "discovery", "nerve"))
})
}
for _, c := range cfg.EC2SDConfigs {
add(c, func() (Discoverer, error) {
return ec2.NewDiscovery(c, log.With(m.logger, "discovery", "ec2")), nil
})
}
for _, c := range cfg.OpenstackSDConfigs {
add(c, func() (Discoverer, error) {
return openstack.NewDiscovery(c, log.With(m.logger, "discovery", "openstack"))
})
}
for _, c := range cfg.GCESDConfigs {
add(c, func() (Discoverer, error) {
return gce.NewDiscovery(*c, log.With(m.logger, "discovery", "gce"))
})
}
for _, c := range cfg.AzureSDConfigs {
add(c, func() (Discoverer, error) {
return azure.NewDiscovery(c, log.With(m.logger, "discovery", "azure")), nil
})
}
for _, c := range cfg.TritonSDConfigs {
add(c, func() (Discoverer, error) {
return triton.New(log.With(m.logger, "discovery", "triton"), c)
})
}
if len(cfg.StaticConfigs) > 0 {
add(setName, func() (Discoverer, error) {
return &StaticProvider{TargetGroups: cfg.StaticConfigs}, nil
})
for _, cfg := range cfgs {
add(cfg)
}
if !added {
// Add an empty target group to force the refresh of the corresponding
@ -437,11 +332,9 @@ func (m *Manager) registerProviders(cfg sd_config.ServiceDiscoveryConfig, setNam
// current targets.
// It can happen because the combined set of SD configurations is empty
// or because we fail to instantiate all the SD configurations.
add(setName, func() (Discoverer, error) {
return &StaticProvider{TargetGroups: []*targetgroup.Group{{}}}, nil
})
add(StaticConfig{{}})
}
return failedCount
return failed
}
// StaticProvider holds a list of target groups that never change.

View file

@ -16,8 +16,6 @@ package discovery
import (
"context"
"fmt"
"io/ioutil"
"os"
"reflect"
"sort"
"strconv"
@ -26,15 +24,9 @@ import (
"github.com/go-kit/kit/log"
client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
common_config "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/config"
sd_config "github.com/prometheus/prometheus/discovery/config"
"github.com/prometheus/prometheus/discovery/consul"
"github.com/prometheus/prometheus/discovery/file"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/testutil"
"gopkg.in/yaml.v2"
)
func TestMain(m *testing.M) {
@ -678,16 +670,14 @@ func TestTargetUpdatesOrder(t *testing.T) {
for _, up := range tc.updates {
go newMockDiscoveryProvider(up...).Run(ctx, provUpdates)
if len(up) > 0 {
totalUpdatesCount = totalUpdatesCount + len(up)
totalUpdatesCount += len(up)
}
}
Loop:
for x := 0; x < totalUpdatesCount; x++ {
select {
case <-ctx.Done():
t.Errorf("%d: no update arrived within the timeout limit", x)
break Loop
t.Fatalf("%d: no update arrived within the timeout limit", x)
case tgs := <-provUpdates:
discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
for _, got := range discoveryManager.allGroups() {
@ -727,6 +717,19 @@ func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg fun
}
func staticConfig(addrs ...string) StaticConfig {
var cfg StaticConfig
for i, addr := range addrs {
cfg = append(cfg, &targetgroup.Group{
Source: fmt.Sprint(i),
Targets: []model.LabelSet{
{model.AddressLabel: model.LabelValue(addr)},
},
})
}
return cfg
}
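// For example, staticConfig("foo:9090", "bar:9090") returns two target groups
// with Sources "0" and "1" and one __address__ target each; the tests below
// use it to express static service discovery concisely.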
func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Group, poolKey poolKey, label string, present bool) {
t.Helper()
if _, ok := tSets[poolKey]; !ok {
@ -762,51 +765,46 @@ func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run()
c := map[string]sd_config.ServiceDiscoveryConfig{
c := map[string]Configs{
"prometheus": {
StaticConfigs: []*targetgroup.Group{
{
Source: "0",
Targets: []model.LabelSet{
{
model.AddressLabel: model.LabelValue("foo:9090"),
},
},
},
{
Source: "1",
Targets: []model.LabelSet{
{
model.AddressLabel: model.LabelValue("bar:9090"),
},
},
},
},
staticConfig("foo:9090", "bar:9090"),
},
}
discoveryManager.ApplyConfig(c)
<-discoveryManager.SyncCh()
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "string/0"}, "{__address__=\"foo:9090\"}", true)
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "string/0"}, "{__address__=\"bar:9090\"}", true)
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true)
c["prometheus"] = sd_config.ServiceDiscoveryConfig{
StaticConfigs: []*targetgroup.Group{
{
Source: "0",
Targets: []model.LabelSet{
{
model.AddressLabel: model.LabelValue("foo:9090"),
},
},
},
c["prometheus"] = Configs{
staticConfig("foo:9090"),
}
discoveryManager.ApplyConfig(c)
<-discoveryManager.SyncCh()
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false)
}
func TestDiscovererConfigs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
discoveryManager := NewManager(ctx, log.NewNopLogger())
discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run()
c := map[string]Configs{
"prometheus": {
staticConfig("foo:9090", "bar:9090"),
staticConfig("baz:9090"),
},
}
discoveryManager.ApplyConfig(c)
<-discoveryManager.SyncCh()
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "string/0"}, "{__address__=\"foo:9090\"}", true)
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "string/0"}, "{__address__=\"bar:9090\"}", false)
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true)
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/1"}, "{__address__=\"baz:9090\"}", true)
}
// TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after
@ -819,33 +817,24 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run()
c := map[string]sd_config.ServiceDiscoveryConfig{
c := map[string]Configs{
"prometheus": {
StaticConfigs: []*targetgroup.Group{
{
Source: "0",
Targets: []model.LabelSet{
{
model.AddressLabel: model.LabelValue("foo:9090"),
},
},
},
},
staticConfig("foo:9090"),
},
}
discoveryManager.ApplyConfig(c)
<-discoveryManager.SyncCh()
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "string/0"}, "{__address__=\"foo:9090\"}", true)
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
c["prometheus"] = sd_config.ServiceDiscoveryConfig{
StaticConfigs: []*targetgroup.Group{},
c["prometheus"] = Configs{
StaticConfig{{}},
}
discoveryManager.ApplyConfig(c)
<-discoveryManager.SyncCh()
pkey := poolKey{setName: "prometheus", provider: "string/0"}
pkey := poolKey{setName: "prometheus", provider: "static/0"}
targetGroups, ok := discoveryManager.targets[pkey]
if !ok {
t.Fatalf("'%v' should be present in target groups", pkey)
@ -861,78 +850,36 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
}
func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
tmpFile, err := ioutil.TempFile("", "sd")
if err != nil {
t.Fatalf("error creating temporary file: %v", err)
}
defer os.Remove(tmpFile.Name())
if _, err := tmpFile.Write([]byte(`[{"targets": ["foo:9090"]}]`)); err != nil {
t.Fatalf("error writing temporary file: %v", err)
}
if err := tmpFile.Close(); err != nil {
t.Fatalf("error closing temporary file: %v", err)
}
tmpFile2 := fmt.Sprintf("%s.json", tmpFile.Name())
if err = os.Link(tmpFile.Name(), tmpFile2); err != nil {
t.Fatalf("error linking temporary file: %v", err)
}
defer os.Remove(tmpFile2)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
discoveryManager := NewManager(ctx, nil)
discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run()
c := map[string]sd_config.ServiceDiscoveryConfig{
c := map[string]Configs{
"prometheus": {
FileSDConfigs: []*file.SDConfig{
{
Files: []string{
tmpFile2,
},
RefreshInterval: file.DefaultSDConfig.RefreshInterval,
},
},
staticConfig("foo:9090"),
},
"prometheus2": {
FileSDConfigs: []*file.SDConfig{
{
Files: []string{
tmpFile2,
},
RefreshInterval: file.DefaultSDConfig.RefreshInterval,
},
},
staticConfig("foo:9090"),
},
}
discoveryManager.ApplyConfig(c)
<-discoveryManager.SyncCh()
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "*file.SDConfig/0"}, "{__address__=\"foo:9090\"}", true)
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus2", provider: "*file.SDConfig/0"}, "{__address__=\"foo:9090\"}", true)
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus2", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
if len(discoveryManager.providers) != 1 {
t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers))
}
}
func TestApplyConfigDoesNotModifyStaticProviderTargets(t *testing.T) {
cfgText := `
scrape_configs:
- job_name: 'prometheus'
static_configs:
- targets: ["foo:9090"]
- targets: ["bar:9090"]
- targets: ["baz:9090"]
`
originalConfig := &config.Config{}
if err := yaml.UnmarshalStrict([]byte(cfgText), originalConfig); err != nil {
t.Fatalf("Unable to load YAML config cfgYaml: %s", err)
func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
originalConfig := Configs{
staticConfig("foo:9090", "bar:9090", "baz:9090"),
}
processedConfig := &config.Config{}
if err := yaml.UnmarshalStrict([]byte(cfgText), processedConfig); err != nil {
t.Fatalf("Unable to load YAML config cfgYaml: %s", err)
processedConfig := Configs{
staticConfig("foo:9090", "bar:9090", "baz:9090"),
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -940,21 +887,25 @@ scrape_configs:
discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run()
c := map[string]sd_config.ServiceDiscoveryConfig{
"prometheus": processedConfig.ScrapeConfigs[0].ServiceDiscoveryConfig,
cfgs := map[string]Configs{
"prometheus": processedConfig,
}
discoveryManager.ApplyConfig(c)
discoveryManager.ApplyConfig(cfgs)
<-discoveryManager.SyncCh()
origSdcfg := originalConfig.ScrapeConfigs[0].ServiceDiscoveryConfig
for _, sdcfg := range c {
if !reflect.DeepEqual(origSdcfg.StaticConfigs, sdcfg.StaticConfigs) {
for _, cfg := range cfgs {
if !reflect.DeepEqual(originalConfig, cfg) {
t.Fatalf("discovery manager modified static config \n expected: %v\n got: %v\n",
origSdcfg.StaticConfigs, sdcfg.StaticConfigs)
originalConfig, cfg)
}
}
}
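// errorConfig is a stub discovery Config whose NewDiscoverer always returns
// the wrapped error; TestGaugeFailedConfigs registers three of them below to
// drive the failed-configs gauge.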
type errorConfig struct{ err error }
func (e errorConfig) Name() string { return "error" }
func (e errorConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { return nil, e.err }
func TestGaugeFailedConfigs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -962,28 +913,11 @@ func TestGaugeFailedConfigs(t *testing.T) {
discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run()
c := map[string]sd_config.ServiceDiscoveryConfig{
c := map[string]Configs{
"prometheus": {
ConsulSDConfigs: []*consul.SDConfig{
{
Server: "foo:8500",
TLSConfig: common_config.TLSConfig{
CertFile: "/tmp/non_existent",
},
},
{
Server: "bar:8500",
TLSConfig: common_config.TLSConfig{
CertFile: "/tmp/non_existent",
},
},
{
Server: "foo2:8500",
TLSConfig: common_config.TLSConfig{
CertFile: "/tmp/non_existent",
},
},
},
errorConfig{fmt.Errorf("tests error 0")},
errorConfig{fmt.Errorf("tests error 1")},
errorConfig{fmt.Errorf("tests error 2")},
},
}
discoveryManager.ApplyConfig(c)
@ -994,17 +928,8 @@ func TestGaugeFailedConfigs(t *testing.T) {
t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount)
}
c["prometheus"] = sd_config.ServiceDiscoveryConfig{
StaticConfigs: []*targetgroup.Group{
{
Source: "0",
Targets: []model.LabelSet{
{
model.AddressLabel: "foo:9090",
},
},
},
},
c["prometheus"] = Configs{
staticConfig("foo:9090"),
}
discoveryManager.ApplyConfig(c)
<-discoveryManager.SyncCh()
@ -1195,16 +1120,10 @@ func newMockDiscoveryProvider(updates ...update) mockdiscoveryProvider {
func (tp mockdiscoveryProvider) Run(ctx context.Context, upCh chan<- []*targetgroup.Group) {
for _, u := range tp.updates {
if u.interval > 0 {
t := time.NewTicker(u.interval)
defer t.Stop()
Loop:
for {
select {
case <-ctx.Done():
return
case <-t.C:
break Loop
}
select {
case <-ctx.Done():
return
case <-time.After(u.interval):
}
}
tgs := make([]*targetgroup.Group, len(u.targetGroups))

View file

@ -28,9 +28,10 @@ import (
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil"
@ -63,13 +64,31 @@ var DefaultSDConfig = SDConfig{
RefreshInterval: model.Duration(30 * time.Second),
}
func init() {
discovery.RegisterConfig(&SDConfig{})
}
// SDConfig is the configuration for services running on Marathon.
type SDConfig struct {
Servers []string `yaml:"servers,omitempty"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
AuthToken config_util.Secret `yaml:"auth_token,omitempty"`
AuthTokenFile string `yaml:"auth_token_file,omitempty"`
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
Servers []string `yaml:"servers,omitempty"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
AuthToken config.Secret `yaml:"auth_token,omitempty"`
AuthTokenFile string `yaml:"auth_token_file,omitempty"`
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
}
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "marathon" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(*c, opts.Logger)
}
// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
c.HTTPClientConfig.SetDirectory(dir)
c.AuthTokenFile = config.JoinDir(dir, c.AuthTokenFile)
}
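// For example, SetDirectory("/etc/prometheus") would resolve a relative
// auth_token_file such as "token" to "/etc/prometheus/token", assuming
// config.JoinDir leaves absolute paths untouched.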
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -108,7 +127,7 @@ type Discovery struct {
// NewDiscovery returns a new Marathon Discovery.
func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
rt, err := config_util.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd", false)
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd", false, false)
if err != nil {
return nil, err
}
@ -137,12 +156,12 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
}
type authTokenRoundTripper struct {
authToken config_util.Secret
authToken config.Secret
rt http.RoundTripper
}
// newAuthTokenRoundTripper adds the provided auth token to a request.
func newAuthTokenRoundTripper(token config_util.Secret, rt http.RoundTripper) (http.RoundTripper, error) {
func newAuthTokenRoundTripper(token config.Secret, rt http.RoundTripper) (http.RoundTripper, error) {
return &authTokenRoundTripper{token, rt}, nil
}

View file

@ -24,9 +24,10 @@ import (
"github.com/gophercloud/gophercloud/openstack"
conntrack "github.com/mwitkow/go-conntrack"
"github.com/pkg/errors"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
@ -38,26 +39,43 @@ var DefaultSDConfig = SDConfig{
Availability: "public",
}
func init() {
discovery.RegisterConfig(&SDConfig{})
}
// SDConfig is the configuration for OpenStack-based service discovery.
type SDConfig struct {
IdentityEndpoint string `yaml:"identity_endpoint"`
Username string `yaml:"username"`
UserID string `yaml:"userid"`
Password config_util.Secret `yaml:"password"`
ProjectName string `yaml:"project_name"`
ProjectID string `yaml:"project_id"`
DomainName string `yaml:"domain_name"`
DomainID string `yaml:"domain_id"`
ApplicationCredentialName string `yaml:"application_credential_name"`
ApplicationCredentialID string `yaml:"application_credential_id"`
ApplicationCredentialSecret config_util.Secret `yaml:"application_credential_secret"`
Role Role `yaml:"role"`
Region string `yaml:"region"`
RefreshInterval model.Duration `yaml:"refresh_interval"`
Port int `yaml:"port"`
AllTenants bool `yaml:"all_tenants,omitempty"`
TLSConfig config_util.TLSConfig `yaml:"tls_config,omitempty"`
Availability string `yaml:"availability,omitempty"`
IdentityEndpoint string `yaml:"identity_endpoint"`
Username string `yaml:"username"`
UserID string `yaml:"userid"`
Password config.Secret `yaml:"password"`
ProjectName string `yaml:"project_name"`
ProjectID string `yaml:"project_id"`
DomainName string `yaml:"domain_name"`
DomainID string `yaml:"domain_id"`
ApplicationCredentialName string `yaml:"application_credential_name"`
ApplicationCredentialID string `yaml:"application_credential_id"`
ApplicationCredentialSecret config.Secret `yaml:"application_credential_secret"`
Role Role `yaml:"role"`
Region string `yaml:"region"`
RefreshInterval model.Duration `yaml:"refresh_interval"`
Port int `yaml:"port"`
AllTenants bool `yaml:"all_tenants,omitempty"`
TLSConfig config.TLSConfig `yaml:"tls_config,omitempty"`
Availability string `yaml:"availability,omitempty"`
}
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "openstack" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(c, opts.Logger)
}
// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
c.TLSConfig.SetDirectory(dir)
}
// Role is the role of the target in OpenStack.
@ -157,7 +175,7 @@ func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {
if err != nil {
return nil, err
}
tls, err := config_util.NewTLSConfig(&conf.TLSConfig)
tls, err := config.NewTLSConfig(&conf.TLSConfig)
if err != nil {
return nil, err
}

View file

@ -20,11 +20,16 @@ import (
"time"
"github.com/prometheus/common/model"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestRefresh(t *testing.T) {
tg1 := []*targetgroup.Group{
{

257
discovery/registry.go Normal file
View file

@ -0,0 +1,257 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package discovery
import (
"fmt"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"github.com/prometheus/prometheus/discovery/targetgroup"
"gopkg.in/yaml.v2"
)
const (
configFieldPrefix = "AUTO_DISCOVERY_"
staticConfigsKey = "static_configs"
staticConfigsFieldName = configFieldPrefix + staticConfigsKey
)
var (
configNames = make(map[string]Config)
configFieldNames = make(map[reflect.Type]string)
configFields []reflect.StructField
configTypesMu sync.Mutex
configTypes = make(map[reflect.Type]reflect.Type)
emptyStructType = reflect.TypeOf(struct{}{})
configsType = reflect.TypeOf(Configs{})
)
// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling.
func RegisterConfig(config Config) {
registerConfig(config.Name()+"_sd_configs", reflect.TypeOf(config), config)
}
func init() {
// N.B.: static_configs is the only Config type implemented by default.
// All other types are registered at init by their implementing packages.
elemTyp := reflect.TypeOf(&targetgroup.Group{})
registerConfig(staticConfigsKey, elemTyp, StaticConfig{})
}
func registerConfig(yamlKey string, elemType reflect.Type, config Config) {
name := config.Name()
if _, ok := configNames[name]; ok {
panic(fmt.Sprintf("discovery: Config named %q is already registered", name))
}
configNames[name] = config
fieldName := configFieldPrefix + yamlKey // Field must be exported.
configFieldNames[elemType] = fieldName
// Insert fields in sorted order.
i := sort.Search(len(configFields), func(k int) bool {
return fieldName < configFields[k].Name
})
configFields = append(configFields, reflect.StructField{}) // Add empty field at end.
copy(configFields[i+1:], configFields[i:]) // Shift fields to the right.
configFields[i] = reflect.StructField{ // Write new field in place.
Name: fieldName,
Type: reflect.SliceOf(elemType),
Tag: reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`),
}
}
func getConfigType(out reflect.Type) reflect.Type {
configTypesMu.Lock()
defer configTypesMu.Unlock()
if typ, ok := configTypes[out]; ok {
return typ
}
// Initial exported fields map one-to-one.
var fields []reflect.StructField
for i, n := 0, out.NumField(); i < n; i++ {
switch field := out.Field(i); {
case field.PkgPath == "" && field.Type != configsType:
fields = append(fields, field)
default:
fields = append(fields, reflect.StructField{
Name: "_" + field.Name, // Field must be unexported.
PkgPath: out.PkgPath(),
Type: emptyStructType,
})
}
}
// Append extra config fields on the end.
fields = append(fields, configFields...)
typ := reflect.StructOf(fields)
configTypes[out] = typ
return typ
}
// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs
// that have a Configs field that should be inlined.
func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error {
outVal := reflect.ValueOf(out)
if outVal.Kind() != reflect.Ptr {
return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
}
outVal = outVal.Elem()
if outVal.Kind() != reflect.Struct {
return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
}
outTyp := outVal.Type()
cfgTyp := getConfigType(outTyp)
cfgPtr := reflect.New(cfgTyp)
cfgVal := cfgPtr.Elem()
// Copy shared fields (defaults) to dynamic value.
var configs *Configs
for i, n := 0, outVal.NumField(); i < n; i++ {
if outTyp.Field(i).Type == configsType {
configs = outVal.Field(i).Addr().Interface().(*Configs)
continue
}
if cfgTyp.Field(i).PkgPath != "" {
continue // Field is unexported: ignore.
}
cfgVal.Field(i).Set(outVal.Field(i))
}
if configs == nil {
return fmt.Errorf("discovery: Configs field not found in type: %T", out)
}
// Unmarshal into dynamic value.
if err := unmarshal(cfgPtr.Interface()); err != nil {
return replaceYAMLTypeError(err, cfgTyp, outTyp)
}
// Copy shared fields from dynamic value.
for i, n := 0, outVal.NumField(); i < n; i++ {
if cfgTyp.Field(i).PkgPath != "" {
continue // Field is unexported: ignore.
}
outVal.Field(i).Set(cfgVal.Field(i))
}
var err error
*configs, err = readConfigs(cfgVal, outVal.NumField())
return err
}
func readConfigs(structVal reflect.Value, startField int) (Configs, error) {
var (
configs Configs
targets []*targetgroup.Group
)
for i, n := startField, structVal.NumField(); i < n; i++ {
field := structVal.Field(i)
if field.Kind() != reflect.Slice {
panic("discovery: internal error: field is not a slice")
}
for k := 0; k < field.Len(); k++ {
val := field.Index(k)
if val.IsZero() || (val.Kind() == reflect.Ptr && val.Elem().IsZero()) {
key := configFieldNames[field.Type().Elem()]
key = strings.TrimPrefix(key, configFieldPrefix)
return nil, fmt.Errorf("empty or null section in %s", key)
}
switch c := val.Interface().(type) {
case *targetgroup.Group:
// Add index to the static config target groups for unique identification
// within scrape pool.
c.Source = strconv.Itoa(len(targets))
// Coalesce multiple static configs into a single static config.
targets = append(targets, c)
case Config:
configs = append(configs, c)
default:
panic("discovery: internal error: slice element is not a Config")
}
}
}
if len(targets) > 0 {
configs = append(configs, StaticConfig(targets))
}
return configs, nil
}
// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs
// that have a Configs field that should be inlined.
func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) {
inVal := reflect.ValueOf(in)
for inVal.Kind() == reflect.Ptr {
inVal = inVal.Elem()
}
inTyp := inVal.Type()
cfgTyp := getConfigType(inTyp)
cfgPtr := reflect.New(cfgTyp)
cfgVal := cfgPtr.Elem()
// Copy shared fields to dynamic value.
var configs *Configs
for i, n := 0, inTyp.NumField(); i < n; i++ {
if inTyp.Field(i).Type == configsType {
configs = inVal.Field(i).Addr().Interface().(*Configs)
}
if cfgTyp.Field(i).PkgPath != "" {
continue // Field is unexported: ignore.
}
cfgVal.Field(i).Set(inVal.Field(i))
}
if configs == nil {
return nil, fmt.Errorf("discovery: Configs field not found in type: %T", in)
}
if err := writeConfigs(cfgVal, *configs); err != nil {
return nil, err
}
return cfgPtr.Interface(), nil
}
func writeConfigs(structVal reflect.Value, configs Configs) error {
targets := structVal.FieldByName(staticConfigsFieldName).Addr().Interface().(*[]*targetgroup.Group)
for _, c := range configs {
if sc, ok := c.(StaticConfig); ok {
*targets = append(*targets, sc...)
continue
}
fieldName, ok := configFieldNames[reflect.TypeOf(c)]
if !ok {
return fmt.Errorf("discovery: cannot marshal unregistered Config type: %T", c)
}
field := structVal.FieldByName(fieldName)
field.Set(reflect.Append(field, reflect.ValueOf(c)))
}
return nil
}
func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
if e, ok := err.(*yaml.TypeError); ok {
oldStr := oldTyp.String()
newStr := newTyp.String()
for i, s := range e.Errors {
e.Errors[i] = strings.Replace(s, oldStr, newStr, -1)
}
}
return err
}
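// For illustration only (not part of the upstream file): a hypothetical
// discovery package would hook into this registry the same way the marathon,
// triton, and zookeeper packages in this commit do. All names below are
// invented for the sketch.
//
//	func init() {
//		// Makes "example_sd_configs" a valid key in configuration sections
//		// that unmarshal via UnmarshalYAMLWithInlineConfigs.
//		discovery.RegisterConfig(&SDConfig{})
//	}
//
//	// Name returns the name of the Config.
//	func (*SDConfig) Name() string { return "example" }
//
//	// NewDiscoverer returns a Discoverer for the Config.
//	func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
//		return newDiscovery(c, opts.Logger) // newDiscovery is assumed to exist.
//	}
//
// A struct that inlines the registered sections embeds a Configs field and
// delegates both YAML directions to the helpers above:
//
//	type ScrapeConfigLike struct {
//		JobName                 string            `yaml:"job_name"`
//		ServiceDiscoveryConfigs discovery.Configs `yaml:"-"`
//	}
//
//	func (c *ScrapeConfigLike) UnmarshalYAML(unmarshal func(interface{}) error) error {
//		return discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal)
//	}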

View file

@ -27,9 +27,10 @@ import (
"github.com/go-kit/kit/log"
conntrack "github.com/mwitkow/go-conntrack"
"github.com/pkg/errors"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
@ -52,17 +53,34 @@ var DefaultSDConfig = SDConfig{
Version: 1,
}
func init() {
discovery.RegisterConfig(&SDConfig{})
}
// SDConfig is the configuration for Triton based service discovery.
type SDConfig struct {
Account string `yaml:"account"`
Role string `yaml:"role"`
DNSSuffix string `yaml:"dns_suffix"`
Endpoint string `yaml:"endpoint"`
Groups []string `yaml:"groups,omitempty"`
Port int `yaml:"port"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
TLSConfig config_util.TLSConfig `yaml:"tls_config,omitempty"`
Version int `yaml:"version"`
Account string `yaml:"account"`
Role string `yaml:"role"`
DNSSuffix string `yaml:"dns_suffix"`
Endpoint string `yaml:"endpoint"`
Groups []string `yaml:"groups,omitempty"`
Port int `yaml:"port"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
TLSConfig config.TLSConfig `yaml:"tls_config,omitempty"`
Version int `yaml:"version"`
}
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "triton" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return New(opts.Logger, c)
}
// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
c.TLSConfig.SetDirectory(dir)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -122,7 +140,7 @@ type Discovery struct {
// New returns a new Discovery which periodically refreshes its targets.
func New(logger log.Logger, conf *SDConfig) (*Discovery, error) {
tls, err := config_util.NewTLSConfig(&conf.TLSConfig)
tls, err := config.NewTLSConfig(&conf.TLSConfig)
if err != nil {
return nil, err
}

View file

@ -27,6 +27,7 @@ import (
"github.com/prometheus/common/model"
"github.com/samuel/go-zookeeper/zk"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil"
"github.com/prometheus/prometheus/util/treecache"
@ -43,6 +44,11 @@ var (
}
)
func init() {
discovery.RegisterConfig(&ServersetSDConfig{})
discovery.RegisterConfig(&NerveSDConfig{})
}
// ServersetSDConfig is the configuration for Twitter serversets in Zookeeper based discovery.
type ServersetSDConfig struct {
Servers []string `yaml:"servers"`
@ -50,6 +56,14 @@ type ServersetSDConfig struct {
Timeout model.Duration `yaml:"timeout,omitempty"`
}
// Name returns the name of the Config.
func (*ServersetSDConfig) Name() string { return "serverset" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *ServersetSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewServersetDiscovery(c, opts.Logger)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *ServersetSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultServersetSDConfig
@ -79,6 +93,14 @@ type NerveSDConfig struct {
Timeout model.Duration `yaml:"timeout,omitempty"`
}
// Name returns the name of the Config.
func (*NerveSDConfig) Name() string { return "nerve" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *NerveSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewNerveDiscovery(c, opts.Logger)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *NerveSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultNerveSDConfig

View file

@ -18,8 +18,13 @@ import (
"time"
"github.com/prometheus/common/model"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestNewDiscoveryError(t *testing.T) {
_, err := NewDiscovery(
[]string{"unreachable.test"},

View file

@ -86,7 +86,7 @@ the "Alerts" tab of your Prometheus instance. This will show you the exact
label sets for which each defined alert is currently active.
For pending and firing alerts, Prometheus also stores synthetic time series of
the form `ALERTS{alertname="<alert name>", alertstate="pending|firing", <additional alert labels>}`.
the form `ALERTS{alertname="<alert name>", alertstate="<pending or firing>", <additional alert labels>}`.
The sample value is set to `1` as long as the alert is in the indicated active
(pending or firing) state, and the series is marked stale when this is no
longer the case.

View file

@ -32,7 +32,7 @@ value is set to the specified default.
Generic placeholders are defined as follows:
* `<boolean>`: a boolean that can take the values `true` or `false`
* `<duration>`: a duration matching the regular expression `[0-9]+(ms|[smhdwy])`
* `<duration>`: a duration matching the regular expression `((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0)`, e.g. `1d`, `1h30m`, `5m`, `10s`
* `<filename>`: a valid path in the current working directory
* `<host>`: a valid string consisting of a hostname or IP followed by an optional port number
* `<int>`: an integer value
@ -207,6 +207,10 @@ dns_sd_configs:
ec2_sd_configs:
[ - <ec2_sd_config> ... ]
# List of Eureka service discovery configurations.
eureka_sd_configs:
[ - <eureka_sd_config> ... ]
# List of file service discovery configurations.
file_sd_configs:
[ - <file_sd_config> ... ]
@ -215,6 +219,10 @@ file_sd_configs:
gce_sd_configs:
[ - <gce_sd_config> ... ]
# List of Hetzner service discovery configurations.
hetzner_sd_configs:
[ - <hetzner_sd_config> ... ]
# List of Kubernetes service discovery configurations.
kubernetes_sd_configs:
[ - <kubernetes_sd_config> ... ]
@ -252,9 +260,16 @@ metric_relabel_configs:
[ - <relabel_config> ... ]
# Per-scrape limit on number of scraped samples that will be accepted.
# If more than this number of samples are present after metric relabelling
# If more than this number of samples are present after metric relabeling
# the entire scrape will be treated as failed. 0 means no limit.
[ sample_limit: <int> | default = 0 ]
# Per-scrape config limit on number of unique targets that will be
# accepted. If more than this number of targets are present after target
# relabeling, Prometheus will mark the targets as failed without scraping them.
# 0 means no limit. This is an experimental feature; this behaviour could
# change in the future.
[ target_limit: <int> | default = 0 ]
```
Where `<job_name>` must be unique across all scrape configurations.
@ -283,7 +298,7 @@ A `tls_config` allows configuring TLS connections.
Azure SD configurations allow retrieving scrape targets from Azure VMs.
The following meta labels are available on targets during relabeling:
The following meta labels are available on targets during [relabeling](#relabel_config):
* `__meta_azure_machine_id`: the machine ID
* `__meta_azure_machine_location`: the location the machine runs in
@ -459,7 +474,10 @@ One of the following roles can be configured to discover targets:
#### `services`
The `services` role is used to discover [Swarm services](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks).
The `services` role discovers all [Swarm services](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks)
and exposes their ports as targets. For each published port of a service, a
single target is generated. If a service has no published ports, a target per
service is created using the `port` parameter defined in the SD configuration.
Available meta labels:
@ -481,7 +499,10 @@ Available meta labels:
#### `tasks`
The `tasks` role is used to discover [Swarm tasks](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks).
The `tasks` role discovers all [Swarm tasks](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks)
and exposes their ports as targets. For each published port of a task, a single
target is generated. If a task has no published ports, a target per task is
created using the `port` parameter defined in the SD configuration.
Available meta labels:
@ -552,7 +573,8 @@ tls_config:
# Role of the targets to retrieve. Must be `services`, `tasks`, or `nodes`.
role: <string>
# The port to scrape metrics from, when `role` is nodes.
# The port to scrape metrics from, when `role` is nodes, and for discovered
# tasks and services that don't have published ports.
[ port: <int> | default = 80 ]
# The time after which the targets are refreshed.
@ -589,9 +611,11 @@ This service discovery method only supports basic DNS A, AAAA and SRV record
queries, but not the advanced DNS-SD approach specified in
[RFC6763](https://tools.ietf.org/html/rfc6763).
During the [relabeling phase](#relabel_config), the meta label
`__meta_dns_name` is available on each target and is set to the
record name that produced the discovered target.
The following meta labels are available on targets during [relabeling](#relabel_config):
* `__meta_dns_name`: the record name that produced the discovered target.
* `__meta_dns_srv_record_target`: the target field of the SRV record
* `__meta_dns_srv_record_port`: the port field of the SRV record
```yaml
# A list of DNS domain names to be queried.
@ -795,9 +819,10 @@ It reads a set of files containing a list of zero or more
and applied immediately. Files may be provided in YAML or JSON format. Only
changes resulting in well-formed target groups are applied.
The JSON file must contain a list of static configs, using this format:
Files must contain a list of static configs, using these formats:
```yaml
**JSON**
```json
[
{
"targets": [ "<host>", ... ],
@ -809,6 +834,14 @@ The JSON file must contain a list of static configs, using this format:
]
```
**YAML**
```yaml
- targets:
[ - '<host>' ]
labels:
[ <labelname>: <labelvalue> ... ]
```
As a fallback, the file contents are also re-read periodically at the specified
refresh interval.
@ -893,6 +926,84 @@ instance it is running on should have at least read-only permissions to the
compute resources. If running outside of GCE make sure to create an appropriate
service account and place the credential file in one of the expected locations.
### `<hetzner_sd_config>`
Hetzner SD configurations allow retrieving scrape targets from
[Hetzner](https://www.hetzner.com/) [Cloud](https://www.hetzner.cloud/) API and
[Robot](https://docs.hetzner.com/robot/) API.
This service discovery uses the public IPv4 address by default, but that can be
changed with relabeling, as demonstrated in [the Prometheus hetzner-sd
configuration file](/documentation/examples/prometheus-hetzner.yml).
The following meta labels are available on all targets during [relabeling](#relabel_config):
* `__meta_hetzner_server_id`: the ID of the server
* `__meta_hetzner_server_name`: the name of the server
* `__meta_hetzner_server_status`: the status of the server
* `__meta_hetzner_public_ipv4`: the public ipv4 address of the server
* `__meta_hetzner_public_ipv6_network`: the public ipv6 network (/64) of the server
* `__meta_hetzner_datacenter`: the datacenter of the server
The labels below are only available for targets with `role` set to `hcloud`:
* `__meta_hetzner_hcloud_image_name`: the image name of the server
* `__meta_hetzner_hcloud_image_description`: the description of the server image
* `__meta_hetzner_hcloud_image_os_flavor`: the OS flavor of the server image
* `__meta_hetzner_hcloud_image_os_version`: the OS version of the server image
* `__meta_hetzner_hcloud_datacenter_location`: the location of the server
* `__meta_hetzner_hcloud_datacenter_location_network_zone`: the network zone of the server
* `__meta_hetzner_hcloud_server_type`: the type of the server
* `__meta_hetzner_hcloud_cpu_cores`: the CPU cores count of the server
* `__meta_hetzner_hcloud_cpu_type`: the CPU type of the server (shared or dedicated)
* `__meta_hetzner_hcloud_memory_size_gb`: the amount of memory of the server (in GB)
* `__meta_hetzner_hcloud_disk_size_gb`: the disk size of the server (in GB)
* `__meta_hetzner_hcloud_private_ipv4_<networkname>`: the private ipv4 address of the server within a given network
* `__meta_hetzner_hcloud_label_<labelname>`: each label of the server
The labels below are only available for targets with `role` set to `robot`:
* `__meta_hetzner_robot_product`: the product of the server
* `__meta_hetzner_robot_cancelled`: the server cancellation status
```yaml
# The Hetzner role of entities that should be discovered.
# One of robot or hcloud.
role: <string>
# Authentication information used to authenticate to the API server.
# Note that `basic_auth`, `bearer_token` and `bearer_token_file` options are
# mutually exclusive.
# password and password_file are mutually exclusive.
# Optional HTTP basic authentication information, required when role is robot
# Role hcloud does not support basic auth.
basic_auth:
[ username: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
# Optional bearer token authentication information, required when role is hcloud
# Role robot does not support bearer token authentication.
[ bearer_token: <secret> ]
# Optional bearer token file authentication information.
[ bearer_token_file: <filename> ]
# Optional proxy URL.
[ proxy_url: <string> ]
# TLS configuration.
tls_config:
[ <tls_config> ]
# The port to scrape metrics from.
[ port: <int> | default = 80 ]
# The time after which the servers are refreshed.
[ refresh_interval: <duration> | default = 60s ]
```
### `<kubernetes_sd_config>`
Kubernetes SD configurations allow retrieving scrape targets from
@ -1184,7 +1295,7 @@ stored in [Zookeeper](https://zookeeper.apache.org/). Serversets are commonly
used by [Finagle](https://twitter.github.io/finagle/) and
[Aurora](https://aurora.apache.org/).
The following meta labels are available on targets during relabeling:
The following meta labels are available on targets during [relabeling](#relabel_config):
* `__meta_serverset_path`: the full path to the serverset member node in Zookeeper
* `__meta_serverset_endpoint_host`: the host of the default endpoint
@ -1277,6 +1388,72 @@ tls_config:
[ <tls_config> ]
```
### `<eureka_sd_config>`
Eureka SD configurations allow retrieving scrape targets using the
[Eureka](https://github.com/Netflix/eureka) REST API. Prometheus
will periodically check the REST endpoint and
create a target for every app instance.
The following meta labels are available on targets during [relabeling](#relabel_config):
* `__meta_eureka_app_name`: the name of the app
* `__meta_eureka_app_instance_id`: the ID of the app instance
* `__meta_eureka_app_instance_hostname`: the hostname of the instance
* `__meta_eureka_app_instance_homepage_url`: the homepage url of the app instance
* `__meta_eureka_app_instance_statuspage_url`: the status page url of the app instance
* `__meta_eureka_app_instance_healthcheck_url`: the health check url of the app instance
* `__meta_eureka_app_instance_ip_addr`: the IP address of the app instance
* `__meta_eureka_app_instance_vip_address`: the VIP address of the app instance
* `__meta_eureka_app_instance_secure_vip_address`: the secure VIP address of the app instance
* `__meta_eureka_app_instance_status`: the status of the app instance
* `__meta_eureka_app_instance_port`: the port of the app instance
* `__meta_eureka_app_instance_port_enabled`: whether the port of the app instance is enabled
* `__meta_eureka_app_instance_secure_port`: the secure port address of the app instance
* `__meta_eureka_app_instance_secure_port_enabled`: whether the secure port of the app instance is enabled
* `__meta_eureka_app_instance_country_id`: the country ID of the app instance
* `__meta_eureka_app_instance_metadata_<metadataname>`: app instance metadata
* `__meta_eureka_app_instance_datacenterinfo_name`: the datacenter name of the app instance
* `__meta_eureka_app_instance_datacenterinfo_<metadataname>`: the datacenter metadata
See below for the configuration options for Eureka discovery:
```yaml
# The URL to connect to the Eureka server.
server: <string>
# Sets the `Authorization` header on every request with the
# configured username and password.
# password and password_file are mutually exclusive.
basic_auth:
[ username: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
# Sets the `Authorization` header on every request with
# the configured bearer token. It is mutually exclusive with `bearer_token_file`.
[ bearer_token: <secret> ]
# Sets the `Authorization` header on every request with the bearer token
# read from the configured file. It is mutually exclusive with `bearer_token`.
[ bearer_token_file: <filename> ]
# Configures the scrape request's TLS settings.
tls_config:
[ <tls_config> ]
# Optional proxy URL.
[ proxy_url: <string> ]
# Refresh interval to re-read the app instance list.
[ refresh_interval: <duration> | default = 30s ]
```
See [the Prometheus eureka-sd configuration file](/documentation/examples/prometheus-eureka.yml)
for a practical example of how to set up your Eureka app and your Prometheus
configuration.
### `<static_config>`
A `static_config` allows specifying a list of targets and a common label set
@ -1453,6 +1630,10 @@ dns_sd_configs:
ec2_sd_configs:
[ - <ec2_sd_config> ... ]
# List of Eureka service discovery configurations.
eureka_sd_configs:
[ - <eureka_sd_config> ... ]
# List of file service discovery configurations.
file_sd_configs:
[ - <file_sd_config> ... ]
@ -1469,6 +1650,10 @@ dockerswarm_sd_configs:
gce_sd_configs:
[ - <gce_sd_config> ... ]
# List of Hetzner service discovery configurations.
hetzner_sd_configs:
[ - <hetzner_sd_config> ... ]
# List of Kubernetes service discovery configurations.
kubernetes_sd_configs:
[ - <kubernetes_sd_config> ... ]

View file

@ -27,9 +27,9 @@ prom/prometheus`. This starts Prometheus with a sample
configuration and exposes it on port 9090.
The Prometheus image uses a volume to store the actual metrics. For
production deployments it is highly recommended to use the
[Data Volume Container](https://docs.docker.com/engine/admin/volumes/volumes/)
pattern to ease managing the data on Prometheus upgrades.
production deployments it is highly recommended to use a
[named volume](https://docs.docker.com/storage/volumes/)
to ease managing the data on Prometheus upgrades.
To provide your own configuration, there are several options. Here are
two examples.
@ -41,11 +41,12 @@ Bind-mount your `prometheus.yml` from the host by running:
```bash
docker run \
-p 9090:9090 \
-v /tmp/prometheus.yml:/etc/prometheus/prometheus.yml \
-v /path/to/prometheus.yml:/etc/prometheus/prometheus.yml \
prom/prometheus
```
Or use an additional volume for the config:
Or bind-mount the directory containing `prometheus.yml` onto
`/etc/prometheus` by running:
```bash
docker run \

View file

@ -7,7 +7,7 @@ sort_rank: 8
In line with our [stability promise](https://prometheus.io/blog/2016/07/18/prometheus-1-0-released/#fine-print),
the Prometheus 2.0 release contains a number of backwards incompatible changes.
This document offers guidance on migrating from Prometheus 1.8 to Prometheus 2.0.
This document offers guidance on migrating from Prometheus 1.8 to Prometheus 2.0 and newer versions.
## Flags
@ -120,12 +120,12 @@ new format. For example:
$ promtool update rules example.rules
```
Note that you will need to use promtool from 2.0, not 1.8.
You will need to use `promtool` from [Prometheus 2.5](https://github.com/prometheus/prometheus/releases/tag/v2.5.0) as later versions no longer contain the above subcommand.
## Storage
The data format in Prometheus 2.0 has completely changed and is not backwards
compatible with 1.8. To retain access to your historic monitoring data we
compatible with 1.8 and older versions. To retain access to your historic monitoring data we
recommend you run a non-scraping Prometheus instance running at least version
1.8.1 in parallel with your Prometheus 2.0 instance, and have the new server
read existing data from the old one via the remote read protocol.

View file

@ -46,21 +46,20 @@ The JSON response envelope format is as follows:
}
```
Input timestamps may be provided either in
Generic placeholders are defined as follows:
* `<rfc3339 | unix_timestamp>`: Input timestamps may be provided either in
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format or as a Unix timestamp
in seconds, with optional decimal places for sub-second precision. Output
timestamps are always represented as Unix timestamps in seconds.
Names of query parameters that may be repeated end with `[]`.
`<series_selector>` placeholders refer to Prometheus [time series
* `<series_selector>`: Prometheus [time series
selectors](basics.md#time-series-selectors) like `http_requests_total` or
`http_requests_total{method=~"(GET|POST)"}` and need to be URL-encoded.
* `<duration>`: [Prometheus duration strings](basics.md#time_durations).
For example, `5m` refers to a duration of 5 minutes.
* `<bool>`: boolean values (strings `true` and `false`).
`<duration>` placeholders refer to Prometheus duration strings of the form
`[0-9]+[smhdwy]`. For example, `5m` refers to a duration of 5 minutes.
`<bool>` placeholders refer to boolean values (strings `true` and `false`).
Note: Names of query parameters that may be repeated end with `[]`.
## Expression queries
@ -840,7 +839,6 @@ $ curl http://localhost:9090/api/v1/status/runtimeinfo
"CWD": "/",
"reloadConfigSuccess": true,
"lastConfigTime": "2019-11-02T17:23:59+01:00",
"chunkCount": 873,
"timeSeriesCount": 873,
"corruptionCount": 0,
"goroutineCount": 48,
@ -892,6 +890,11 @@ The following endpoint returns various cardinality statistics about the Promethe
```
GET /api/v1/status/tsdb
```
- **headStats**: This provides the following data about the head block of the TSDB:
- **numSeries**: The number of series.
- **chunkCount**: The number of chunks.
- **minTime**: The current minimum timestamp in milliseconds.
- **maxTime**: The current maximum timestamp in milliseconds.
- **seriesCountByMetricName:** This will provide a list of metrics names and their series count.
- **labelValueCountByLabelName:** This will provide a list of the label names and their value count.
- **memoryInBytesByLabelName:** This will provide a list of the label names and memory used in bytes. Memory usage is calculated by adding the length of all values for a given label name.
@ -902,6 +905,12 @@ $ curl http://localhost:9090/api/v1/status/tsdb
{
"status": "success",
"data": {
"headStats": {
"numSeries": 508,
"chunkCount": 937,
"minTime": 1591516800000,
"maxTime": 1598896800143,
},
"seriesCountByMetricName": [
{
"name": "net_conntrack_dialer_conn_failed_total",
@ -951,8 +960,6 @@ $ curl http://localhost:9090/api/v1/status/tsdb
## TSDB Admin APIs
These APIs expose database functionality for advanced users. They are not enabled unless the `--web.enable-admin-api` flag is set.
We also expose a gRPC API whose definition can be found [here](https://github.com/prometheus/prometheus/blob/master/prompb/rpc.proto). This is experimental and might change in the future.
### Snapshot
Snapshot creates a snapshot of all current data into `snapshots/<datetime>-<rand>` under the TSDB's data directory and returns the directory as response.
It will optionally skip snapshotting data that is only present in the head block, and which has not yet been compacted to disk.
@ -980,7 +987,7 @@ The snapshot now exists at `<data-dir>/snapshots/20171210T211224Z-2be650b6d019eb
*New in v2.1 and supports PUT from v2.9*
### Delete Series
DeleteSeries deletes data for a selection of series in a time range. The actual data still exists on disk and is cleaned up in future compactions or can be explicitly cleaned up by hitting the Clean Tombstones endpoint.
DeleteSeries deletes data for a selection of series in a time range. The actual data still exists on disk and is cleaned up in future compactions or can be explicitly cleaned up by hitting the [Clean Tombstones](#clean-tombstones) endpoint.
If successful, a `204` is returned.

View file

@ -145,20 +145,10 @@ syntax](https://github.com/google/re2/wiki/Syntax).
### Range Vector Selectors
Range vector literals work like instant vector literals, except that they
select a range of samples back from the current instant. Syntactically, a range
duration is appended in square brackets (`[]`) at the end of a vector selector
to specify how far back in time values should be fetched for each resulting
range vector element.
Time durations are specified as a number, followed immediately by one of the
following units:
* `s` - seconds
* `m` - minutes
* `h` - hours
* `d` - days
* `w` - weeks
* `y` - years
select a range of samples back from the current instant. Syntactically, a [time
duration](#time_durations) is appended in square brackets (`[]`) at the end of a
vector selector to specify how far back in time values should be fetched for
each resulting range vector element.
In this example, we select all the values we have recorded within the last 5
minutes for all time series that have the metric name `http_requests_total` and
@ -166,6 +156,29 @@ a `job` label set to `prometheus`:
http_requests_total{job="prometheus"}[5m]
### Time Durations
Time durations are specified as a number, followed immediately by one of the
following units:
* `ms` - milliseconds
* `s` - seconds
* `m` - minutes
* `h` - hours
* `d` - days - assuming a day always has 24h
* `w` - weeks - assuming a week always has 7d
* `y` - years - assuming a year always has 365d
Time durations can be combined by concatenation. Units must be ordered from the
longest to the shortest. A given unit must only appear once in a time duration.
Here are some examples of valid time durations:
5h
1h30m
5m
10s
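As a programmatic cross-check of this grammar, the short Go sketch below uses
`model.ParseDuration` from `github.com/prometheus/common/model`, which
implements this duration syntax (a minimal illustration, not canonical usage):

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        // Valid durations: units ordered longest to shortest, each at most once.
        for _, s := range []string{"5h", "1h30m", "5m", "10s"} {
            d, err := model.ParseDuration(s)
            fmt.Println(s, d, err)
        }
        // Units out of order (or repeated) are rejected.
        if _, err := model.ParseDuration("30m1h"); err != nil {
            fmt.Println("rejected:", err)
        }
    }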
### Offset modifier
The `offset` modifier allows changing the time offset for individual

View file

@ -31,7 +31,7 @@ scalar that is the result of the operator applied to both scalar operands.
**Between an instant vector and a scalar**, the operator is applied to the
value of every data sample in the vector. E.g. if a time series instant vector
is multiplied by 2, the result is another vector in which every sample value of
the original vector is multiplied by 2.
the original vector is multiplied by 2. The metric name is dropped.
**Between two instant vectors**, a binary arithmetic operator is applied to
each entry in the left-hand side vector and its [matching element](#vector-matching)
@ -64,7 +64,8 @@ operators result in another scalar that is either `0` (`false`) or `1`
value of every data sample in the vector, and vector elements between which the
comparison result is `false` get dropped from the result vector. If the `bool`
modifier is provided, vector elements that would be dropped instead have the value
`0` and vector elements that would be kept have the value `1`.
`0` and vector elements that would be kept have the value `1`. The metric name
is dropped if the `bool` modifier is provided.
**Between two instant vectors**, these operators behave as a filter by default,
applied to matching entries. Vector elements for which the expression is not
@ -74,6 +75,7 @@ with the grouping labels becoming the output label set.
If the `bool` modifier is provided, vector elements that would have been
dropped instead have the value `0` and vector elements that would be kept have
the value `1`, with the grouping labels again becoming the output label set.
The metric name is dropped if the `bool` modifier is provided.
### Logical/set binary operators

View file

@ -24,7 +24,6 @@ Things considered unstable for 2.x:
* Any feature listed as experimental or subject to change, including:
* The [`holt_winters` PromQL function](https://github.com/prometheus/prometheus/issues/2458)
* Remote read, remote write and the remote read endpoint
* v2 HTTP and GRPC APIs
* Service discovery integrations, with the exception of `static_configs` and `file_sd_configs`
* Go APIs of packages that are part of the server
* HTML generated by the web UI

View file

@ -15,7 +15,7 @@ Prometheus's local time series database stores time series data in a custom form
Ingested samples are grouped into blocks of two hours. Each two-hour block consists of a directory containing one or more chunk files that contain all time series samples for that window of time, as well as a metadata file and index file (which indexes metric names and labels to time series in the chunk files). When series are deleted via the API, deletion records are stored in separate tombstone files (instead of deleting the data immediately from the chunk files).
The block for currently incoming samples is kept in memory and not fully persisted yet. It is secured against crashes by a write-ahead-log (WAL) that can be replayed when the Prometheus server restarts after a crash. Write-ahead log files are stored in the `wal` directory in 128MB segments. These files contain raw data that has not been compacted yet, so they are significantly larger than regular block files. Prometheus will keep a minimum of 3 write-ahead log files, however high-traffic servers may see more than three WAL files since it needs to keep at least two hours worth of raw data.
The block for currently incoming samples is kept in memory and not fully persisted yet. It is secured against crashes by a write-ahead log (WAL) that can be replayed when the Prometheus server restarts after a crash. Write-ahead log files are stored in the `wal` directory in 128MB segments. These files contain raw data that has not been compacted yet, so they are significantly larger than regular block files. Prometheus will keep a minimum of 3 write-ahead log files, however high-traffic servers may see more than three WAL files since it needs to keep at least two hours worth of raw data.
The directory structure of a Prometheus server's data directory will look something like this:
@ -37,17 +37,20 @@ The directory structure of a Prometheus server's data directory will look someth
│  ├── tombstones
│  ├── index
│  └── meta.json
├── chunks_head
│  └── 000001
└── wal
  ├── 00000002
  └── checkpoint.000001
  ├── 000000002
  └── checkpoint.00000001
   └── 00000000
```
Note that a limitation of the local storage is that it is not clustered or replicated. Thus, it is not arbitrarily scalable or durable in the face of disk or node outages and should be treated as you would any other kind of single node database. Using RAID for disk availability, [snapshots](https://prometheus.io/docs/prometheus/latest/querying/api/#snapshot) for backups, capacity planning, etc, is recommended for improved durability. With proper storage durability and planning storing years of data in the local storage is possible.
Note that a limitation of the local storage is that it is not clustered or replicated. Thus, it is not arbitrarily scalable or durable in the face of disk or node outages and should be treated as you would any other kind of single node database. Using RAID for disk availability, [snapshots](querying/api.md#snapshot) for backups, capacity planning, etc., is recommended for improved durability. With proper storage durability and planning, storing years of data in the local storage is possible.
Alternatively, external storage may be used via the [remote read/write APIs](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). Careful evaluation is required for these systems as they vary greatly in durability, performance, and efficiency.
For further details on file format, see [TSDB format](https://github.com/prometheus/prometheus/blob/master/tsdb/docs/format/README.md).
For further details on file format, see [TSDB format](/tsdb/docs/format/README.md).
## Compaction
@ -61,7 +64,7 @@ Prometheus has several flags that allow configuring the local storage. The most
* `--storage.tsdb.path`: This determines where Prometheus writes its database. Defaults to `data/`.
* `--storage.tsdb.retention.time`: This determines when to remove old data. Defaults to `15d`. Overrides `storage.tsdb.retention` if this flag is set to anything other than default.
* `--storage.tsdb.retention.size`: [EXPERIMENTAL] This determines the maximum number of bytes that storage blocks can use (note that this does not include the WAL size, which can be substantial). The oldest data will be removed first. Defaults to `0` or disabled. This flag is experimental and can be changed in future releases. Units supported: B, KB, MB, GB, TB, PB, EB. Ex: "512MB"
* `--storage.tsdb.retention.size`: [EXPERIMENTAL] This determines the maximum number of bytes that storage blocks can use. The oldest data will be removed first. Defaults to `0` or disabled. This flag is experimental and can be changed in future releases. Units supported: B, KB, MB, GB, TB, PB, EB. Ex: "512MB"
* `--storage.tsdb.retention`: This flag has been deprecated in favour of `storage.tsdb.retention.time`.
* `--storage.tsdb.wal-compression`: This flag enables compression of the write-ahead log (WAL). Depending on your data, you can expect the WAL size to be halved with little extra cpu load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0. Note that once enabled, downgrading Prometheus to a version below 2.11.0 will require deleting the WAL.
@ -73,7 +76,9 @@ needed_disk_space = retention_time_seconds * ingested_samples_per_second * bytes
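As a rough worked example with illustrative numbers: a 15-day retention (1,296,000 seconds) at 100,000 ingested samples per second, taking roughly 2 bytes per sample, gives 1,296,000 * 100,000 * 2 bytes, i.e. about 260GB of needed disk space.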
To tune the rate of ingested samples per second, you can either reduce the number of time series you scrape (fewer targets or fewer series per target), or you can increase the scrape interval. However, reducing the number of series is likely more effective, due to compression of samples within a series.
If your local storage becomes corrupted for whatever reason, your best bet is to shut down Prometheus and remove the entire storage directory. Non POSIX compliant filesystems are not supported by Prometheus's local storage, corruptions may happen, without possibility to recover. NFS is only potentially POSIX, most implementations are not. You can try removing individual block directories to resolve the problem, this means losing a time window of around two hours worth of data per block directory. Again, Prometheus's local storage is not meant as durable long-term storage.
If your local storage becomes corrupted for whatever reason, your best bet is to shut down Prometheus and remove the entire storage directory. You can try removing individual block directories, or the WAL directory, to resolve the problem; this means losing a time window of around two hours' worth of data per block directory. Again, Prometheus's local storage is not meant as durable long-term storage.
CAUTION: Non-POSIX compliant filesystems are not supported by Prometheus's local storage as unrecoverable corruptions may happen. NFS filesystems (including AWS's EFS) are not supported. NFS could be POSIX-compliant, but most implementations are not. It is strongly recommended to use a local filesystem for reliability.
If both time and size retention policies are specified, whichever policy triggers first will be used at that instant.

View file

@ -1,187 +0,0 @@
{
"swagger": "2.0",
"info": {
"title": "rpc.proto",
"version": "version not set"
},
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"paths": {
"/v2/admin/tsdb/clean_tombstones": {
"post": {
"summary": "CleanTombstones removes the deleted data from disk and cleans up the existing tombstones.",
"operationId": "Admin_TSDBCleanTombstones",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/prometheusTSDBCleanTombstonesResponse"
}
},
"default": {
"description": "An unexpected error response",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"tags": [
"Admin"
]
}
},
"/v2/admin/tsdb/delete_series": {
"post": {
"summary": "DeleteSeries deletes data for a selection of series in a time range.",
"operationId": "Admin_DeleteSeries",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/prometheusSeriesDeleteResponse"
}
},
"default": {
"description": "An unexpected error response",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/prometheusSeriesDeleteRequest"
}
}
],
"tags": [
"Admin"
]
}
},
"/v2/admin/tsdb/snapshot": {
"post": {
"summary": "Snapshot creates a snapshot of all current data into 'snapshots/\u003cdatetime\u003e-\u003crand\u003e' under the TSDB's data directory.",
"operationId": "Admin_TSDBSnapshot",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/prometheusTSDBSnapshotResponse"
}
},
"default": {
"description": "An unexpected error response",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"tags": [
"Admin"
]
}
}
},
"definitions": {
"prometheusLabelMatcher": {
"type": "object",
"properties": {
"type": {
"$ref": "#/definitions/prometheusLabelMatcherType"
},
"name": {
"type": "string"
},
"value": {
"type": "string"
}
},
"description": "Matcher specifies a rule, which can match or set of labels or not."
},
"prometheusLabelMatcherType": {
"type": "string",
"enum": [
"EQ",
"NEQ",
"RE",
"NRE"
],
"default": "EQ"
},
"prometheusSeriesDeleteRequest": {
"type": "object",
"properties": {
"min_time": {
"type": "string",
"format": "date-time"
},
"max_time": {
"type": "string",
"format": "date-time"
},
"matchers": {
"type": "array",
"items": {
"$ref": "#/definitions/prometheusLabelMatcher"
}
}
}
},
"prometheusSeriesDeleteResponse": {
"type": "object"
},
"prometheusTSDBCleanTombstonesResponse": {
"type": "object"
},
"prometheusTSDBSnapshotResponse": {
"type": "object",
"properties": {
"name": {
"type": "string"
}
}
},
"protobufAny": {
"type": "object",
"properties": {
"type_url": {
"type": "string"
},
"value": {
"type": "string",
"format": "byte"
}
}
},
"runtimeError": {
"type": "object",
"properties": {
"error": {
"type": "string"
},
"code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"$ref": "#/definitions/protobufAny"
}
}
}
}
}
}

View file

@ -10,7 +10,7 @@ scrape_configs:
# Create a job for Docker daemons.
#
# This exemple requires Docker daemons to be configured to expose
# This example requires Docker daemons to be configured to expose
# Prometheus metrics, as documented here:
# https://docs.docker.com/config/daemon/prometheus/
- job_name: 'docker'

View file

@ -0,0 +1,66 @@
# An example scrape configuration for running Prometheus with Eureka.
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
# Discover Eureka services to scrape.
- job_name: 'eureka'
# Scrape Eureka itself to discover new services.
eureka_sd_configs:
- server: http://localhost:8761/eureka
relabel_configs:
# You can use Eureka's application instance metadata.
# If you are using SpringBoot, you can add metadata using eureka.instance.metadataMap like this:
# application.yaml (spring-boot)
# eureka:
# instance:
# metadataMap:
# "prometheus.scrape": "true"
# "prometheus.path": "/actuator/prometheus"
# "prometheus.port": "8080"
#
#
# Example relabel to scrape only application that have
# "prometheus.scrape = true" metadata.
# - source_labels: [__meta_eureka_app_instance_metadata_prometheus_scrape]
# action: keep
# regex: true
#
# application.yaml (spring-boot)
# eureka:
# instance:
# metadataMap:
# "prometheus.scrape": "true"
#
# Example relabel to customize metric path based on application
# "prometheus.path = <metric path>" annotation.
# - source_labels: [__meta_eureka_app_instance_metadata_prometheus_path]
# action: replace
# target_label: __metrics_path__
# regex: (.+)
#
# application.yaml (spring-boot)
# eureka:
# instance:
# metadataMap:
# "prometheus.path": "/actuator/prometheus"
#
# Example relabel to scrape only single, desired port for the application
# based on application "prometheus.port = <port>" metadata.
# - source_labels: [__address__, __meta_eureka_app_instance_metadata_prometheus_port]
# action: replace
# regex: ([^:]+)(?::\d+)?;(\d+)
# replacement: $1:$2
# target_label: __address__
#
# application.yaml (spring-boot)
# eureka:
# instance:
# metadataMap:
# "prometheus.port": "8080"

View file

@ -0,0 +1,47 @@
# An example scrape configuration for running Prometheus with
# Hetzner.
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
# Discover Node Exporter instances to scrape.
- job_name: 'node'
hetzner_sd_configs:
- bearer_token: "<replace with a Hetzner Cloud API Token>"
platform: "hcloud"
relabel_configs:
# Use the public IPv4 and port 9100 to scrape the target.
- source_labels: [__meta_hetzner_public_ipv4]
target_label: __address__
replacement: '$1:9100'
# Discover Node Exporter instances to scrape using a Hetzner Cloud Network called mynet.
- job_name: 'node_private'
hetzner_sd_configs:
- bearer_token: "<replace with a Hetzner Cloud API Token>"
platform: "hcloud"
relabel_configs:
# Use the private IPv4 within the Hetzner Cloud Network and port 9100 to scrape the target.
- source_labels: [__meta_hetzner_hcloud_private_ipv4_mynet]
target_label: __address__
replacement: '$1:9100'
# Discover Node Exporter instances to scrape.
- job_name: 'node_robot'
hetzner_sd_configs:
- basic_auth:
username: "<replace with a Hetzner Robot API username>"
password: "<replace with a Hetzner Robot API password>"
platform: "robot"
relabel_configs:
# Use the public IPv4 and port 9100 to scrape the target.
- source_labels: [__meta_hetzner_public_ipv4]
target_label: __address__
replacement: '$1:9100'

View file

@ -40,7 +40,7 @@ The [termination handler](https://github.com/prometheus/prometheus/blob/v2.3.1/c
The scrape discovery manager is a [`discovery.Manager`](https://github.com/prometheus/prometheus/blob/v2.3.1/discovery/manager.go#L73-L89) that uses Prometheus's service discovery functionality to find and continuously update the list of targets from which Prometheus should scrape metrics. It runs independently of the scrape manager (which performs the actual target scrapes) and feeds it with a stream of [target group](https://github.com/prometheus/prometheus/blob/v2.3.1/discovery/targetgroup/targetgroup.go#L24-L33) updates over a [synchronization channel](https://github.com/prometheus/prometheus/blob/v2.3.1/cmd/prometheus/main.go#L431).
Internally, the scrape discovery manager runs an instance of each configuration-defined service discovery mechanism in its own goroutine. For example, if a `scrape_config` in the configuration file defines two [`kubernetes_sd_config` sections](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#%3Ckubernetes_sd_config%3E), the manager will run two separate [`kubernetes.Discovery`](https://github.com/prometheus/prometheus/blob/v2.3.1/discovery/kubernetes/kubernetes.go#L150-L159) instances. Each of these discovery instances implements the [`discovery.Discoverer` interface](https://github.com/prometheus/prometheus/blob/v2.3.1/discovery/manager.go#L41-L55) and sends target updates over a synchronization channel to the controlling discovery manager, which then enriches the target group update with information about the specific discovery instance and forwards it to the scrape manager.
Internally, the scrape discovery manager runs an instance of each configuration-defined service discovery mechanism in its own goroutine. For example, if a `scrape_config` in the configuration file defines two [`kubernetes_sd_config` sections](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config), the manager will run two separate [`kubernetes.Discovery`](https://github.com/prometheus/prometheus/blob/v2.3.1/discovery/kubernetes/kubernetes.go#L150-L159) instances. Each of these discovery instances implements the [`discovery.Discoverer` interface](https://github.com/prometheus/prometheus/blob/v2.3.1/discovery/manager.go#L41-L55) and sends target updates over a synchronization channel to the controlling discovery manager, which then enriches the target group update with information about the specific discovery instance and forwards it to the scrape manager.
When a configuration change is applied, the discovery manager stops all currently running discovery mechanisms and restarts new ones as defined in the new configuration file.
@ -71,7 +71,7 @@ In the same way that the scrape discovery manager runs one discovery mechanism f
### Target labels and target relabeling
Whenever the scrape manager receives an updated list of targets for a given scrape pool from the discovery manager, the scrape pool applies default target labels (such as `job` and `instance`) to each target and applies [target relabeling configurations](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#%3Crelabel_config%3E) to produce the final list of targets to be scraped.
Whenever the scrape manager receives an updated list of targets for a given scrape pool from the discovery manager, the scrape pool applies default target labels (such as `job` and `instance`) to each target and applies [target relabeling configurations](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) to produce the final list of targets to be scraped.
### Target hashing and scrape timing
@ -93,15 +93,15 @@ Currently rules still read and write directly from/to the fanout storage, but th
### Local storage
Prometheus's local on-disk time series database is a [light-weight wrapper](https://github.com/prometheus/prometheus/blob/v2.3.1/storage/tsdb/tsdb.go#L102-L106) around [`github.com/prometheus/prometheus/tsdb.DB`](https://github.com/prometheus/prometheus/blob/master/tsdb/db.go#L92-L117). The wrapper makes only minor interface adjustments for use of the TSDB in the context of the Prometheus server and implements the [`storage.Storage` interface](https://github.com/prometheus/prometheus/blob/v2.3.1/storage/interface.go#L31-L44). You can find more details about the TSDB's on-disk layout in the [local storage documentation](https://prometheus.io/docs/prometheus/latest/storage/).
For details about Prometheus's local on-disk time series database, please refer to [`github.com/prometheus/prometheus/tsdb.DB`](https://github.com/prometheus/prometheus/blob/master/tsdb/db.go). You can find more details about the TSDB's on-disk layout in the [local storage documentation](https://prometheus.io/docs/prometheus/latest/storage/).
### Remote storage
The remote storage is a [`remote.Storage`](https://github.com/prometheus/prometheus/blob/v2.3.1/storage/remote/storage.go#L31-L44) that implements the [`storage.Storage` interface](https://github.com/prometheus/prometheus/blob/v2.3.1/storage/interface.go#L31-L44) and is responsible for interfacing with remote read and write endpoints.
For each [`remote_write`](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#%3Cremote_write%3E) section in the configuration file, the remote storage creates and runs one [`remote.QueueManager`](https://github.com/prometheus/prometheus/blob/v2.3.1/storage/remote/queue_manager.go#L141-L161), which in turn queues and sends samples to a specific remote write endpoint. Each queue manager parallelizes writes to the remote endpoint by running a dynamic number of shards based on current and past load observations. When a configuration reload is applied, all remote storage queues are shut down and new ones are created.
For each [`remote_write`](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write) section in the configuration file, the remote storage creates and runs one [`remote.QueueManager`](https://github.com/prometheus/prometheus/blob/v2.3.1/storage/remote/queue_manager.go#L141-L161), which in turn queues and sends samples to a specific remote write endpoint. Each queue manager parallelizes writes to the remote endpoint by running a dynamic number of shards based on current and past load observations. When a configuration reload is applied, all remote storage queues are shut down and new ones are created.
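The resharding decision boils down to comparing how quickly samples arrive with how quickly one shard can send them. Below is a self-contained sketch of that core calculation; it deliberately leaves out the pending-samples backlog term, smoothing, and the min/max clamping that the real `QueueManager` applies:

```go
package main

import "fmt"

// desiredShards estimates how many shards are needed so that the aggregate
// send rate keeps up with the ingest rate: it derives the send-time cost of
// a single sample from recent per-shard observations and scales that by the
// incoming rate.
func desiredShards(samplesInPerSec, samplesOutPerSec, sendSecondsPerSec float64) float64 {
	if samplesOutPerSec <= 0 {
		return 1 // nothing observed yet; fall back to a single shard
	}
	timePerSample := sendSecondsPerSec / samplesOutPerSec // seconds of send work per sample
	return samplesInPerSec * timePerSample
}

func main() {
	// Hypothetical observations: 100k samples/s ingested, while one shard
	// spends 0.9s of each second sending and achieves 25k samples/s.
	fmt.Printf("want ~%.1f shards\n", desiredShards(100000, 25000, 0.9))
}
```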
For each [`remote_read`](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#%3Cremote_read%3E) section in the configuration file, the remote storage creates a [reader client](https://github.com/prometheus/prometheus/blob/v2.3.1/storage/remote/storage.go#L96-L118) and results from each remote source are merged.
For each [`remote_read`](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read) section in the configuration file, the remote storage creates a [reader client](https://github.com/prometheus/prometheus/blob/v2.3.1/storage/remote/storage.go#L96-L118) and results from each remote source are merged.
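Conceptually, that merge folds the per-source, time-ordered results for a series into one ordered, deduplicated stream. A toy sketch of the idea on plain slices (the real code merges `storage.SeriesSet` implementations lazily, not materialized slices):

```go
package main

import (
	"fmt"
	"sort"
)

// sample is a minimal stand-in for one point of a remote-read result.
type sample struct {
	t int64   // timestamp in milliseconds
	v float64 // value
}

// mergeSorted combines time-ordered results from several sources into one
// time-ordered stream, keeping the first value seen for duplicate timestamps.
func mergeSorted(sources ...[]sample) []sample {
	var all []sample
	for _, s := range sources {
		all = append(all, s...)
	}
	sort.SliceStable(all, func(i, j int) bool { return all[i].t < all[j].t })
	out := all[:0]
	for _, s := range all {
		if len(out) == 0 || out[len(out)-1].t != s.t {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	a := []sample{{1000, 1}, {3000, 3}}
	b := []sample{{2000, 2}, {3000, 3.5}}
	fmt.Println(mergeSorted(a, b)) // [{1000 1} {2000 2} {3000 3}]
}
```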
## PromQL engine

View file

@ -127,7 +127,15 @@
{
alert: 'PrometheusNotIngestingSamples',
expr: |||
rate(prometheus_tsdb_head_samples_appended_total{%(prometheusSelector)s}[5m]) <= 0
(
rate(prometheus_tsdb_head_samples_appended_total{%(prometheusSelector)s}[5m]) <= 0
and
(
sum without(scrape_job) (prometheus_target_metadata_cache_entries{%(prometheusSelector)s}) > 0
or
sum without(rule_group) (prometheus_rule_group_rules{%(prometheusSelector)s}) > 0
)
)
||| % $._config,
'for': '10m',
labels: {
@ -259,6 +267,20 @@
description: 'Prometheus %(prometheusName)s has missed {{ printf "%%.0f" $value }} rule group evaluations in the last 5m.' % $._config,
},
},
{
alert: 'PrometheusTargetLimitHit',
expr: |||
increase(prometheus_target_scrape_pool_exceeded_target_limit_total{%(prometheusSelector)s}[5m]) > 0
||| % $._config,
'for': '15m',
labels: {
severity: 'warning',
},
annotations: {
summary: 'Prometheus has dropped targets because some scrape configs have exceeded the targets limit.',
description: 'Prometheus %(prometheusName)s has dropped {{ printf "%%.0f" $value }} targets because the number of targets exceeded the configured target_limit.' % $._config,
},
},
],
},
],

View file

@ -99,7 +99,7 @@ local template = grafana.template;
),
// Remote write specific dashboard.
'prometheus-remote-write.json':
local timestampComparison =
local timestampComparison =
graphPanel.new(
'Highest Timestamp In vs. Highest Timestamp Sent',
datasource='$datasource',
@ -110,13 +110,13 @@ local template = grafana.template;
(
prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}
-
ignoring(remote_name, url) group_right(instance) prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance"}
ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance"} != 0)
)
|||,
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}',
));
local timestampComparisonRate =
local timestampComparisonRate =
graphPanel.new(
'Rate[5m]',
datasource='$datasource',
@ -124,11 +124,11 @@ local template = grafana.template;
)
.addTarget(prometheus.target(
|||
(
clamp_min(
rate(prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}[5m])
-
ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance"}[5m])
)
, 0)
|||,
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}',
));
@ -206,8 +206,8 @@ local template = grafana.template;
'prometheus_remote_storage_shard_capacity{cluster=~"$cluster", instance=~"$instance"}',
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
));
local pendingSamples =
graphPanel.new(
'Pending Samples',
@ -219,7 +219,7 @@ local template = grafana.template;
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
));
local walSegment =
local walSegment =
graphPanel.new(
'TSDB Current Segment',
datasource='$datasource',
@ -231,7 +231,7 @@ local template = grafana.template;
legendFormat='{{cluster}}:{{instance}}'
));
local queueSegment =
local queueSegment =
graphPanel.new(
'Remote Write Current Segment',
datasource='$datasource',
@ -288,7 +288,7 @@ local template = grafana.template;
));
dashboard.new('Prometheus Remote Write',
editable=true)
editable=true)
.addTemplate(
{
hide: 0,
@ -312,7 +312,7 @@ local template = grafana.template;
text: 'All',
value: '$__all',
},
includeAll=true,
includeAll=true,
)
)
.addTemplate(
@ -326,7 +326,7 @@ local template = grafana.template;
text: 'All',
value: '$__all',
},
includeAll=true,
includeAll=true,
)
)
.addTemplate(
@ -335,7 +335,7 @@ local template = grafana.template;
'$datasource',
'label_values(prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance"}, url)' % $._config,
refresh='time',
includeAll=true,
includeAll=true,
)
)
.addRow(
@ -348,7 +348,8 @@ local template = grafana.template;
.addPanel(samplesRate)
)
.addRow(
row.new('Shards'
row.new(
'Shards'
)
.addPanel(currentShards)
.addPanel(maxShards)
@ -371,6 +372,6 @@ local template = grafana.template;
.addPanel(failedSamples)
.addPanel(retriedSamples)
.addPanel(enqueueRetries)
)
),
},
}

View file

@ -0,0 +1,8 @@
module github.com/prometheus/prometheus/documentation/prometheus-mixin
go 1.15
require (
github.com/google/go-jsonnet v0.16.0
github.com/jsonnet-bundler/jsonnet-bundler v0.4.0
)

View file

@ -0,0 +1,49 @@
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/google/go-jsonnet v0.16.0 h1:Nb4EEOp+rdeGGyB1rQ5eisgSAqrTnhf9ip+X6lzZbY0=
github.com/google/go-jsonnet v0.16.0/go.mod h1:sOcuej3UW1vpPTZOr8L7RQimqai1a57bt5j22LzGZCw=
github.com/jsonnet-bundler/jsonnet-bundler v0.4.0 h1:4BKZ6LDqPc2wJDmaKnmYD/vDjUptJtnUpai802MibFc=
github.com/jsonnet-bundler/jsonnet-bundler v0.4.0/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View file

@ -1,24 +1,24 @@
{
"version": 1,
"dependencies": [
{
"name": "grafana-builder",
"source": {
"git": {
"remote": "https://github.com/grafana/jsonnet-libs",
"subdir": "grafana-builder"
"remote": "https://github.com/grafana/grafonnet-lib.git",
"subdir": "grafonnet"
}
},
"version": "master"
},
{
"name": "grafonnet",
"source": {
"git": {
"remote": "https://github.com/grafana/grafonnet-lib",
"subdir": "grafonnet"
"remote": "https://github.com/grafana/jsonnet-libs.git",
"subdir": "grafana-builder"
}
},
"version": "master"
}
]
],
"legacyImports": true
}

View file

@ -1,10 +1,10 @@
// Copyright 2020 Google LLC
// Copyright 2020 The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -12,12 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package field_mask aliases all exported identifiers in
// package "google.golang.org/protobuf/types/known/fieldmaskpb".
package field_mask
// +build tools
import "google.golang.org/protobuf/types/known/fieldmaskpb"
// Package tools tracks dependencies for tools that are used in the build process.
// See https://github.com/golang/go/issues/25922
package tools
type FieldMask = fieldmaskpb.FieldMask
var File_google_protobuf_field_mask_proto = fieldmaskpb.File_google_protobuf_field_mask_proto
import (
_ "github.com/google/go-jsonnet/cmd/jsonnet"
_ "github.com/google/go-jsonnet/cmd/jsonnetfmt"
_ "github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb"
)

109
go.mod
View file

@ -1,23 +1,24 @@
module github.com/prometheus/prometheus
go 1.13
go 1.14
require (
github.com/Azure/azure-sdk-for-go v44.0.0+incompatible
github.com/Azure/azure-sdk-for-go v46.4.0+incompatible
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/Azure/go-autorest/autorest v0.11.2
github.com/Azure/go-autorest/autorest/adal v0.9.0
github.com/Azure/go-autorest/autorest v0.11.10
github.com/Azure/go-autorest/autorest/adal v0.9.5
github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect
github.com/Azure/go-autorest/autorest/validation v0.2.0 // indirect
github.com/HdrHistogram/hdrhistogram-go v0.9.0 // indirect
github.com/Microsoft/go-winio v0.4.14 // indirect
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d
github.com/armon/go-metrics v0.3.3 // indirect
github.com/aws/aws-sdk-go v1.33.5
github.com/cespare/xxhash v1.1.0
github.com/aws/aws-sdk-go v1.35.5
github.com/cespare/xxhash/v2 v2.1.1
github.com/containerd/containerd v1.3.4 // indirect
github.com/davecgh/go-spew v1.1.1
github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b
github.com/digitalocean/godo v1.38.0
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245
github.com/digitalocean/godo v1.46.0
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible
github.com/docker/go-connections v0.4.0 // indirect
@ -26,21 +27,18 @@ require (
github.com/go-logfmt/logfmt v0.5.0
github.com/go-openapi/strfmt v0.19.5
github.com/gogo/protobuf v1.3.1
github.com/golang/snappy v0.0.1
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99
github.com/googleapis/gnostic v0.4.0 // indirect
github.com/gophercloud/gophercloud v0.12.0
github.com/grpc-ecosystem/grpc-gateway v1.14.6
github.com/hashicorp/consul/api v1.5.0
github.com/golang/snappy v0.0.2
github.com/google/pprof v0.0.0-20201007051231-1066cbb265c7
github.com/gophercloud/gophercloud v0.13.0
github.com/grpc-ecosystem/grpc-gateway v1.15.0
github.com/hashicorp/consul/api v1.7.0
github.com/hashicorp/go-hclog v0.12.2 // indirect
github.com/hashicorp/go-immutable-radix v1.2.0 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/influxdata/influxdb v1.8.1
github.com/jpillora/backoff v1.0.0 // indirect
github.com/hetznercloud/hcloud-go v1.22.0
github.com/influxdata/influxdb v1.8.3
github.com/json-iterator/go v1.1.10
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/mattn/go-colorable v0.1.6 // indirect
github.com/miekg/dns v1.1.30
github.com/miekg/dns v1.1.31
github.com/mitchellh/mapstructure v1.2.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
@ -55,36 +53,65 @@ require (
github.com/prometheus/alertmanager v0.21.0
github.com/prometheus/client_golang v1.7.1
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.10.0
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da
github.com/prometheus/common v0.14.0
github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c
github.com/soheilhy/cmux v0.1.4
github.com/uber/jaeger-client-go v2.24.0+incompatible
github.com/uber/jaeger-lib v2.2.0+incompatible
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546
github.com/uber/jaeger-client-go v2.25.0+incompatible
github.com/uber/jaeger-lib v2.4.0+incompatible
go.mongodb.org/mongo-driver v1.3.2 // indirect
go.uber.org/atomic v1.6.0 // indirect
go.uber.org/goleak v1.0.0
golang.org/x/net v0.0.0-20200707034311-ab3426394381
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae
go.uber.org/atomic v1.7.0
go.uber.org/goleak v1.1.10
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43
golang.org/x/sync v0.0.0-20200930132711-30421366ff76
golang.org/x/sys v0.0.0-20201008064518-c1f3e3309c71
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1
google.golang.org/api v0.29.0
google.golang.org/appengine v1.6.6 // indirect
google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e
google.golang.org/grpc v1.29.1
golang.org/x/tools v0.0.0-20201008025239-9df69603baec
google.golang.org/api v0.32.0
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/fsnotify/fsnotify.v1 v1.4.7
gopkg.in/yaml.v2 v2.3.0
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
gotest.tools v2.2.0+incompatible // indirect
k8s.io/api v0.18.5
k8s.io/apimachinery v0.18.5
k8s.io/client-go v0.18.5
k8s.io/api v0.19.2
k8s.io/apimachinery v0.19.2
k8s.io/client-go v0.19.2
k8s.io/klog v1.0.0
k8s.io/utils v0.0.0-20200414100711-2df71ebbae66 // indirect
k8s.io/klog/v2 v2.3.0
)
replace k8s.io/klog => github.com/simonpasquier/klog-gokit v0.1.0
replace (
k8s.io/klog => github.com/simonpasquier/klog-gokit v0.3.0
k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v2 v2.0.1
)
exclude (
// Exclude grpc v1.30.0 because of breaking changes. See #7621.
github.com/grpc-ecosystem/grpc-gateway v1.14.7
google.golang.org/api v0.30.0
// Exclude pre-go-mod kubernetes tags, as they are older
// than v0.x releases but are picked when we update the dependencies.
k8s.io/client-go v1.4.0
k8s.io/client-go v1.4.0+incompatible
k8s.io/client-go v1.5.0
k8s.io/client-go v1.5.0+incompatible
k8s.io/client-go v1.5.1
k8s.io/client-go v1.5.1+incompatible
k8s.io/client-go v10.0.0+incompatible
k8s.io/client-go v11.0.0+incompatible
k8s.io/client-go v2.0.0+incompatible
k8s.io/client-go v2.0.0-alpha.1+incompatible
k8s.io/client-go v3.0.0+incompatible
k8s.io/client-go v3.0.0-beta.0+incompatible
k8s.io/client-go v4.0.0+incompatible
k8s.io/client-go v4.0.0-beta.0+incompatible
k8s.io/client-go v5.0.0+incompatible
k8s.io/client-go v5.0.1+incompatible
k8s.io/client-go v6.0.0+incompatible
k8s.io/client-go v7.0.0+incompatible
k8s.io/client-go v8.0.0+incompatible
k8s.io/client-go v9.0.0+incompatible
k8s.io/client-go v9.0.0-invalid+incompatible
)

288
go.sum
View file

@ -10,41 +10,55 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0 h1:WRz29PgAsVEyPSDHyk+0fpEkwEFyfhHn+JbksT6gIL4=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v44.0.0+incompatible h1:e82Yv2HNpS0kuyeCrV29OPKvEiqfs2/uJHic3/3iKdg=
github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v46.4.0+incompatible h1:fCN6Pi+tEiEwFa8RSmtVlFHRXEZ+DJm9gfx/MKqYWw4=
github.com/Azure/azure-sdk-for-go v46.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.11.2 h1:BR5GoSGobeiMwGOOIxXuvNKNPy+HMGdteKB8kJUDnBE=
github.com/Azure/go-autorest/autorest v0.11.2/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest v0.11.10 h1:j5sGbX7uj1ieYYkQ3Mpvewd4DCsEQ+ZeJpqnSM9pjnM=
github.com/Azure/go-autorest/autorest v0.11.10/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8=
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4=
@ -61,6 +75,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/HdrHistogram/hdrhistogram-go v0.9.0 h1:dpujRju0R4M/QZzcnR1LH1qm+TVG3UzkWdp5tH1WMcg=
github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
@ -97,6 +113,7 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV
github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro=
github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
@ -104,8 +121,8 @@ github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06
github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.33.5 h1:p2fr1ryvNTU6avUWLI+/H7FGv0TBIjzVM5WDgXBBv4U=
github.com/aws/aws-sdk-go v1.33.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.35.5 h1:doSEOxC0UkirPcle20Rc+1kAhJ4Ip+GSEeZ3nKl7Qlk=
github.com/aws/aws-sdk-go v1.35.5/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@ -149,10 +166,10 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b h1:Yqiad0+sloMPdd/0Fg22actpFx0dekpzt1xJmVNVkU0=
github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/digitalocean/godo v1.38.0 h1:to+pLe5RJqflJiyxhaLJfJgT3YzwHRSg19mOWkKt6A0=
github.com/digitalocean/godo v1.38.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/digitalocean/godo v1.46.0 h1:WRbwjATilgz2NE4NGMeSDpeicy9h4xSKNGuRJ/Nq/fA=
github.com/digitalocean/godo v1.46.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible h1:+mzU0jHyjWpYHiD0StRlsVXkCvecWS2hc55M3OlUJSk=
@ -178,16 +195,20 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@ -206,8 +227,6 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
@ -278,7 +297,6 @@ github.com/go-openapi/validate v0.19.8 h1:YFzsdWIDfVuLvIOF+ZmKjVg1MbPJ1QgY9PihMw
github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
@ -305,6 +323,7 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe
github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@ -324,12 +343,14 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@ -342,6 +363,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw=
github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@ -351,19 +374,29 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201007051231-1066cbb265c7 h1:qYWTuM6SUNWgtvkhV8oH6GFHCpU+rKQOxPcepM3xKi0=
github.com/google/pprof v0.0.0-20201007051231-1066cbb265c7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
@ -371,31 +404,29 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.4.0 h1:BXDUo8p/DaxC+4FJY/SSx3gvnx9C1VdHNgaUkiEL5mk=
github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gophercloud/gophercloud v0.12.0 h1:mZrie07npp6ODiwHZolTicr5jV8Ogn43AvAsSMm6Ork=
github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss=
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/gophercloud/gophercloud v0.13.0 h1:1XkslZZRm6Ks0bLup+hBNth+KQf+0JA1UeoB7YKw9E8=
github.com/gophercloud/gophercloud v0.13.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.14.6 h1:8ERzHx8aj1Sc47mu9n/AksaKCSWrMchFtkdrS4BIj5o=
github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
github.com/grpc-ecosystem/grpc-gateway v1.15.0 h1:ntPNC9TD/6l2XDenJZe6T5lSMg95thpV9sGAqHX4WU8=
github.com/grpc-ecosystem/grpc-gateway v1.15.0/go.mod h1:vO11I9oWA+KsxmfFQPhLnnIb1VDE24M+pdxZFiuZcA8=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.5.0 h1:Yo2bneoGy68A7aNwmuETFnPhjyBEm7n3vzRacEVMjvI=
github.com/hashicorp/consul/api v1.5.0/go.mod h1:LqwrLNW876eYSuUOo4ZLHBcdKc038txr/IMfbLPATa4=
github.com/hashicorp/consul/api v1.7.0 h1:tGs8Oep67r8CcA2Ycmb/8BLBcJ70St44mF2X10a/qPg=
github.com/hashicorp/consul/api v1.7.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.5.0 h1:WC4594Wp/LkEeML/OdQKEC1yqBmEYkRp6i7X5u0zDAs=
github.com/hashicorp/consul/sdk v0.5.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
github.com/hashicorp/consul/sdk v0.6.0 h1:FfhMEkwvQl57CildXJyGHnwGGM4HMODGyfjGwNM1Vdw=
github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
@ -411,6 +442,8 @@ github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqk
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
@ -432,13 +465,13 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/memberlist v0.2.0 h1:WeeNspppWi5s1OFefTviPQueC/Bq8dONfvNjPhiEQKE=
github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g=
github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.9.0 h1:+Zd/16AJ9lxk9RzfTDyv/TLhZ8UerqYS0/+JGCIDaa0=
github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU=
github.com/hashicorp/serf v0.9.3 h1:AVF6JDQQens6nMHT9OGERBvK0f8rPrAGILnsKLr6lzM=
github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
github.com/hetznercloud/hcloud-go v1.22.0 h1:CC0jwkaBzwP4ObFE0sdJBTvGh5DE9kB/tuDETnRfOik=
github.com/hetznercloud/hcloud-go v1.22.0/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
@ -446,11 +479,11 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68U
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY=
github.com/influxdata/influxdb v1.8.1 h1:kzu28jHvuG3ZF6UUmQCtgHpsKlYPogzaEDnb88q23H0=
github.com/influxdata/influxdb v1.8.1/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ=
github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
github.com/influxdata/influxdb v1.8.3 h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vAiBg8=
github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo=
github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
@ -458,8 +491,10 @@ github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mq
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
@ -492,10 +527,14 @@ github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH6
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
@ -534,9 +573,10 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.30 h1:Qww6FseFn8PRfw07jueqIXqodm0JKiiKuK0DeXSqfyo=
github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo=
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@ -614,6 +654,7 @@ github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChl
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
@ -628,6 +669,7 @@ github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUI
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prometheus/alertmanager v0.21.0 h1:qK51JcUR9l/unhawGA9F9B64OCYfcGewhPNprem/Acc=
github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@ -652,6 +694,8 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.14.0 h1:RHRyE8UocrbjU+6UvRzwi6HjiDfxrrBU91TtbKzkGp4=
github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@ -672,8 +716,9 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco=
github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
@ -684,15 +729,19 @@ github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJ
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c h1:XLPw6rny9Vrrvrzhw8pNLrC2+x/kH0a/3gOx5xWDa6Y=
github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/simonpasquier/klog-gokit v0.1.0 h1:l3GGzgwlUF4vC1ApCOEsMsV+6nJPM01VoVCUCZgOIUw=
github.com/simonpasquier/klog-gokit v0.1.0/go.mod h1:4lorAA0CyDox4KO34BrvNAJk8J2Ma/M9Q2BDkR38vSI=
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU=
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/simonpasquier/klog-gokit v0.3.0 h1:TkFK21cbwDRS+CiystjqbAiq5ubJcVTk9hLUck5Ntcs=
github.com/simonpasquier/klog-gokit v0.3.0/go.mod h1:+SUlDQNrhVtGt2FieaqNftzzk8P72zpWlACateWxA9k=
github.com/simonpasquier/klog-gokit/v2 v2.0.1 h1:v7vrNd8wve5mHjX6R7kKUfR/ebJJ/LUi06NveGAvdcU=
github.com/simonpasquier/klog-gokit/v2 v2.0.1/go.mod h1:VgeTFrwzYYcMH8edEfh3/ai2j/Yg8c/qIm1bkGkhuJg=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
@@ -713,6 +762,7 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@@ -725,10 +775,10 @@ github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/uber/jaeger-client-go v2.24.0+incompatible h1:CGchgJcHsDd2jWnaL4XngByMrXoGHh3n8oCqAKx0uMo=
github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.4.0+incompatible h1:fY7QsGQWiCt8pajv4r7JEvmATdCVaWxXbjwyYwsNaLQ=
github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
@@ -741,6 +791,7 @@ github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cim
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@@ -756,12 +807,14 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo=
go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
@@ -770,7 +823,6 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
@@ -783,9 +835,11 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -823,7 +877,6 @@ golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -843,29 +896,37 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0 h1:wBouT66WTYFXdxfVdz9sVWARVd/2vfGcmI45D2gj45M=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -877,7 +938,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03i
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sync v0.0.0-20200930132711-30421366ff76 h1:JnxiSYT3Nm0BT2a8CyvYyM6cnrWpidecD1UuSYbhKm0=
golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -886,7 +948,6 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -906,8 +967,8 @@ golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -922,19 +983,30 @@ golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201008064518-c1f3e3309c71 h1:ZPX6UakxrJCxWiyGWpXtFY+fp86Esy7xJT/jJCG8bgU=
golang.org/x/sys v0.0.0-20201008064518-c1f3e3309c71/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -979,7 +1051,6 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -992,14 +1063,27 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97 h1:DAuln/hGp+aJiHpID1Y1hYzMEPP5WLwtZHPb50mN0OE=
golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1 h1:rD1FcWVsRaMY+l8biE9jbWP5MS/CJJ/90a9TMkMgNrM=
golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200828161849-5deb26317202/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201008025239-9df69603baec h1:RY2OghEV/7X1MLaecgm1mwFd3sGvUddm5pGVSxQvX0c=
golang.org/x/tools v0.0.0-20201008025239-9df69603baec/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
@@ -1016,9 +1100,14 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.31.0/go.mod h1:CL+9IBCa2WWU6gRuBWaKqGWLFFwbEUXkfeMkHLQWYWo=
google.golang.org/api v0.32.0 h1:Le77IccnTqEa8ryp9wIpX5W3zYm7Gf9LhOp9PHcwFts=
google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1047,11 +1136,21 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 h1:fiNLklpBwWK1mth30Hlwk+fcdBmIALlgF5iy77O37Ig=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e h1:k+p/u26/lVeNEpdxSeUrm7rTvoFckBKaf7gTzgmHyDA=
google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200831141814-d751682dd103/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d h1:92D1fum1bJLKSdr11OJ+54YeCMCGYIygTA7R/YZxH5M=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@@ -1068,12 +1167,17 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0=
google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
@@ -1103,7 +1207,6 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -1118,27 +1221,24 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.18.5 h1:fKbCxr+U3fu7k6jB+QeYPD/c6xKYeSJ2KVWmyUypuWM=
k8s.io/api v0.18.5/go.mod h1:tN+e/2nbdGKOAH55NMV8oGrMG+3uRlA9GaRfvnCCSNk=
k8s.io/apimachinery v0.18.5 h1:Lh6tgsM9FMkC12K5T5QjRm7rDs6aQN5JHkA0JomULDM=
k8s.io/apimachinery v0.18.5/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/client-go v0.18.5 h1:cLhGZdOmyPhwtt20Lrb7uAqxxB1uvY+NTmNJvno1oKA=
k8s.io/client-go v0.18.5/go.mod h1:EsiD+7Fx+bRckKWZXnAXRKKetm1WuzPagH4iOSC8x58=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200414100711-2df71ebbae66 h1:Ly1Oxdu5p5ZFmiVT71LFgeZETvMfZ1iBIGeOenT2JeM=
k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms=
k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI=
k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc=
k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc=
k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg=
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=

Some files were not shown because too many files have changed in this diff