mirror of https://github.com/prometheus/prometheus.git (synced 2025-03-05 20:59:13 -08:00)
commit 46514c6b12

@@ -99,21 +99,13 @@ jobs:
     steps:
       - checkout
       - run: go install ./cmd/promtool/.
-      - run:
-          command: go install -mod=readonly github.com/google/go-jsonnet/cmd/jsonnet github.com/google/go-jsonnet/cmd/jsonnetfmt github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
-          working_directory: ~/project/documentation/prometheus-mixin
-      - run:
-          command: make clean
-          working_directory: ~/project/documentation/prometheus-mixin
-      - run:
-          command: jb install
-          working_directory: ~/project/documentation/prometheus-mixin
-      - run:
-          command: make
-          working_directory: ~/project/documentation/prometheus-mixin
-      - run:
-          command: git diff --exit-code
-          working_directory: ~/project/documentation/prometheus-mixin
+      - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
+      - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
+      - run: go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest
+      - run: make -C documentation/prometheus-mixin clean
+      - run: make -C documentation/prometheus-mixin jb_install
+      - run: make -C documentation/prometheus-mixin
+      - run: git diff --exit-code

   repo_sync:
     executor: golang

.github/workflows/golangci-lint.yml (vendored, 2 lines changed)

@@ -7,6 +7,7 @@ on:
       - "**.go"
       - "scripts/errcheck_excludes.txt"
       - ".github/workflows/golangci-lint.yml"
+      - ".golangci.yml"
   pull_request:
     paths:
       - "go.sum"

@@ -14,6 +15,7 @@ on:
       - "**.go"
       - "scripts/errcheck_excludes.txt"
       - ".github/workflows/golangci-lint.yml"
+      - ".golangci.yml"

 jobs:
   golangci:

.gitignore (vendored, 2 lines changed)

@@ -8,7 +8,9 @@
 /promtool
 benchmark.txt
 /data
+/data-agent
 /cmd/prometheus/data
+/cmd/prometheus/data-agent
 /cmd/prometheus/debug
 /benchout
 /cmd/promtool/data

@@ -1,10 +1,15 @@
 run:
   deadline: 5m
+  skip-files:
+    # Skip autogenerated files.
+    - ^.*\.(pb|y)\.go$

 linters:
   enable:
     - depguard
-    - golint
+    - gofumpt
+    - goimports
+    - revive

 issues:
   exclude-rules:

@@ -25,3 +30,7 @@ linters-settings:
     - github.com/go-kit/kit/log: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
   errcheck:
     exclude: scripts/errcheck_excludes.txt
+  goimports:
+    local-prefixes: github.com/prometheus/prometheus
+  gofumpt:
+    extra-rules: true

CHANGELOG.md (26 lines changed)

@@ -1,3 +1,28 @@
+## 2.31.0 / 2021-11-02
+
+* [CHANGE] UI: Remove standard PromQL editor in favour of the codemirror-based editor. #9452
+* [FEATURE] PromQL: Add trigonometric functions and `atan2` binary operator. #9239 #9248 #9515
+* [FEATURE] Remote: Add support for exemplar in the remote write receiver endpoint. #9319 #9414
+* [FEATURE] SD: Add PuppetDB service discovery. #8883
+* [FEATURE] SD: Add Uyuni service discovery. #8190
+* [FEATURE] Web: Add support for security-related HTTP headers. #9546
+* [ENHANCEMENT] Azure SD: Add `proxy_url`, `follow_redirects`, `tls_config`. #9267
+* [ENHANCEMENT] Backfill: Add `--max-block-duration` in `promtool create-blocks-from rules`. #9511
+* [ENHANCEMENT] Config: Print human-readable sizes with unit instead of raw numbers. #9361
+* [ENHANCEMENT] HTTP: Re-enable HTTP/2. #9398
+* [ENHANCEMENT] Kubernetes SD: Warn user if number of endpoints exceeds limit. #9467
+* [ENHANCEMENT] OAuth2: Add TLS configuration to token requests. #9550
+* [ENHANCEMENT] PromQL: Several optimizations. #9365 #9360 #9362 #9552
+* [ENHANCEMENT] PromQL: Make aggregations deterministic in instant queries. #9459
+* [ENHANCEMENT] Rules: Add the ability to limit number of alerts or series. #9260 #9541
+* [ENHANCEMENT] SD: Experimental discovery manager to avoid restarts upon reload. Disabled by default, enable with flag `--enable-feature=new-service-discovery-manager`. #9349 #9537
+* [ENHANCEMENT] UI: Debounce timerange setting changes. #9359
+* [BUGFIX] Backfill: Apply rule labels after query labels. #9421
+* [BUGFIX] Scrape: Resolve conflicts between multiple exported label prefixes. #9479 #9518
+* [BUGFIX] Scrape: Restart scrape loops when `__scrape_interval__` is changed. #9551
+* [BUGFIX] TSDB: Fix memory leak in samples deletion. #9151
+* [BUGFIX] UI: Use consistent margin-bottom for all alert kinds. #9318
+
 ## 2.30.3 / 2021-10-05

 * [BUGFIX] TSDB: Fix panic on failed snapshot replay. #9438

@@ -116,6 +141,7 @@ This vulnerability has been reported by Aaron Devaney from MDSec.

 ## 2.27.0 / 2021-05-12

 * [CHANGE] Remote write: Metric `prometheus_remote_storage_samples_bytes_total` renamed to `prometheus_remote_storage_bytes_total`. #8296
+* [FEATURE] Promtool: Retroactive rule evaluation functionality. #7675
 * [FEATURE] Configuration: Environment variable expansion for external labels. Behind `--enable-feature=expand-external-labels` flag. #8649
 * [FEATURE] TSDB: Add a flag(`--storage.tsdb.max-block-chunk-segment-size`) to control the max chunks file size of the blocks for small Prometheus instances. #8478

@@ -1,4 +1,4 @@
-Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) is the main/default maintainer, some parts of the codebase have other maintainers:
+Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison (<levi@leviharrison.dev> / @LeviHarrison) are the main/default maintainers, some parts of the codebase have other maintainers:

 * `cmd`
   * `promtool`: David Leadbeater (<dgl@dgl.cx> / @dgl), Jessica Grebenschikov (<jessica.greben1@gmail.com> / @jessicagreben)

@@ -19,4 +19,3 @@ size of this repository, the natural changes in focus of maintainers over time,
 and nuances of where particular features live, this list will always be
 incomplete and out of date. However the listed maintainer(s) should be able to
 direct a PR/question to the right person.
-

@@ -162,7 +162,7 @@ endif
 update-go-deps:
 	@echo ">> updating Go dependencies"
 	@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
-		$(GO) get $$m; \
+		$(GO) get -d $$m; \
 	done
 	GO111MODULE=$(GO111MODULE) $(GO) mod tidy
 ifneq (,$(wildcard vendor))

README.md (10 lines changed)

@@ -55,10 +55,10 @@ Prometheus will now be reachable at http://localhost:9090/.

 ### Building from source

-To build Prometheus from source code, first ensure that you have a working
-Go environment with [version 1.14 or greater installed](https://golang.org/doc/install).
-You also need [Node.js](https://nodejs.org/) and [npm](https://www.npmjs.com/)
-installed in order to build the frontend assets.
+To build Prometheus from source code, You need:
+* Go [version 1.14 or greater](https://golang.org/doc/install).
+* NodeJS [version 16 or greater](https://nodejs.org/).
+* npm [version 7 or greater](https://www.npmjs.com/).

 You can directly use the `go` tool to download and install the `prometheus`
 and `promtool` binaries into your `GOPATH`:

@@ -107,7 +107,7 @@ You can build a docker image locally with the following commands:

 ## React UI Development

-For more information on building, running, and developing on the new React-based UI, see the React app's [README.md](web/ui/react-app/README.md).
+For more information on building, running, and developing on the new React-based UI, see the React app's [README.md](web/ui/README.md).

 ## More information

RELEASE.md (25 lines changed)

@@ -35,8 +35,8 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.28 | 2021-06-16 | Julius Volz (GitHub: @juliusv) |
 | v2.29 | 2021-07-28 | Frederic Branczyk (GitHub: @brancz) |
 | v2.30 | 2021-09-08 | Ganesh Vernekar (GitHub: @codesome) |
-| v2.31 | 2021-10-20 | **searching for volunteer** |
-| v2.32 | 2021-12-01 | **searching for volunteer** |
+| v2.31 | 2021-10-20 | Julien Pivotto (GitHub: @roidelapluie) |
+| v2.32 | 2021-12-01 | Julius Volz (GitHub: @juliusv) |
 | v2.33 | 2022-01-12 | **searching for volunteer** |

 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.

@@ -95,24 +95,13 @@ git commit -m "Update dependencies"

 #### Updating React dependencies

-Either upgrade the dependencies within their existing version constraints as specified in the `package.json` file (see https://docs.npmjs.com/files/package.json#dependencies):
-
-```
-cd web/ui/react-app
-npm update
-git add package.json package-lock.json
-```
-
-Or alternatively, update all dependencies to their latest major versions. This is potentially more disruptive and will require more follow-up fixes, but should be done from time to time (use your best judgement):
-
-```
-cd web/ui/react-app
-npx npm-check-updates -u
-npm install
-git add package.json package-lock.json
-```
-
-You can find more details on managing npm dependencies and updates [in this blog post](https://www.carlrippon.com/upgrading-npm-dependencies/).
+The React application recently moved to a monorepo system with multiple internal npm packages. Dependency upgrades are
+quite sensitive for the time being and should be done manually with caution.
+
+When you want to update a dependency, you have to go to every internal npm package where the dependency is used and
+manually change the version. Once you have taken care of that, you need to go back to `web/ui` and run `npm install`
+
+**NOTE**: We are researching ways to automate and improve this.

 ### 1. Prepare your release

@@ -58,6 +58,8 @@ import (
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery"
 	_ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations.
+	"github.com/prometheus/prometheus/discovery/legacymanager"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/notifier"
 	"github.com/prometheus/prometheus/pkg/exemplar"
 	"github.com/prometheus/prometheus/pkg/labels"

@@ -70,11 +72,14 @@ import (
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/storage/remote"
 	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/tsdb/agent"
 	"github.com/prometheus/prometheus/util/strutil"
 	"github.com/prometheus/prometheus/web"
 )

 var (
+	appName = "prometheus"
+
 	configSuccess = prometheus.NewGauge(prometheus.GaugeOpts{
 		Name: "prometheus_config_last_reload_successful",
 		Help: "Whether the last configuration reload attempt was successful.",

@@ -86,10 +91,13 @@ var (
 	defaultRetentionString   = "15d"
 	defaultRetentionDuration model.Duration
+
+	agentMode                       bool
+	agentOnlyFlags, serverOnlyFlags []string
 )

 func init() {
-	prometheus.MustRegister(version.NewCollector("prometheus"))
+	prometheus.MustRegister(version.NewCollector(strings.ReplaceAll(appName, "-", "_")))

 	var err error
 	defaultRetentionDuration, err = model.ParseDuration(defaultRetentionString)

@@ -98,6 +106,37 @@ func init() {
 	}
 }

+// agentOnlySetting can be provided to a kingpin flag's PreAction to mark a
+// flag as agent-only.
+func agentOnlySetting() func(*kingpin.ParseContext) error {
+	return func(pc *kingpin.ParseContext) error {
+		agentOnlyFlags = append(agentOnlyFlags, extractFlagName(pc))
+		return nil
+	}
+}
+
+// serverOnlySetting can be provided to a kingpin flag's PreAction to mark a
+// flag as server-only.
+func serverOnlySetting() func(*kingpin.ParseContext) error {
+	return func(pc *kingpin.ParseContext) error {
+		serverOnlyFlags = append(serverOnlyFlags, extractFlagName(pc))
+		return nil
+	}
+}
+
+// extractFlagName gets the flag name from the ParseContext. Only call
+// from agentOnlySetting or serverOnlySetting.
+func extractFlagName(pc *kingpin.ParseContext) string {
+	for _, pe := range pc.Elements {
+		fc, ok := pe.Clause.(*kingpin.FlagClause)
+		if !ok {
+			continue
+		}
+		return "--" + fc.Model().Name
+	}
+	panic("extractFlagName not called from a kingpin PreAction. This is a bug, please report to Prometheus.")
+}
+
 type flagConfig struct {
 	configFile string

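The helpers above can be exercised in isolation. The following self-contained sketch reuses the two functions from the hunk with gopkg.in/alecthomas/kingpin.v2 (the `demo` app name and single flag are illustrative, not from the commit); kingpin runs a flag's PreAction only when that flag is actually set, which is what makes the bookkeeping work:

```go
package main

import (
	"fmt"
	"os"

	kingpin "gopkg.in/alecthomas/kingpin.v2"
)

var serverOnlyFlags []string

// serverOnlySetting records the name of any flag it is attached to, exactly
// as in the diff above.
func serverOnlySetting() func(*kingpin.ParseContext) error {
	return func(pc *kingpin.ParseContext) error {
		serverOnlyFlags = append(serverOnlyFlags, extractFlagName(pc))
		return nil
	}
}

// extractFlagName returns the first flag clause in the parse context,
// mirroring the helper in the hunk.
func extractFlagName(pc *kingpin.ParseContext) string {
	for _, pe := range pc.Elements {
		if fc, ok := pe.Clause.(*kingpin.FlagClause); ok {
			return "--" + fc.Model().Name
		}
	}
	panic("extractFlagName not called from a kingpin PreAction.")
}

func main() {
	app := kingpin.New("demo", "PreAction bookkeeping demo.")
	app.Flag("storage.tsdb.path", "Base path for metrics storage.").
		PreAction(serverOnlySetting()).
		Default("data/").String()
	kingpin.MustParse(app.Parse(os.Args[1:]))
	fmt.Printf("server-only flags set: %q\n", serverOnlyFlags)
}
```

Running `demo --storage.tsdb.path=/tmp/x` would print the flag name; running with no arguments would print an empty list, since the PreAction never fires.
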
@@ -109,6 +148,7 @@ type flagConfig struct {
 	web           web.Options
 	scrape        scrape.Options
 	tsdb          tsdbOptions
+	agent         agentOptions
 	lookbackDelta model.Duration
 	webTimeout    model.Duration
 	queryTimeout  model.Duration

@@ -122,6 +162,7 @@ type flagConfig struct {
 	enablePromQLAtModifier     bool
 	enablePromQLNegativeOffset bool
 	enableExpandExternalLabels bool
+	enableNewSDManager         bool

 	prometheusURL   string
 	corsRegexString string

@@ -156,6 +197,12 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		case "extra-scrape-metrics":
 			c.scrape.ExtraMetrics = true
 			level.Info(logger).Log("msg", "Experimental additional scrape metrics")
+		case "new-service-discovery-manager":
+			c.enableNewSDManager = true
+			level.Info(logger).Log("msg", "Experimental service discovery manager")
+		case "agent":
+			agentMode = true
+			level.Info(logger).Log("msg", "Experimental agent mode enabled.")
 		case "":
 			continue
 		default:

@@ -190,7 +237,7 @@ func main() {

 	a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server").UsageWriter(os.Stdout)

-	a.Version(version.Print("prometheus"))
+	a.Version(version.Print(appName))

 	a.HelpFlag.Short('h')

@@ -239,60 +286,105 @@ func main() {
 		Default(".*").StringVar(&cfg.corsRegexString)

 	a.Flag("storage.tsdb.path", "Base path for metrics storage.").
+		PreAction(serverOnlySetting()).
 		Default("data/").StringVar(&cfg.localStoragePath)

 	a.Flag("storage.tsdb.min-block-duration", "Minimum duration of a data block before being persisted. For use in testing.").
+		PreAction(serverOnlySetting()).
 		Hidden().Default("2h").SetValue(&cfg.tsdb.MinBlockDuration)

 	a.Flag("storage.tsdb.max-block-duration",
 		"Maximum duration compacted blocks may span. For use in testing. (Defaults to 10% of the retention period.)").
+		PreAction(serverOnlySetting()).
 		Hidden().PlaceHolder("<duration>").SetValue(&cfg.tsdb.MaxBlockDuration)

 	a.Flag("storage.tsdb.max-block-chunk-segment-size",
 		"The maximum size for a single chunk segment in a block. Example: 512MB").
+		PreAction(serverOnlySetting()).
 		Hidden().PlaceHolder("<bytes>").BytesVar(&cfg.tsdb.MaxBlockChunkSegmentSize)

 	a.Flag("storage.tsdb.wal-segment-size",
 		"Size at which to split the tsdb WAL segment files. Example: 100MB").
+		PreAction(serverOnlySetting()).
 		Hidden().PlaceHolder("<bytes>").BytesVar(&cfg.tsdb.WALSegmentSize)

 	a.Flag("storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead.").
+		PreAction(serverOnlySetting()).
 		SetValue(&oldFlagRetentionDuration)

 	a.Flag("storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
+		PreAction(serverOnlySetting()).
 		SetValue(&newFlagRetentionDuration)

 	a.Flag("storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\".").
+		PreAction(serverOnlySetting()).
 		BytesVar(&cfg.tsdb.MaxBytes)

 	a.Flag("storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
+		PreAction(serverOnlySetting()).
 		Default("false").BoolVar(&cfg.tsdb.NoLockfile)

 	a.Flag("storage.tsdb.allow-overlapping-blocks", "Allow overlapping blocks, which in turn enables vertical compaction and vertical query merge.").
+		PreAction(serverOnlySetting()).
 		Default("false").BoolVar(&cfg.tsdb.AllowOverlappingBlocks)

 	a.Flag("storage.tsdb.wal-compression", "Compress the tsdb WAL.").
+		PreAction(serverOnlySetting()).
 		Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)

+	a.Flag("storage.agent.path", "Base path for metrics storage.").
+		PreAction(agentOnlySetting()).
+		Default("data-agent/").StringVar(&cfg.localStoragePath)
+
+	a.Flag("storage.agent.wal-segment-size",
+		"Size at which to split WAL segment files. Example: 100MB").
+		PreAction(agentOnlySetting()).
+		Hidden().PlaceHolder("<bytes>").BytesVar(&cfg.agent.WALSegmentSize)
+
+	a.Flag("storage.agent.wal-compression", "Compress the agent WAL.").
+		PreAction(agentOnlySetting()).
+		Default("true").BoolVar(&cfg.agent.WALCompression)
+
+	a.Flag("storage.agent.wal-truncate-frequency",
+		"The frequency at which to truncate the WAL and remove old data.").
+		PreAction(agentOnlySetting()).
+		Hidden().PlaceHolder("<duration>").SetValue(&cfg.agent.TruncateFrequency)
+
+	a.Flag("storage.agent.retention.min-time",
+		"Minimum age samples may be before being considered for deletion when the WAL is truncated").
+		PreAction(agentOnlySetting()).
+		SetValue(&cfg.agent.MinWALTime)
+
+	a.Flag("storage.agent.retention.max-time",
+		"Maximum age samples may be before being forcibly deleted when the WAL is truncated").
+		PreAction(agentOnlySetting()).
+		SetValue(&cfg.agent.MaxWALTime)
+
 	a.Flag("storage.remote.flush-deadline", "How long to wait flushing sample on shutdown or config reload.").
 		Default("1m").PlaceHolder("<duration>").SetValue(&cfg.RemoteFlushDeadline)

 	a.Flag("storage.remote.read-sample-limit", "Maximum overall number of samples to return via the remote read interface, in a single query. 0 means no limit. This limit is ignored for streamed response types.").
+		PreAction(serverOnlySetting()).
 		Default("5e7").IntVar(&cfg.web.RemoteReadSampleLimit)

 	a.Flag("storage.remote.read-concurrent-limit", "Maximum number of concurrent remote read calls. 0 means no limit.").
+		PreAction(serverOnlySetting()).
 		Default("10").IntVar(&cfg.web.RemoteReadConcurrencyLimit)

 	a.Flag("storage.remote.read-max-bytes-in-frame", "Maximum number of bytes in a single frame for streaming remote read response types before marshalling. Note that client might have limit on frame size as well. 1MB as recommended by protobuf by default.").
+		PreAction(serverOnlySetting()).
 		Default("1048576").IntVar(&cfg.web.RemoteReadBytesInFrame)

 	a.Flag("rules.alert.for-outage-tolerance", "Max time to tolerate prometheus outage for restoring \"for\" state of alert.").
+		PreAction(serverOnlySetting()).
 		Default("1h").SetValue(&cfg.outageTolerance)

 	a.Flag("rules.alert.for-grace-period", "Minimum duration between alert and restored \"for\" state. This is maintained only for alerts with configured \"for\" time greater than grace period.").
+		PreAction(serverOnlySetting()).
 		Default("10m").SetValue(&cfg.forGracePeriod)

 	a.Flag("rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
+		PreAction(serverOnlySetting()).
 		Default("1m").SetValue(&cfg.resendDelay)

 	a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").

@@ -302,24 +394,29 @@ func main() {
 		Hidden().Default("2ms").DurationVar(&scrape.ScrapeTimestampTolerance)

 	a.Flag("alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications.").
+		PreAction(serverOnlySetting()).
 		Default("10000").IntVar(&cfg.notifier.QueueCapacity)

 	// TODO: Remove in Prometheus 3.0.
 	alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String()

 	a.Flag("query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations and federation.").
+		PreAction(serverOnlySetting()).
 		Default("5m").SetValue(&cfg.lookbackDelta)

 	a.Flag("query.timeout", "Maximum time a query may take before being aborted.").
+		PreAction(serverOnlySetting()).
 		Default("2m").SetValue(&cfg.queryTimeout)

 	a.Flag("query.max-concurrency", "Maximum number of queries executed concurrently.").
+		PreAction(serverOnlySetting()).
 		Default("20").IntVar(&cfg.queryConcurrency)

 	a.Flag("query.max-samples", "Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return.").
+		PreAction(serverOnlySetting()).
 		Default("50000000").IntVar(&cfg.queryMaxSamples)

-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver, extra-scrape-metrics. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver, extra-scrape-metrics, new-service-discovery-manager. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)

 	promlogflag.AddFlags(a, &cfg.promlogConfig)

@@ -338,6 +435,16 @@ func main() {
 		os.Exit(1)
 	}

+	if agentMode && len(serverOnlyFlags) > 0 {
+		fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags)
+		os.Exit(3)
+	}
+
+	if !agentMode && len(agentOnlyFlags) > 0 {
+		fmt.Fprintf(os.Stderr, "The following flag(s) can only be used in agent mode: %q", agentOnlyFlags)
+		os.Exit(3)
+	}
+
 	cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddress)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, errors.Wrapf(err, "parse external URL %q", cfg.prometheusURL))

@@ -356,7 +463,7 @@ func main() {

 	// Throw error for invalid config before starting other components.
 	var cfgFile *config.Config
-	if cfgFile, err = config.LoadFile(cfg.configFile, false, log.NewNopLogger()); err != nil {
+	if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, log.NewNopLogger()); err != nil {
 		level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "err", err)
 		os.Exit(2)
 	}

@@ -384,7 +491,8 @@ func main() {
 	// RoutePrefix must always be at least '/'.
 	cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/")

-	{ // Time retention settings.
+	if !agentMode {
+		// Time retention settings.
 		if oldFlagRetentionDuration != 0 {
 			level.Warn(logger).Log("deprecation_notice", "'storage.tsdb.retention' flag is deprecated use 'storage.tsdb.retention.time' instead.")
 			cfg.tsdb.RetentionDuration = oldFlagRetentionDuration

@@ -409,9 +517,8 @@ func main() {
 			cfg.tsdb.RetentionDuration = y
 			level.Warn(logger).Log("msg", "Time retention value is too high. Limiting to: "+y.String())
 		}
-	}

-	{ // Max block size settings.
+		// Max block size settings.
 		if cfg.tsdb.MaxBlockDuration == 0 {
 			maxBlockDuration, err := model.ParseDuration("31d")
 			if err != nil {

@@ -459,14 +566,30 @@ func main() {
 		notifierManager = notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier"))

 		ctxScrape, cancelScrape = context.WithCancel(context.Background())
-		discoveryManagerScrape  = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), discovery.Name("scrape"))

 		ctxNotify, cancelNotify = context.WithCancel(context.Background())
-		discoveryManagerNotify  = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), discovery.Name("notify"))
+
+		discoveryManagerScrape discoveryManager
+		discoveryManagerNotify discoveryManager
 	)

+	if cfg.enableNewSDManager {
+		discovery.RegisterMetrics()
+		discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), discovery.Name("scrape"))
+		discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), discovery.Name("notify"))
+	} else {
+		legacymanager.RegisterMetrics()
+		discoveryManagerScrape = legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), legacymanager.Name("scrape"))
+		discoveryManagerNotify = legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), legacymanager.Name("notify"))
+	}
+
 	var (
 		scrapeManager = scrape.NewManager(&cfg.scrape, log.With(logger, "component", "scrape manager"), fanoutStorage)

-		opts = promql.EngineOpts{
+		queryEngine *promql.Engine
+		ruleManager *rules.Manager
+	)
+
+	if !agentMode {
+		opts := promql.EngineOpts{
 			Logger:     log.With(logger, "component", "query engine"),
 			Reg:        prometheus.DefaultRegisterer,
 			MaxSamples: cfg.queryMaxSamples,

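The swap above works because both concrete managers satisfy the small `discoveryManager` interface defined at the bottom of this file. A minimal, self-contained sketch of that selection pattern, with toy types standing in for the real managers:

```go
package main

import "fmt"

// discoveryManager mirrors the interface added in this commit, reduced to
// one method for brevity.
type discoveryManager interface {
	Run() error
}

type legacyManager struct{}

func (legacyManager) Run() error { fmt.Println("legacy manager running"); return nil }

type newManager struct{}

func (newManager) Run() error { fmt.Println("new manager running"); return nil }

func main() {
	// Would come from --enable-feature=new-service-discovery-manager.
	enableNewSDManager := true

	var m discoveryManager
	if enableNewSDManager {
		m = newManager{}
	} else {
		m = legacyManager{}
	}
	_ = m.Run()
}
```

Because the interface is satisfied implicitly, neither manager package needs to know the other exists; only the selection site in main() changes.
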
@@ -493,7 +616,7 @@ func main() {
 			ForGracePeriod: time.Duration(cfg.forGracePeriod),
 			ResendDelay:    time.Duration(cfg.resendDelay),
 		})
-	)
+	}

 	scraper.Set(scrapeManager)

@@ -509,6 +632,7 @@ func main() {
 	cfg.web.RuleManager = ruleManager
 	cfg.web.Notifier = notifierManager
 	cfg.web.LookbackDelta = time.Duration(cfg.lookbackDelta)
+	cfg.web.IsAgent = agentMode

 	cfg.web.Version = &web.PrometheusVersion{
 		Version: version.Version,

@@ -540,7 +664,7 @@ func main() {
 	)

 	// This is passed to ruleManager.Update().
-	var externalURL = cfg.web.ExternalURL.String()
+	externalURL := cfg.web.ExternalURL.String()

 	reloaders := []reloader{
 		{

@@ -555,6 +679,11 @@ func main() {
 		}, {
 			name: "query_engine",
 			reloader: func(cfg *config.Config) error {
+				if agentMode {
+					// No-op in Agent mode.
+					return nil
+				}
+
 				if cfg.GlobalConfig.QueryLogFile == "" {
 					queryEngine.SetQueryLogger(nil)
 					return nil

@@ -596,6 +725,11 @@ func main() {
 		}, {
 			name: "rules",
 			reloader: func(cfg *config.Config) error {
+				if agentMode {
+					// No-op in Agent mode
+					return nil
+				}
+
 				// Get all rule files matching the configuration paths.
 				var files []string
 				for _, pat := range cfg.RuleFiles {

@@ -762,7 +896,6 @@ func main() {
 					return nil
 				}
 			}
-
 			},
 			func(err error) {
 				// Wait for any in-progress reloads to complete to avoid

@@ -800,7 +933,7 @@ func main() {
 			},
 		)
 	}
-	{
+	if !agentMode {
 		// Rule manager.
 		g.Add(
 			func() error {

@@ -812,8 +945,7 @@ func main() {
 				ruleManager.Stop()
 			},
 		)
-	}
-	{
+
 		// TSDB.
 		opts := cfg.tsdb.ToTSDBOptions()
 		cancel := make(chan struct{})

@@ -875,6 +1007,59 @@ func main() {
 			},
 		)
 	}
+	if agentMode {
+		// WAL storage.
+		opts := cfg.agent.ToAgentOptions()
+		cancel := make(chan struct{})
+		g.Add(
+			func() error {
+				level.Info(logger).Log("msg", "Starting WAL storage ...")
+				if cfg.agent.WALSegmentSize != 0 {
+					if cfg.agent.WALSegmentSize < 10*1024*1024 || cfg.agent.WALSegmentSize > 256*1024*1024 {
+						return errors.New("flag 'storage.agent.wal-segment-size' must be set between 10MB and 256MB")
+					}
+				}
+				db, err := agent.Open(
+					logger,
+					prometheus.DefaultRegisterer,
+					remoteStorage,
+					cfg.localStoragePath,
+					&opts,
+				)
+				if err != nil {
+					return errors.Wrap(err, "opening storage failed")
+				}
+
+				switch fsType := prom_runtime.Statfs(cfg.localStoragePath); fsType {
+				case "NFS_SUPER_MAGIC":
+					level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
+				default:
+					level.Info(logger).Log("fs_type", fsType)
+				}
+
+				level.Info(logger).Log("msg", "Agent WAL storage started")
+				level.Debug(logger).Log("msg", "Agent WAL storage options",
+					"WALSegmentSize", cfg.agent.WALSegmentSize,
+					"WALCompression", cfg.agent.WALCompression,
+					"StripeSize", cfg.agent.StripeSize,
+					"TruncateFrequency", cfg.agent.TruncateFrequency,
+					"MinWALTime", cfg.agent.MinWALTime,
+					"MaxWALTime", cfg.agent.MaxWALTime,
+				)
+
+				localStorage.Set(db, 0)
+				close(dbOpen)
+				<-cancel
+				return nil
+			},
+			func(e error) {
+				if err := fanoutStorage.Close(); err != nil {
+					level.Error(logger).Log("msg", "Error stopping storage", "err", err)
+				}
+				close(cancel)
+			},
+		)
+	}
 	{
 		// Web handler.
 		g.Add(

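The `g.Add` calls here follow the actor pattern from github.com/oklog/run, which Prometheus's main() is built around: each actor is an execute function plus an interrupt function, and the group stops every actor once any one returns. A minimal standalone sketch of the same shape (the two toy actors are illustrative, not the WAL storage code):

```go
package main

import (
	"fmt"
	"time"

	"github.com/oklog/run"
)

func main() {
	var g run.Group
	cancel := make(chan struct{})

	g.Add(
		func() error {
			// Execute: block until asked to stop, like the WAL storage
			// actor above blocks on <-cancel.
			fmt.Println("storage started")
			<-cancel
			return nil
		},
		func(err error) {
			// Interrupt: release the execute function.
			close(cancel)
		},
	)
	g.Add(
		func() error {
			// A second actor whose return ends the whole group.
			time.Sleep(100 * time.Millisecond)
			return nil
		},
		func(err error) {},
	)

	fmt.Println("group exited:", g.Run())
}
```
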
@@ -960,6 +1145,7 @@ type safePromQLNoStepSubqueryInterval struct {
 func durationToInt64Millis(d time.Duration) int64 {
 	return int64(d / time.Millisecond)
 }
+
 func (i *safePromQLNoStepSubqueryInterval) Set(ev model.Duration) {
 	i.value.Store(durationToInt64Millis(time.Duration(ev)))
 }

@@ -973,7 +1159,7 @@ type reloader struct {
 	reloader func(*config.Config) error
 }

-func reloadConfig(filename string, expandExternalLabels bool, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) {
+func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) {
 	start := time.Now()
 	timings := []interface{}{}
 	level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)

@@ -987,7 +1173,7 @@ func reloadConfig(filename string, expandExternalLabels bool, enableExemplarStor
 		}
 	}()

-	conf, err := config.LoadFile(filename, expandExternalLabels, logger)
+	conf, err := config.LoadFile(filename, agentMode, expandExternalLabels, logger)
 	if err != nil {
 		return errors.Wrapf(err, "couldn't load configuration (--config.file=%q)", filename)
 	}

@@ -998,6 +1184,25 @@ func reloadConfig(filename string, expandExternalLabels bool, enableExemplarStor
 		}
 	}

+	// Perform validation for Agent-compatible configs and remove anything that's unsupported.
+	if agentMode {
+		// Perform validation for Agent-compatible configs and remove anything that's
+		// unsupported.
+		if len(conf.AlertingConfig.AlertRelabelConfigs) > 0 || len(conf.AlertingConfig.AlertmanagerConfigs) > 0 {
+			level.Warn(logger).Log("msg", "alerting configs not supported in agent mode")
+			conf.AlertingConfig.AlertRelabelConfigs = []*relabel.Config{}
+			conf.AlertingConfig.AlertmanagerConfigs = config.AlertmanagerConfigs{}
+		}
+		if len(conf.RuleFiles) > 0 {
+			level.Warn(logger).Log("msg", "recording rules not supported in agent mode")
+			conf.RuleFiles = []string{}
+		}
+		if len(conf.RemoteReadConfigs) > 0 {
+			level.Warn(logger).Log("msg", "remote_read configs not supported in agent mode")
+			conf.RemoteReadConfigs = []*config.RemoteReadConfig{}
+		}
+	}
+
 	failed := false
 	for _, rl := range rls {
 		rstart := time.Now()

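Note the stripping above is tolerant by design: unsupported sections are logged and reset rather than failing the reload. A toy, self-contained version of that shape, with stand-in types rather than the real config structs:

```go
package main

import "log"

type config struct {
	RuleFiles         []string
	RemoteReadConfigs []string
}

// stripUnsupported warns about and removes sections an agent cannot serve,
// leaving the rest of the config usable.
func stripUnsupported(conf *config) {
	if len(conf.RuleFiles) > 0 {
		log.Println("recording rules not supported in agent mode")
		conf.RuleFiles = []string{}
	}
	if len(conf.RemoteReadConfigs) > 0 {
		log.Println("remote_read configs not supported in agent mode")
		conf.RemoteReadConfigs = []string{}
	}
}

func main() {
	c := &config{RuleFiles: []string{"rules.yml"}}
	stripUnsupported(c)
	log.Printf("after stripping: %+v", *c)
}
```
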
@@ -1098,18 +1303,21 @@ func sendAlerts(s sender, externalURL string) rules.NotifyFunc {
 // storage at a later point in time.
 type readyStorage struct {
 	mtx             sync.RWMutex
-	db              *tsdb.DB
+	db              storage.Storage
 	startTimeMargin int64
 	stats           *tsdb.DBStats
 }

 func (s *readyStorage) ApplyConfig(conf *config.Config) error {
 	db := s.get()
-	return db.ApplyConfig(conf)
+	if db, ok := db.(*tsdb.DB); ok {
+		return db.ApplyConfig(conf)
+	}
+	return nil
 }

 // Set the storage.
-func (s *readyStorage) Set(db *tsdb.DB, startTimeMargin int64) {
+func (s *readyStorage) Set(db storage.Storage, startTimeMargin int64) {
 	s.mtx.Lock()
 	defer s.mtx.Unlock()

@@ -1117,7 +1325,7 @@ func (s *readyStorage) Set(db *tsdb.DB, startTimeMargin int64) {
 	s.startTimeMargin = startTimeMargin
 }

-func (s *readyStorage) get() *tsdb.DB {
+func (s *readyStorage) get() storage.Storage {
 	s.mtx.RLock()
 	x := s.db
 	s.mtx.RUnlock()

@@ -1134,15 +1342,21 @@ func (s *readyStorage) getStats() *tsdb.DBStats {
 // StartTime implements the Storage interface.
 func (s *readyStorage) StartTime() (int64, error) {
 	if x := s.get(); x != nil {
-		var startTime int64
-
-		if len(x.Blocks()) > 0 {
-			startTime = x.Blocks()[0].Meta().MinTime
-		} else {
-			startTime = time.Now().Unix() * 1000
+		switch db := x.(type) {
+		case *tsdb.DB:
+			var startTime int64
+			if len(db.Blocks()) > 0 {
+				startTime = db.Blocks()[0].Meta().MinTime
+			} else {
+				startTime = time.Now().Unix() * 1000
+			}
+			// Add a safety margin as it may take a few minutes for everything to spin up.
+			return startTime + s.startTimeMargin, nil
+		case *agent.DB:
+			return db.StartTime()
+		default:
+			panic(fmt.Sprintf("unknown storage type %T", db))
 		}
-		// Add a safety margin as it may take a few minutes for everything to spin up.
-		return startTime + s.startTimeMargin, nil
 	}

 	return math.MaxInt64, tsdb.ErrNotReady

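The readyStorage methods that follow all repeat one shape: type-switch on the concrete storage, delegate to the full TSDB, report the operation as unsupported for the agent WAL, and panic on an unknown implementation. A toy, self-contained version of the shape (the two structs are stand-ins for `*tsdb.DB` and `*agent.DB`):

```go
package main

import (
	"errors"
	"fmt"
)

var errUnsupported = errors.New("unsupported operation with WAL-only storage")

type tsdbDB struct{}

func (*tsdbDB) CleanTombstones() error { return nil }

type agentDB struct{}

type readyStorage struct {
	db interface{} // storage.Storage in the real code
}

func (s *readyStorage) CleanTombstones() error {
	switch db := s.db.(type) {
	case *tsdbDB:
		return db.CleanTombstones() // full TSDB: delegate
	case *agentDB:
		return errUnsupported // agent WAL: admin APIs are unsupported
	default:
		panic(fmt.Sprintf("unknown storage type %T", db))
	}
}

func main() {
	fmt.Println((&readyStorage{db: &agentDB{}}).CleanTombstones())
	fmt.Println((&readyStorage{db: &tsdbDB{}}).CleanTombstones())
}
```
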
@@ -1166,7 +1380,14 @@ func (s *readyStorage) ChunkQuerier(ctx context.Context, mint, maxt int64) (stor
 func (s *readyStorage) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) {
 	if x := s.get(); x != nil {
-		return x.ExemplarQuerier(ctx)
+		switch db := x.(type) {
+		case *tsdb.DB:
+			return db.ExemplarQuerier(ctx)
+		case *agent.DB:
+			return nil, agent.ErrUnsupported
+		default:
+			panic(fmt.Sprintf("unknown storage type %T", db))
+		}
 	}
 	return nil, tsdb.ErrNotReady
 }

@@ -1204,7 +1425,14 @@ func (s *readyStorage) Close() error {
 // CleanTombstones implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces.
 func (s *readyStorage) CleanTombstones() error {
 	if x := s.get(); x != nil {
-		return x.CleanTombstones()
+		switch db := x.(type) {
+		case *tsdb.DB:
+			return db.CleanTombstones()
+		case *agent.DB:
+			return agent.ErrUnsupported
+		default:
+			panic(fmt.Sprintf("unknown storage type %T", db))
+		}
 	}
 	return tsdb.ErrNotReady
 }

@@ -1212,7 +1440,14 @@ func (s *readyStorage) CleanTombstones() error {
 // Delete implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces.
 func (s *readyStorage) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
 	if x := s.get(); x != nil {
-		return x.Delete(mint, maxt, ms...)
+		switch db := x.(type) {
+		case *tsdb.DB:
+			return db.Delete(mint, maxt, ms...)
+		case *agent.DB:
+			return agent.ErrUnsupported
+		default:
+			panic(fmt.Sprintf("unknown storage type %T", db))
+		}
 	}
 	return tsdb.ErrNotReady
 }

@@ -1220,7 +1455,14 @@ func (s *readyStorage) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
 // Snapshot implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces.
 func (s *readyStorage) Snapshot(dir string, withHead bool) error {
 	if x := s.get(); x != nil {
-		return x.Snapshot(dir, withHead)
+		switch db := x.(type) {
+		case *tsdb.DB:
+			return db.Snapshot(dir, withHead)
+		case *agent.DB:
+			return agent.ErrUnsupported
+		default:
+			panic(fmt.Sprintf("unknown storage type %T", db))
+		}
 	}
 	return tsdb.ErrNotReady
 }

@@ -1228,7 +1470,14 @@ func (s *readyStorage) Snapshot(dir string, withHead bool) error {
 // Stats implements the api_v1.TSDBAdminStats interface.
 func (s *readyStorage) Stats(statsByLabelName string) (*tsdb.Stats, error) {
 	if x := s.get(); x != nil {
-		return x.Head().Stats(statsByLabelName), nil
+		switch db := x.(type) {
+		case *tsdb.DB:
+			return db.Head().Stats(statsByLabelName), nil
+		case *agent.DB:
+			return nil, agent.ErrUnsupported
+		default:
+			panic(fmt.Sprintf("unknown storage type %T", db))
+		}
 	}
 	return nil, tsdb.ErrNotReady
 }

@@ -1306,6 +1555,27 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
 	}
 }

+// agentOptions is a version of agent.Options with defined units. This is required
+// as agent.Option fields are unit agnostic (time).
+type agentOptions struct {
+	WALSegmentSize         units.Base2Bytes
+	WALCompression         bool
+	StripeSize             int
+	TruncateFrequency      model.Duration
+	MinWALTime, MaxWALTime model.Duration
+}
+
+func (opts agentOptions) ToAgentOptions() agent.Options {
+	return agent.Options{
+		WALSegmentSize:    int(opts.WALSegmentSize),
+		WALCompression:    opts.WALCompression,
+		StripeSize:        opts.StripeSize,
+		TruncateFrequency: time.Duration(opts.TruncateFrequency),
+		MinWALTime:        durationToInt64Millis(time.Duration(opts.MinWALTime)),
+		MaxWALTime:        durationToInt64Millis(time.Duration(opts.MaxWALTime)),
+	}
+}
+
 func initTracing(logger log.Logger) (io.Closer, error) {
 	// Set tracing configuration defaults.
 	cfg := &jcfg.Configuration{

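The conversion above exists because `agent.Options` is unit agnostic: durations travel as plain int64 milliseconds. A self-contained sketch of the same conversion using only the standard library (here `time.Duration` stands in for `model.Duration`, and `storageOptions` for `agent.Options`):

```go
package main

import (
	"fmt"
	"time"
)

// durationToInt64Millis matches the helper used by main.go.
func durationToInt64Millis(d time.Duration) int64 {
	return int64(d / time.Millisecond)
}

type agentOptions struct {
	TruncateFrequency      time.Duration
	MinWALTime, MaxWALTime time.Duration
}

type storageOptions struct {
	TruncateFrequency time.Duration
	MinWALTime        int64 // milliseconds
	MaxWALTime        int64 // milliseconds
}

func (opts agentOptions) toStorageOptions() storageOptions {
	return storageOptions{
		TruncateFrequency: opts.TruncateFrequency,
		MinWALTime:        durationToInt64Millis(opts.MinWALTime),
		MaxWALTime:        durationToInt64Millis(opts.MaxWALTime),
	}
}

func main() {
	o := agentOptions{MinWALTime: 5 * time.Minute, MaxWALTime: 4 * time.Hour}
	fmt.Printf("%+v\n", o.toStorageOptions()) // MinWALTime:300000 MaxWALTime:14400000
}
```
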
@@ -1346,3 +1616,12 @@ func (l jaegerLogger) Infof(msg string, args ...interface{}) {
 	keyvals := []interface{}{"msg", fmt.Sprintf(msg, args...)}
 	level.Info(l.logger).Log(keyvals...)
 }
+
+// discoveryManager interfaces the discovery manager. This is used to keep using
+// the manager that restarts SD's on reload for a few releases until we feel
+// the new manager can be enabled for all users.
+type discoveryManager interface {
+	ApplyConfig(cfg map[string]discovery.Configs) error
+	Run() error
+	SyncCh() <-chan map[string][]*targetgroup.Group
+}

@@ -35,9 +35,12 @@ import (
 	"github.com/prometheus/prometheus/rules"
 )

-var promPath = os.Args[0]
-var promConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml")
-var promData = filepath.Join(os.TempDir(), "data")
+var (
+	promPath    = os.Args[0]
+	promConfig  = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml")
+	agentConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus-agent.yml")
+	promData    = filepath.Join(os.TempDir(), "data")
+)

 func TestMain(m *testing.M) {
 	for i, arg := range os.Args {

@@ -202,7 +205,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
 	}

 	for size, expectedExitStatus := range map[string]int{"9MB": 1, "257MB": 1, "10": 2, "1GB": 1, "12MB": 0} {
-		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--config.file="+promConfig)
+		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)

 		// Log stderr in case of failure.
 		stderr, err := prom.StderrPipe()

@@ -223,6 +226,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
 			t.Errorf("prometheus should be still running: %v", err)
 		case <-time.After(5 * time.Second):
 			prom.Process.Kill()
+			<-done
 		}
 		continue
 	}

@@ -244,7 +248,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
 	}

 	for size, expectedExitStatus := range map[string]int{"512KB": 1, "1MB": 0} {
-		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--config.file="+promConfig)
+		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)

 		// Log stderr in case of failure.
 		stderr, err := prom.StderrPipe()

@@ -265,6 +269,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
 			t.Errorf("prometheus should be still running: %v", err)
 		case <-time.After(5 * time.Second):
 			prom.Process.Kill()
+			<-done
 		}
 		continue
 	}

@@ -347,3 +352,107 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
 	}
 	return res
 }
+
+func TestAgentSuccessfulStartup(t *testing.T) {
+	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+agentConfig)
+	err := prom.Start()
+	require.NoError(t, err)
+
+	expectedExitStatus := 0
+	actualExitStatus := 0
+
+	done := make(chan error, 1)
+	go func() { done <- prom.Wait() }()
+	select {
+	case err := <-done:
+		t.Logf("prometheus agent should be still running: %v", err)
+		actualExitStatus = prom.ProcessState.ExitCode()
+	case <-time.After(5 * time.Second):
+		prom.Process.Kill()
+	}
+	require.Equal(t, expectedExitStatus, actualExitStatus)
+}
+
+func TestAgentStartupWithInvalidConfig(t *testing.T) {
+	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+promConfig)
+	err := prom.Start()
+	require.NoError(t, err)
+
+	expectedExitStatus := 2
+	actualExitStatus := 0
+
+	done := make(chan error, 1)
+	go func() { done <- prom.Wait() }()
+	select {
+	case err := <-done:
+		t.Logf("prometheus agent should not be running: %v", err)
+		actualExitStatus = prom.ProcessState.ExitCode()
+	case <-time.After(5 * time.Second):
+		prom.Process.Kill()
+	}
+	require.Equal(t, expectedExitStatus, actualExitStatus)
+}
+
+func TestModeSpecificFlags(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+
+	testcases := []struct {
+		mode       string
+		arg        string
+		exitStatus int
+	}{
+		{"agent", "--storage.agent.path", 0},
+		{"server", "--storage.tsdb.path", 0},
+		{"server", "--storage.agent.path", 3},
+		{"agent", "--storage.tsdb.path", 3},
+	}
+
+	for _, tc := range testcases {
+		t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) {
+			args := []string{"-test.main", tc.arg, t.TempDir()}
+
+			if tc.mode == "agent" {
+				args = append(args, "--enable-feature=agent", "--config.file="+agentConfig)
+			} else {
+				args = append(args, "--config.file="+promConfig)
+			}
+
+			prom := exec.Command(promPath, args...)
+
+			// Log stderr in case of failure.
+			stderr, err := prom.StderrPipe()
+			require.NoError(t, err)
+			go func() {
+				slurp, _ := ioutil.ReadAll(stderr)
+				t.Log(string(slurp))
+			}()
+
+			err = prom.Start()
+			require.NoError(t, err)
+
+			if tc.exitStatus == 0 {
+				done := make(chan error, 1)
+				go func() { done <- prom.Wait() }()
+				select {
+				case err := <-done:
+					t.Errorf("prometheus should be still running: %v", err)
+				case <-time.After(5 * time.Second):
+					prom.Process.Kill()
+					<-done
+				}
+				return
+			}
+
+			err = prom.Wait()
+			require.Error(t, err)
+			if exitError, ok := err.(*exec.ExitError); ok {
+				status := exitError.Sys().(syscall.WaitStatus)
+				require.Equal(t, tc.exitStatus, status.ExitStatus())
+			} else {
+				t.Errorf("unable to retrieve the exit status for prometheus: %v", err)
+			}
+		})
+	}
+}

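All three tests share one harness pattern: start the binary, wait on a channel fed by `cmd.Wait`, and treat a five-second timeout as "still running". A stripped-down, standalone version of the pattern (the `sleep` command is a stand-in for the Prometheus binary; works on Unix-like systems):

```go
package main

import (
	"fmt"
	"os/exec"
	"time"
)

// runsForAtLeast reports whether cmd is still alive after d, killing it on
// success so the process does not linger. Draining done after Kill avoids
// leaking the Wait goroutine.
func runsForAtLeast(cmd *exec.Cmd, d time.Duration) bool {
	if err := cmd.Start(); err != nil {
		return false
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case <-done:
		return false // exited early
	case <-time.After(d):
		cmd.Process.Kill()
		<-done
		return true
	}
}

func main() {
	fmt.Println(runsForAtLeast(exec.Command("sleep", "10"), 2*time.Second))
}
```

The `<-done` after `Process.Kill()` is exactly what the two `@@ -223,6` and `@@ -265,6` hunks above add to the existing tests.
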
@@ -21,7 +21,7 @@ import (
 	"github.com/pkg/errors"
 )

-const filePerm = 0666
+const filePerm = 0o666

 type tarGzFileWriter struct {
 	tarWriter *tar.Writer

@@ -21,6 +21,7 @@ import (

 	"github.com/go-kit/log"
 	"github.com/pkg/errors"
+
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/pkg/textparse"
 	"github.com/prometheus/prometheus/tsdb"

@@ -66,7 +67,7 @@ func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) {
 	return maxt, mint, nil
 }

-func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool) (returnErr error) {
+func getCompatibleBlockDuration(maxBlockDuration int64) int64 {
 	blockDuration := tsdb.DefaultBlockDuration
 	if maxBlockDuration > tsdb.DefaultBlockDuration {
 		ranges := tsdb.ExponentialBlockRanges(tsdb.DefaultBlockDuration, 10, 3)

@@ -79,6 +80,11 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 		}
 		blockDuration = ranges[idx]
 	}
+	return blockDuration
+}
+
+func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool) (returnErr error) {
+	blockDuration := getCompatibleBlockDuration(maxBlockDuration)
 	mint = blockDuration * (mint / blockDuration)

 	db, err := tsdb.OpenDBReadOnly(outputDir, nil)

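The extracted helper clamps a requested maximum block duration to the largest compatible TSDB range not exceeding it. A self-contained sketch of that logic; the constants stand in for `tsdb.DefaultBlockDuration` and `tsdb.ExponentialBlockRanges`, and the index-selection loop sits in context lines elided from the hunk, so treat it as a paraphrase rather than the exact upstream code:

```go
package main

import "fmt"

const defaultBlockDuration = int64(2 * 60 * 60 * 1000) // 2h in milliseconds

// exponentialBlockRanges mimics tsdb.ExponentialBlockRanges: each range is
// the previous one multiplied by stepSize.
func exponentialBlockRanges(min int64, steps, stepSize int) []int64 {
	ranges := make([]int64, 0, steps)
	cur := min
	for i := 0; i < steps; i++ {
		ranges = append(ranges, cur)
		cur *= int64(stepSize)
	}
	return ranges
}

func getCompatibleBlockDuration(maxBlockDuration int64) int64 {
	blockDuration := defaultBlockDuration
	if maxBlockDuration > defaultBlockDuration {
		ranges := exponentialBlockRanges(defaultBlockDuration, 10, 3)
		idx := len(ranges) - 1
		// Pick the largest range that does not exceed the requested max.
		for i, v := range ranges {
			if v > maxBlockDuration {
				idx = i - 1
				break
			}
		}
		blockDuration = ranges[idx]
	}
	return blockDuration
}

func main() {
	// Requesting 10x the default yields the 9x range (2h, 6h, 18h, ...).
	fmt.Println(getCompatibleBlockDuration(10 * defaultBlockDuration))
}
```
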
@@ -100,7 +106,6 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 			// The next sample is not in this timerange, we can avoid parsing
 			// the file for this timerange.
 			continue
-
 		}
 		nextSampleTs = math.MaxInt64

@@ -202,13 +207,11 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn

 			return nil
 		}()
-
 		if err != nil {
 			return errors.Wrap(err, "process blocks")
 		}
 	}
 	return nil
-
 }

 func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) {

@@ -22,10 +22,11 @@ import (
 	"testing"
 	"time"

+	"github.com/stretchr/testify/require"
+
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb"
-	"github.com/stretchr/testify/require"
 )

 type backfillSample struct {

@@ -57,7 +57,6 @@ func debugWrite(cfg debugWriterConfig) error {
 			return errors.Wrap(err, "error writing into the archive")
 		}
 	}
-
 	}

 	if err := archiver.close(); err != nil {

@@ -44,13 +44,16 @@ import (
 	yaml "gopkg.in/yaml.v2"

 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery"
+	"github.com/prometheus/prometheus/discovery/file"
 	_ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations.
+	"github.com/prometheus/prometheus/discovery/kubernetes"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/notifier"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/pkg/rulefmt"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/scrape"
 )

 func main() {

@@ -60,6 +63,11 @@ func main() {

 	checkCmd := app.Command("check", "Check the resources for validity.")

+	sdCheckCmd := checkCmd.Command("service-discovery", "Perform service discovery for the given job name and report the results, including relabeling.")
+	sdConfigFile := sdCheckCmd.Arg("config-file", "The prometheus config file.").Required().ExistingFile()
+	sdJobName := sdCheckCmd.Arg("job", "The job to run service discovery for.").Required().String()
+	sdTimeout := sdCheckCmd.Flag("timeout", "The time to wait for discovery results.").Default("30s").Duration()
+
 	checkConfigCmd := checkCmd.Command("config", "Check if the config files are valid or not.")
 	configFiles := checkConfigCmd.Arg(
 		"config-files",

@@ -79,6 +87,7 @@ func main() {
 	).Required().ExistingFiles()

 	checkMetricsCmd := checkCmd.Command("metrics", checkMetricsUsage)
+	agentMode := checkConfigCmd.Flag("agent", "Check config file for Prometheus in Agent mode.").Bool()

 	queryCmd := app.Command("query", "Run query against a Prometheus server.")
 	queryCmdFmt := queryCmd.Flag("format", "Output format of the query.").Short('o').Default("promql").Enum("promql", "json")

@@ -198,8 +207,11 @@ func main() {
 	}

 	switch parsedCmd {
+	case sdCheckCmd.FullCommand():
+		os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout))
+
 	case checkConfigCmd.FullCommand():
-		os.Exit(CheckConfig(*configFiles...))
+		os.Exit(CheckConfig(*agentMode, *configFiles...))

 	case checkWebConfigCmd.FullCommand():
 		os.Exit(CheckWebConfig(*webConfigFiles...))

@@ -245,21 +257,21 @@ func main() {

 	case tsdbDumpCmd.FullCommand():
 		os.Exit(checkErr(dumpSamples(*dumpPath, *dumpMinTime, *dumpMaxTime)))
-	//TODO(aSquare14): Work on adding support for custom block size.
+	// TODO(aSquare14): Work on adding support for custom block size.
 	case openMetricsImportCmd.FullCommand():
 		os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))

 	case importRulesCmd.FullCommand():
-		os.Exit(checkErr(importRules(*importRulesURL, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *importRulesFiles...)))
+		os.Exit(checkErr(importRules(*importRulesURL, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...)))
 	}
 }

 // CheckConfig validates configuration files.
-func CheckConfig(files ...string) int {
+func CheckConfig(agentMode bool, files ...string) int {
 	failed := false

 	for _, f := range files {
-		ruleFiles, err := checkConfig(f)
+		ruleFiles, err := checkConfig(agentMode, f)
 		if err != nil {
 			fmt.Fprintln(os.Stderr, "  FAILED:", err)
 			failed = true

@@ -314,10 +326,10 @@ func checkFileExists(fn string) error {
 	return err
 }

-func checkConfig(filename string) ([]string, error) {
+func checkConfig(agentMode bool, filename string) ([]string, error) {
 	fmt.Println("Checking", filename)

-	cfg, err := config.LoadFile(filename, false, log.NewNopLogger())
+	cfg, err := config.LoadFile(filename, agentMode, false, log.NewNopLogger())
 	if err != nil {
 		return nil, err
 	}

@@ -363,19 +375,60 @@ func checkConfig(filename string) ([]string, error) {
 				}
 				if len(files) != 0 {
 					for _, f := range files {
-						err = checkSDFile(f)
+						var targetGroups []*targetgroup.Group
+						targetGroups, err = checkSDFile(f)
 						if err != nil {
 							return nil, errors.Errorf("checking SD file %q: %v", file, err)
 						}
+						if err := checkTargetGroupsForScrapeConfig(targetGroups, scfg); err != nil {
+							return nil, err
+						}
 					}
 					continue
 				}
 				fmt.Printf("  WARNING: file %q for file_sd in scrape job %q does not exist\n", file, scfg.JobName)
 			}
+		case discovery.StaticConfig:
+			if err := checkTargetGroupsForScrapeConfig(c, scfg); err != nil {
+				return nil, err
+			}
+		}
 		}
 	}

+	alertConfig := cfg.AlertingConfig
+	for _, amcfg := range alertConfig.AlertmanagerConfigs {
+		for _, c := range amcfg.ServiceDiscoveryConfigs {
+			switch c := c.(type) {
+			case *file.SDConfig:
+				for _, file := range c.Files {
+					files, err := filepath.Glob(file)
+					if err != nil {
+						return nil, err
+					}
+					if len(files) != 0 {
+						for _, f := range files {
+							var targetGroups []*targetgroup.Group
+							targetGroups, err = checkSDFile(f)
+							if err != nil {
+								return nil, errors.Errorf("checking SD file %q: %v", file, err)
+							}
+
+							if err := checkTargetGroupsForAlertmanager(targetGroups, amcfg); err != nil {
+								return nil, err
+							}
+						}
+						continue
+					}
+					fmt.Printf("  WARNING: file %q for file_sd in alertmanager config does not exist\n", file)
+				}
+			case discovery.StaticConfig:
+				if err := checkTargetGroupsForAlertmanager(c, amcfg); err != nil {
+					return nil, err
+				}
+			}
+		}
+	}
 	return ruleFiles, nil
 }

@@ -397,16 +450,16 @@ func checkTLSConfig(tlsConfig config_util.TLSConfig) error {
 	return nil
 }

-func checkSDFile(filename string) error {
+func checkSDFile(filename string) ([]*targetgroup.Group, error) {
 	fd, err := os.Open(filename)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	defer fd.Close()

 	content, err := ioutil.ReadAll(fd)
 	if err != nil {
-		return err
+		return nil, err
 	}

 	var targetGroups []*targetgroup.Group

@@ -414,23 +467,23 @@ func checkSDFile(filename string) error {
 	switch ext := filepath.Ext(filename); strings.ToLower(ext) {
 	case ".json":
 		if err := json.Unmarshal(content, &targetGroups); err != nil {
-			return err
+			return nil, err
 		}
 	case ".yml", ".yaml":
 		if err := yaml.UnmarshalStrict(content, &targetGroups); err != nil {
-			return err
+			return nil, err
 		}
 	default:
-		return errors.Errorf("invalid file extension: %q", ext)
+		return nil, errors.Errorf("invalid file extension: %q", ext)
 	}

 	for i, tg := range targetGroups {
 		if tg == nil {
-			return errors.Errorf("nil target group item found (index %d)", i)
+			return nil, errors.Errorf("nil target group item found (index %d)", i)
 		}
 	}

-	return nil
+	return targetGroups, nil
 }

 // CheckRules validates rule files.

@ -507,7 +560,6 @@ func checkDuplicates(groups []rulefmt.RuleGroup) []compareRuleType {
|
|||
var rules compareRuleTypes
|
||||
|
||||
for _, group := range groups {
|
||||
|
||||
for _, rule := range group.Rules {
|
||||
rules = append(rules, compareRuleType{
|
||||
metric: ruleMetric(rule),
|
||||
|
@ -721,7 +773,7 @@ func QuerySeries(url *url.URL, matchers []string, start, end string, p printer)
|
|||
}
|
||||
|
||||
// QueryLabels queries for label values against a Prometheus server.
|
||||
func QueryLabels(url *url.URL, name string, start, end string, p printer) int {
|
||||
func QueryLabels(url *url.URL, name, start, end string, p printer) int {
|
||||
if url.Scheme == "" {
|
||||
url.Scheme = "http"
|
||||
}
|
||||
|
@ -899,11 +951,13 @@ type promqlPrinter struct{}
|
|||
func (p *promqlPrinter) printValue(v model.Value) {
|
||||
fmt.Println(v)
|
||||
}
|
||||
|
||||
func (p *promqlPrinter) printSeries(val []model.LabelSet) {
|
||||
for _, v := range val {
|
||||
fmt.Println(v)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *promqlPrinter) printLabelValues(val model.LabelValues) {
|
||||
for _, v := range val {
|
||||
fmt.Println(v)
|
||||
|
@ -916,10 +970,12 @@ func (j *jsonPrinter) printValue(v model.Value) {
|
|||
//nolint:errcheck
|
||||
json.NewEncoder(os.Stdout).Encode(v)
|
||||
}
|
||||
|
||||
func (j *jsonPrinter) printSeries(v []model.LabelSet) {
|
||||
//nolint:errcheck
|
||||
json.NewEncoder(os.Stdout).Encode(v)
|
||||
}
|
||||
|
||||
func (j *jsonPrinter) printLabelValues(v model.LabelValues) {
|
||||
//nolint:errcheck
|
||||
json.NewEncoder(os.Stdout).Encode(v)
|
||||
|
@ -927,7 +983,7 @@ func (j *jsonPrinter) printLabelValues(v model.LabelValues) {
|
|||
|
||||
// importRules backfills recording rules from the files provided. The output are blocks of data
|
||||
// at the outputDir location.
|
||||
func importRules(url *url.URL, start, end, outputDir string, evalInterval time.Duration, files ...string) error {
|
||||
func importRules(url *url.URL, start, end, outputDir string, evalInterval, maxBlockDuration time.Duration, files ...string) error {
|
||||
ctx := context.Background()
|
||||
var stime, etime time.Time
|
||||
var err error
|
||||
|
@ -950,10 +1006,11 @@ func importRules(url *url.URL, start, end, outputDir string, evalInterval time.D
|
|||
}
|
||||
|
||||
cfg := ruleImporterConfig{
|
||||
outputDir: outputDir,
|
||||
start: stime,
|
||||
end: etime,
|
||||
evalInterval: evalInterval,
|
||||
outputDir: outputDir,
|
||||
start: stime,
|
||||
end: etime,
|
||||
evalInterval: evalInterval,
|
||||
maxBlockDuration: maxBlockDuration,
|
||||
}
|
||||
client, err := api.NewClient(api.Config{
|
||||
Address: url.String(),
|
||||
|
@ -980,3 +1037,25 @@ func importRules(url *url.URL, start, end, outputDir string, evalInterval time.D
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkTargetGroupsForAlertmanager(targetGroups []*targetgroup.Group, amcfg *config.AlertmanagerConfig) error {
|
||||
for _, tg := range targetGroups {
|
||||
if _, _, err := notifier.AlertmanagerFromGroup(tg, amcfg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *config.ScrapeConfig) error {
|
||||
for _, tg := range targetGroups {
|
||||
_, failures := scrape.TargetsFromGroup(tg, scfg)
|
||||
if len(failures) > 0 {
|
||||
first := failures[0]
|
||||
return first
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
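As a side note, a minimal sketch of driving the refactored checkSDFile — the file name, its contents, and the surrounding wrapper are illustrative assumptions, not part of the commit:

	// Hypothetical caller of the new checkSDFile signature shown above.
	// targets.yml is assumed to contain file-SD groups, e.g.:
	//   - targets: ["localhost:9090"]
	//     labels:
	//       env: demo
	targetGroups, err := checkSDFile("targets.yml")
	if err != nil {
		fmt.Fprintln(os.Stderr, "  FAILED:", err)
		return
	}
	// The groups can then be validated against a scrape config, as the
	// new checkConfig loop does via checkTargetGroupsForScrapeConfig.
	fmt.Printf("found %d target group(s)\n", len(targetGroups))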
@ -21,9 +21,10 @@ import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/pkg/rulefmt"
	"github.com/stretchr/testify/require"
)

func TestQueryRange(t *testing.T) {

@ -111,7 +112,7 @@ func TestCheckSDFile(t *testing.T) {
	}
	for _, test := range cases {
		t.Run(test.name, func(t *testing.T) {
			err := checkSDFile(test.file)
			_, err := checkSDFile(test.file)
			if test.err != "" {
				require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error())
				return

@ -163,3 +164,42 @@ func BenchmarkCheckDuplicates(b *testing.B) {
		checkDuplicates(rgs.Groups)
	}
}

func TestCheckTargetConfig(t *testing.T) {
	cases := []struct {
		name string
		file string
		err  string
	}{
		{
			name: "url_in_scrape_targetgroup_with_relabel_config.good",
			file: "url_in_scrape_targetgroup_with_relabel_config.good.yml",
			err:  "",
		},
		{
			name: "url_in_alert_targetgroup_with_relabel_config.good",
			file: "url_in_alert_targetgroup_with_relabel_config.good.yml",
			err:  "",
		},
		{
			name: "url_in_scrape_targetgroup_with_relabel_config.bad",
			file: "url_in_scrape_targetgroup_with_relabel_config.bad.yml",
			err:  "instance 0 in group 0: \"http://bad\" is not a valid hostname",
		},
		{
			name: "url_in_alert_targetgroup_with_relabel_config.bad",
			file: "url_in_alert_targetgroup_with_relabel_config.bad.yml",
			err:  "\"http://bad\" is not a valid hostname",
		},
	}
	for _, test := range cases {
		t.Run(test.name, func(t *testing.T) {
			_, err := checkConfig(false, "testdata/"+test.file)
			if test.err != "" {
				require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error())
				return
			}
			require.NoError(t, err)
		})
	}
}
@ -23,6 +23,7 @@ import (
	"github.com/pkg/errors"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/pkg/timestamp"
	"github.com/prometheus/prometheus/rules"

@ -48,10 +49,11 @@ type ruleImporter struct {
}

type ruleImporterConfig struct {
	outputDir    string
	start        time.Time
	end          time.Time
	evalInterval time.Duration
	outputDir        string
	start            time.Time
	end              time.Time
	evalInterval     time.Duration
	maxBlockDuration time.Duration
}

// newRuleImporter creates a new rule importer that can be used to parse and evaluate recording rule files and create new series

@ -83,7 +85,7 @@ func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) {

	for i, r := range group.Rules() {
		level.Info(importer.logger).Log("backfiller", "processing rule", "id", i, "name", r.Name())
		if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, group); err != nil {
		if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, int64(importer.config.maxBlockDuration/time.Millisecond), group); err != nil {
			errs = append(errs, err)
		}
	}

@ -92,8 +94,9 @@ func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) {
}

// importRule queries a prometheus API to evaluate rules at times in the past.
func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName string, ruleLabels labels.Labels, start, end time.Time, grp *rules.Group) (err error) {
	blockDuration := tsdb.DefaultBlockDuration
func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName string, ruleLabels labels.Labels, start, end time.Time,
	maxBlockDuration int64, grp *rules.Group) (err error) {
	blockDuration := getCompatibleBlockDuration(maxBlockDuration)
	startInMs := start.Unix() * int64(time.Second/time.Millisecond)
	endInMs := end.Unix() * int64(time.Second/time.Millisecond)

@ -130,7 +133,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
	// also need to append samples throughout the whole block range. To allow that, we
	// pretend that the block is twice as large here, but only really add sample in the
	// original interval later.
	w, err := tsdb.NewBlockWriter(log.NewNopLogger(), importer.config.outputDir, 2*tsdb.DefaultBlockDuration)
	w, err := tsdb.NewBlockWriter(log.NewNopLogger(), importer.config.outputDir, 2*blockDuration)
	if err != nil {
		return errors.Wrap(err, "new block writer")
	}

@ -147,12 +150,18 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
	matrix = val.(model.Matrix)

	for _, sample := range matrix {
		lb := labels.NewBuilder(ruleLabels)
		lb := labels.NewBuilder(labels.Labels{})

		for name, value := range sample.Metric {
			lb.Set(string(name), string(value))
		}

		// Setting the rule labels after the output of the query,
		// so they can override query output.
		for _, l := range ruleLabels {
			lb.Set(l.Name, l.Value)
		}

		lb.Set(labels.MetricName, ruleName)

		for _, value := range sample.Values {

@ -162,7 +171,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
			}
		}
	default:
		return errors.New(fmt.Sprintf("rule result is wrong type %s", val.Type().String()))
		return fmt.Errorf("rule result is wrong type %s", val.Type().String())
	}

	if err := app.flushAndCommit(ctx); err != nil {
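getCompatibleBlockDuration is called above but defined elsewhere in cmd/promtool in this commit. A sketch of the behavior the call sites imply — fall back to the default two-hour block, otherwise pick the largest TSDB-compatible range not exceeding the requested maximum; the exponential-ranges call is an assumption about the actual implementation:

	// Sketch only; the real helper lives elsewhere in this commit.
	func getCompatibleBlockDuration(maxBlockDuration int64) int64 {
		blockDuration := tsdb.DefaultBlockDuration // 2h, in milliseconds
		if maxBlockDuration > tsdb.DefaultBlockDuration {
			// Assumed: walk the exponential block ranges and keep the
			// largest one that still fits under maxBlockDuration.
			ranges := tsdb.ExponentialBlockRanges(tsdb.DefaultBlockDuration, 10, 3)
			idx := len(ranges) - 1
			for idx > 0 && ranges[idx] > maxBlockDuration {
				idx--
			}
			blockDuration = ranges[idx]
		}
		return blockDuration
	}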
@ -25,9 +25,10 @@ import (
	"github.com/go-kit/log"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/stretchr/testify/require"
)

type mockQueryRangeAPI struct {

@ -38,6 +39,8 @@ func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r
	return mockAPI.samples, v1.Warnings{}, nil
}

const defaultBlockDuration = time.Duration(tsdb.DefaultBlockDuration) * time.Millisecond

// TestBackfillRuleIntegration is an integration test that runs all the rule importer code to confirm the parts work together.
func TestBackfillRuleIntegration(t *testing.T) {
	const (

@ -46,23 +49,26 @@ func TestBackfillRuleIntegration(t *testing.T) {
		testValue2 = 98
	)
	var (
		start     = time.Date(2009, time.November, 10, 6, 34, 0, 0, time.UTC)
		testTime  = model.Time(start.Add(-9 * time.Hour).Unix())
		testTime2 = model.Time(start.Add(-8 * time.Hour).Unix())
		start                     = time.Date(2009, time.November, 10, 6, 34, 0, 0, time.UTC)
		testTime                  = model.Time(start.Add(-9 * time.Hour).Unix())
		testTime2                 = model.Time(start.Add(-8 * time.Hour).Unix())
		twentyFourHourDuration, _ = time.ParseDuration("24h")
	)

	var testCases = []struct {
	testCases := []struct {
		name                string
		runcount            int
		maxBlockDuration    time.Duration
		expectedBlockCount  int
		expectedSeriesCount int
		expectedSampleCount int
		samples             []*model.SampleStream
	}{
		{"no samples", 1, 0, 0, 0, []*model.SampleStream{}},
		{"run importer once", 1, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}},
		{"run importer with dup name label", 1, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"__name__": "val1", "name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}},
		{"one importer twice", 2, 8, 4, 8, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}, {Timestamp: testTime2, Value: testValue2}}}}},
		{"no samples", 1, defaultBlockDuration, 0, 0, 0, []*model.SampleStream{}},
		{"run importer once", 1, defaultBlockDuration, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}},
		{"run importer with dup name label", 1, defaultBlockDuration, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"__name__": "val1", "name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}},
		{"one importer twice", 2, defaultBlockDuration, 8, 4, 8, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}, {Timestamp: testTime2, Value: testValue2}}}}},
		{"run importer once with larger blocks", 1, twentyFourHourDuration, 4, 4, 4, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}},
	}
	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {

@ -76,7 +82,8 @@ func TestBackfillRuleIntegration(t *testing.T) {
	// Execute the test more than once to simulate running the rule importer twice with the same data.
	// We expect duplicate blocks with the same series are created when run more than once.
	for i := 0; i < tt.runcount; i++ {
		ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, tt.samples)

		ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, tt.samples, tt.maxBlockDuration)
		require.NoError(t, err)
		path1 := filepath.Join(tmpDir, "test.file")
		require.NoError(t, createSingleRuleTestFiles(path1))

@ -162,13 +169,14 @@ func TestBackfillRuleIntegration(t *testing.T) {
	}
}

func newTestRuleImporter(ctx context.Context, start time.Time, tmpDir string, testSamples model.Matrix) (*ruleImporter, error) {
func newTestRuleImporter(ctx context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
	logger := log.NewNopLogger()
	cfg := ruleImporterConfig{
		outputDir:    tmpDir,
		start:        start.Add(-10 * time.Hour),
		end:          start.Add(-7 * time.Hour),
		evalInterval: 60 * time.Second,
		outputDir:        tmpDir,
		start:            start.Add(-10 * time.Hour),
		end:              start.Add(-7 * time.Hour),
		evalInterval:     60 * time.Second,
		maxBlockDuration: maxBlockDuration,
	}

	return newRuleImporter(logger, cfg, mockQueryRangeAPI{

@ -185,7 +193,7 @@ func createSingleRuleTestFiles(path string) error {
  labels:
    testlabel11: testlabelvalue11
`
	return ioutil.WriteFile(path, []byte(recordingRules), 0777)
	return ioutil.WriteFile(path, []byte(recordingRules), 0o777)
}

func createMultiRuleTestFiles(path string) error {

@ -205,5 +213,69 @@ func createMultiRuleTestFiles(path string) error {
  labels:
    testlabel11: testlabelvalue13
`
	return ioutil.WriteFile(path, []byte(recordingRules), 0777)
	return ioutil.WriteFile(path, []byte(recordingRules), 0o777)
}

// TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics
// received from Prometheus Query API, including the __name__ label.
func TestBackfillLabels(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "backfilldata")
	require.NoError(t, err)
	defer func() {
		require.NoError(t, os.RemoveAll(tmpDir))
	}()
	ctx := context.Background()

	start := time.Date(2009, time.November, 10, 6, 34, 0, 0, time.UTC)
	mockAPISamples := []*model.SampleStream{
		{
			Metric: model.Metric{"name1": "override-me", "__name__": "override-me-too"},
			Values: []model.SamplePair{{Timestamp: model.TimeFromUnixNano(start.UnixNano()), Value: 123}},
		},
	}
	ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, mockAPISamples, defaultBlockDuration)
	require.NoError(t, err)

	path := filepath.Join(tmpDir, "test.file")
	recordingRules := `groups:
- name: group0
  rules:
  - record: rulename
    expr:  ruleExpr
    labels:
      name1: value-from-rule
`
	require.NoError(t, ioutil.WriteFile(path, []byte(recordingRules), 0o777))
	errs := ruleImporter.loadGroups(ctx, []string{path})
	for _, err := range errs {
		require.NoError(t, err)
	}

	errs = ruleImporter.importAll(ctx)
	for _, err := range errs {
		require.NoError(t, err)
	}

	opts := tsdb.DefaultOptions()
	opts.AllowOverlappingBlocks = true
	db, err := tsdb.Open(tmpDir, nil, nil, opts, nil)
	require.NoError(t, err)

	q, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64)
	require.NoError(t, err)

	t.Run("correct-labels", func(t *testing.T) {
		selectedSeries := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
		for selectedSeries.Next() {
			series := selectedSeries.At()
			expectedLabels := labels.Labels{
				labels.Label{Name: "__name__", Value: "rulename"},
				labels.Label{Name: "name1", Value: "value-from-rule"},
			}
			require.Equal(t, expectedLabels, series.Labels())
		}
		require.NoError(t, selectedSeries.Err())
		require.NoError(t, q.Close())
		require.NoError(t, db.Close())
	})
}
148 cmd/promtool/sd.go Normal file

@ -0,0 +1,148 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"reflect"
	"time"

	"github.com/go-kit/log"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/scrape"
)

type sdCheckResult struct {
	DiscoveredLabels labels.Labels `json:"discoveredLabels"`
	Labels           labels.Labels `json:"labels"`
	Error            error         `json:"error,omitempty"`
}

// CheckSD performs service discovery for the given job name and reports the results.
func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration) int {
	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

	cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
	if err != nil {
		fmt.Fprintln(os.Stderr, "Cannot load config", err)
		return 2
	}

	var scrapeConfig *config.ScrapeConfig
	jobs := []string{}
	jobMatched := false
	for _, v := range cfg.ScrapeConfigs {
		jobs = append(jobs, v.JobName)
		if v.JobName == sdJobName {
			jobMatched = true
			scrapeConfig = v
			break
		}
	}

	if !jobMatched {
		fmt.Fprintf(os.Stderr, "Job %s not found. Select one of:\n", sdJobName)
		for _, job := range jobs {
			fmt.Fprintf(os.Stderr, "\t%s\n", job)
		}
		return 1
	}

	targetGroupChan := make(chan []*targetgroup.Group)
	ctx, cancel := context.WithTimeout(context.Background(), sdTimeout)
	defer cancel()

	for _, cfg := range scrapeConfig.ServiceDiscoveryConfigs {
		d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger})
		if err != nil {
			fmt.Fprintln(os.Stderr, "Could not create new discoverer", err)
			return 2
		}
		go d.Run(ctx, targetGroupChan)
	}

	var targetGroups []*targetgroup.Group
	sdCheckResults := make(map[string][]*targetgroup.Group)
outerLoop:
	for {
		select {
		case targetGroups = <-targetGroupChan:
			for _, tg := range targetGroups {
				sdCheckResults[tg.Source] = append(sdCheckResults[tg.Source], tg)
			}
		case <-ctx.Done():
			break outerLoop
		}
	}
	results := []sdCheckResult{}
	for _, tgs := range sdCheckResults {
		results = append(results, getSDCheckResult(tgs, scrapeConfig)...)
	}

	res, err := json.MarshalIndent(results, "", "  ")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Could not marshal result json: %s", err)
		return 2
	}

	fmt.Printf("%s", res)
	return 0
}

func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult {
	sdCheckResults := []sdCheckResult{}
	for _, targetGroup := range targetGroups {
		for _, target := range targetGroup.Targets {
			labelSlice := make([]labels.Label, 0, len(target)+len(targetGroup.Labels))

			for name, value := range target {
				labelSlice = append(labelSlice, labels.Label{Name: string(name), Value: string(value)})
			}

			for name, value := range targetGroup.Labels {
				if _, ok := target[name]; !ok {
					labelSlice = append(labelSlice, labels.Label{Name: string(name), Value: string(value)})
				}
			}

			targetLabels := labels.New(labelSlice...)
			res, orig, err := scrape.PopulateLabels(targetLabels, scrapeConfig)
			result := sdCheckResult{
				DiscoveredLabels: orig,
				Labels:           res,
				Error:            err,
			}

			duplicateRes := false
			for _, sdCheckRes := range sdCheckResults {
				if reflect.DeepEqual(sdCheckRes, result) {
					duplicateRes = true
					break
				}
			}

			if !duplicateRes {
				sdCheckResults = append(sdCheckResults, result)
			}
		}
	}
	return sdCheckResults
}
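A minimal sketch of driving CheckSD directly — the config path, job name, and timeout are example values, and the promtool subcommand wiring that calls it is not shown in this hunk:

	// Illustrative only: run discovery for the "node" job defined in
	// prometheus.yml, waiting up to 30s for target groups to arrive,
	// then exit with CheckSD's status code.
	exitCode := CheckSD("prometheus.yml", "node", 30*time.Second)
	os.Exit(exitCode)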
70 cmd/promtool/sd_test.go Normal file

@ -0,0 +1,70 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"testing"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/pkg/relabel"

	"github.com/stretchr/testify/require"
)

func TestSDCheckResult(t *testing.T) {
	targetGroups := []*targetgroup.Group{{
		Targets: []model.LabelSet{
			map[model.LabelName]model.LabelValue{"__address__": "localhost:8080", "foo": "bar"},
		},
	}}

	reg, err := relabel.NewRegexp("(.*)")
	require.Nil(t, err)

	scrapeConfig := &config.ScrapeConfig{
		RelabelConfigs: []*relabel.Config{{
			SourceLabels: model.LabelNames{"foo"},
			Action:       relabel.Replace,
			TargetLabel:  "newfoo",
			Regex:        reg,
			Replacement:  "$1",
		}},
	}

	expectedSDCheckResult := []sdCheckResult{
		{
			DiscoveredLabels: labels.Labels{
				labels.Label{Name: "__address__", Value: "localhost:8080"},
				labels.Label{Name: "__scrape_interval__", Value: "0s"},
				labels.Label{Name: "__scrape_timeout__", Value: "0s"},
				labels.Label{Name: "foo", Value: "bar"},
			},
			Labels: labels.Labels{
				labels.Label{Name: "__address__", Value: "localhost:8080"},
				labels.Label{Name: "__scrape_interval__", Value: "0s"},
				labels.Label{Name: "__scrape_timeout__", Value: "0s"},
				labels.Label{Name: "foo", Value: "bar"},
				labels.Label{Name: "instance", Value: "localhost:8080"},
				labels.Label{Name: "newfoo", Value: "bar"},
			},
			Error: nil,
		},
	}

	require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig))
}
8 cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.bad.yml vendored Normal file

@ -0,0 +1,8 @@
alerting:
  alertmanagers:
    - relabel_configs:
        - source_labels: [__address__]
          target_label: __param_target
      static_configs:
        - targets:
            - http://bad

10 cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.good.yml vendored Normal file

@ -0,0 +1,10 @@
alerting:
  alertmanagers:
    - relabel_configs:
        - source_labels: [__address__]
          target_label: __param_target
        - target_label: __address__
          replacement: good
      static_configs:
        - targets:
            - http://bad

8 cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.bad.yml vendored Normal file

@ -0,0 +1,8 @@
scrape_configs:
  - job_name: prometheus
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
    static_configs:
      - targets:
          - http://bad

10 cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.good.yml vendored Normal file

@ -0,0 +1,10 @@
scrape_configs:
  - job_name: prometheus
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - target_label: __address__
        replacement: good
    static_configs:
      - targets:
          - http://good
@ -17,7 +17,6 @@ import (
	"bufio"
	"context"
	"fmt"
	"github.com/prometheus/prometheus/tsdb/index"
	"io"
	"io/ioutil"
	"math"

@ -41,6 +40,7 @@ import (
	"github.com/prometheus/prometheus/tsdb/chunks"
	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
	"github.com/prometheus/prometheus/tsdb/fileutil"
	"github.com/prometheus/prometheus/tsdb/index"
)

const timeDelta = 30000

@ -78,7 +78,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
	if err := os.RemoveAll(b.outPath); err != nil {
		return err
	}
	if err := os.MkdirAll(b.outPath, 0777); err != nil {
	if err := os.MkdirAll(b.outPath, 0o777); err != nil {
		return err
	}

@ -589,7 +589,7 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err
	histogram := make([]int, nBuckets)
	totalChunks := 0
	for postingsr.Next() {
		var lbsl = labels.Labels{}
		lbsl := labels.Labels{}
		var chks []chunks.Meta
		if err := indexr.Series(postingsr.At(), &lbsl, &chks); err != nil {
			return err

@ -671,14 +671,14 @@ func checkErr(err error) int {
	return 0
}

func backfillOpenMetrics(path string, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int {
func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int {
	inputFile, err := fileutil.OpenMmapFile(path)
	if err != nil {
		return checkErr(err)
	}
	defer inputFile.Close()

	if err := os.MkdirAll(outputDir, 0777); err != nil {
	if err := os.MkdirAll(outputDir, 0o777); err != nil {
		return checkErr(errors.Wrap(err, "create output dir"))
	}
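The recurring 0777 → 0o777 edits in this commit are purely cosmetic, matching gofumpt's preferred octal style: both spellings denote the same permission bits, the 0o prefix just makes the base explicit. A quick check:

	// 0777 and 0o777 are the same constant; this prints "true 511 511".
	fmt.Println(0777 == 0o777, 0777, 0o777)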
@ -47,6 +47,7 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, files ...string) int {
	fmt.Fprintln(os.Stderr, "  FAILED:")
	for _, e := range errs {
		fmt.Fprintln(os.Stderr, e.Error())
		fmt.Println()
	}
	failed = true
} else {

@ -313,30 +314,18 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
	})
}

var sb strings.Builder
if gotAlerts.Len() != expAlerts.Len() {
sort.Sort(gotAlerts)
sort.Sort(expAlerts)

if !reflect.DeepEqual(expAlerts, gotAlerts) {
	var testName string
	if tg.TestGroupName != "" {
		fmt.Fprintf(&sb, "    name: %s,\n", tg.TestGroupName)
	}
	fmt.Fprintf(&sb, "    alertname:%s, time:%s, \n", testcase.Alertname, testcase.EvalTime.String())
	fmt.Fprintf(&sb, "        exp:%#v, \n", expAlerts.String())
	fmt.Fprintf(&sb, "        got:%#v", gotAlerts.String())

	errs = append(errs, errors.New(sb.String()))
} else {
	sort.Sort(gotAlerts)
	sort.Sort(expAlerts)

	if !reflect.DeepEqual(expAlerts, gotAlerts) {
		if tg.TestGroupName != "" {
			fmt.Fprintf(&sb, "    name: %s,\n", tg.TestGroupName)
		}
		fmt.Fprintf(&sb, "    alertname:%s, time:%s, \n", testcase.Alertname, testcase.EvalTime.String())
		fmt.Fprintf(&sb, "        exp:%#v, \n", expAlerts.String())
		fmt.Fprintf(&sb, "        got:%#v", gotAlerts.String())

		errs = append(errs, errors.New(sb.String()))
		testName = fmt.Sprintf("    name: %s,\n", tg.TestGroupName)
	}
	expString := indentLines(expAlerts.String(), "            ")
	gotString := indentLines(gotAlerts.String(), "            ")
	errs = append(errs, errors.Errorf("%s    alertname: %s, time: %s, \n        exp:%v, \n        got:%v",
		testName, testcase.Alertname, testcase.EvalTime.String(), expString, gotString))
}
}

@ -385,7 +374,7 @@ Outer:
	return labels.Compare(gotSamples[i].Labels, gotSamples[j].Labels) <= 0
})
if !reflect.DeepEqual(expSamples, gotSamples) {
	errs = append(errs, errors.Errorf("    expr: %q, time: %s,\n        exp:%#v\n        got:%#v", testCase.Expr,
	errs = append(errs, errors.Errorf("    expr: %q, time: %s,\n        exp: %v\n        got: %v", testCase.Expr,
		testCase.EvalTime.String(), parsedSamplesString(expSamples), parsedSamplesString(gotSamples)))
}
}

@ -398,7 +387,6 @@ Outer:

// seriesLoadingString returns the input series in PromQL notation.
func (tg *testGroup) seriesLoadingString() string {
	result := fmt.Sprintf("load %v\n", shortDuration(tg.Interval))
	for _, is := range tg.InputSeries {
		result += fmt.Sprintf("  %v %v\n", is.Series, is.Values)

@ -468,6 +456,23 @@ func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, q
	}
}

// indentLines prefixes each line in the supplied string with the given "indent"
// string.
func indentLines(lines, indent string) string {
	sb := strings.Builder{}
	n := strings.Split(lines, "\n")
	for i, l := range n {
		if i > 0 {
			sb.WriteString(indent)
		}
		sb.WriteString(l)
		if i != len(n)-1 {
			sb.WriteRune('\n')
		}
	}
	return sb.String()
}

type labelsAndAnnotations []labelAndAnnotation

func (la labelsAndAnnotations) Len() int { return len(la) }

@ -484,11 +489,11 @@ func (la labelsAndAnnotations) String() string {
	if len(la) == 0 {
		return "[]"
	}
	s := "[" + la[0].String()
	for _, l := range la[1:] {
		s += ", " + l.String()
	s := "[\n0:" + indentLines("\n"+la[0].String(), "  ")
	for i, l := range la[1:] {
		s += ",\n" + fmt.Sprintf("%d", i+1) + ":" + indentLines("\n"+l.String(), "  ")
	}
	s += "]"
	s += "\n]"

	return s
}

@ -499,7 +504,7 @@ type labelAndAnnotation struct {
}

func (la *labelAndAnnotation) String() string {
	return "Labels:" + la.Labels.String() + " Annotations:" + la.Annotations.String()
	return "Labels:" + la.Labels.String() + "\nAnnotations:" + la.Annotations.String()
}

type series struct {
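A quick worked example of the new indentLines helper, following directly from the code above: the first line stays unindented and every subsequent line gets the prefix, which is what the multi-line alert-diff output relies on.

	// indentLines("Labels:{}\nAnnotations:{}", "    ") yields:
	//   Labels:{}
	//       Annotations:{}
	s := indentLines("Labels:{}\nAnnotations:{}", "    ")
	fmt.Println(s)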
@ -99,7 +99,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
}

// LoadFile parses the given YAML file into a Config.
func LoadFile(filename string, expandExternalLabels bool, logger log.Logger) (*Config, error) {
func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) {
	content, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err

@ -108,6 +108,25 @@ func LoadFile(filename string, expandExternalLabels bool, logger log.Logger) (*C
	if err != nil {
		return nil, errors.Wrapf(err, "parsing YAML file %s", filename)
	}

	if agentMode {
		if len(cfg.RemoteWriteConfigs) == 0 {
			return nil, errors.New("at least one remote_write target must be specified in agent mode")
		}

		if len(cfg.AlertingConfig.AlertmanagerConfigs) > 0 || len(cfg.AlertingConfig.AlertRelabelConfigs) > 0 {
			return nil, errors.New("field alerting is not allowed in agent mode")
		}

		if len(cfg.RuleFiles) > 0 {
			return nil, errors.New("field rule_files is not allowed in agent mode")
		}

		if len(cfg.RemoteReadConfigs) > 0 {
			return nil, errors.New("field remote_read is not allowed in agent mode")
		}
	}

	cfg.SetDirectory(filepath.Dir(filename))
	return cfg, nil
}
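A minimal sketch of exercising the new agentMode parameter — the file name is an example, and the error strings come straight from the checks above:

	// Validate a config for Prometheus agent mode. A config with rule_files,
	// alerting, or remote_read sections (or without remote_write) is rejected.
	cfg, err := config.LoadFile("agent.yml", true, false, log.NewNopLogger())
	if err != nil {
		fmt.Fprintln(os.Stderr, "agent-mode config invalid:", err)
		os.Exit(1)
	}
	fmt.Println("remote_write targets:", len(cfg.RemoteWriteConfigs))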
@ -49,6 +49,7 @@ import (
	"github.com/prometheus/prometheus/discovery/scaleway"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/discovery/triton"
	"github.com/prometheus/prometheus/discovery/uyuni"
	"github.com/prometheus/prometheus/discovery/xds"
	"github.com/prometheus/prometheus/discovery/zookeeper"
	"github.com/prometheus/prometheus/pkg/labels"

@ -102,6 +103,10 @@ var expectedConf = &Config{
	ClientID:     "123",
	ClientSecret: "456",
	TokenURL:     "http://remote1/auth",
	TLSConfig: config.TLSConfig{
		CertFile: filepath.FromSlash("testdata/valid_cert_file"),
		KeyFile:  filepath.FromSlash("testdata/valid_key_file"),
	},
},
FollowRedirects: true,
},

@ -564,6 +569,7 @@ var expectedConf = &Config{
	AuthenticationMethod: "OAuth",
	RefreshInterval:      model.Duration(5 * time.Minute),
	Port:                 9100,
	HTTPClientConfig:     config.DefaultHTTPClientConfig,
},
},
},

@ -778,17 +784,19 @@ var expectedConf = &Config{
	Scheme:           DefaultScrapeConfig.Scheme,
	HTTPClientConfig: config.DefaultHTTPClientConfig,

	ServiceDiscoveryConfigs: discovery.Configs{&openstack.SDConfig{
		Role:            "instance",
		Region:          "RegionOne",
		Port:            80,
		Availability:    "public",
		RefreshInterval: model.Duration(60 * time.Second),
		TLSConfig: config.TLSConfig{
			CAFile:   "testdata/valid_ca_file",
			CertFile: "testdata/valid_cert_file",
			KeyFile:  "testdata/valid_key_file",
		}},
	ServiceDiscoveryConfigs: discovery.Configs{
		&openstack.SDConfig{
			Role:            "instance",
			Region:          "RegionOne",
			Port:            80,
			Availability:    "public",
			RefreshInterval: model.Duration(60 * time.Second),
			TLSConfig: config.TLSConfig{
				CAFile:   "testdata/valid_ca_file",
				CertFile: "testdata/valid_cert_file",
				KeyFile:  "testdata/valid_key_file",
			},
		},
	},
},
{

@ -802,22 +810,23 @@ var expectedConf = &Config{
	Scheme:           DefaultScrapeConfig.Scheme,
	HTTPClientConfig: config.DefaultHTTPClientConfig,

	ServiceDiscoveryConfigs: discovery.Configs{&puppetdb.SDConfig{
		URL:               "https://puppetserver/",
		Query:             "resources { type = \"Package\" and title = \"httpd\" }",
		IncludeParameters: true,
		Port:              80,
		RefreshInterval:   model.Duration(60 * time.Second),
		HTTPClientConfig: config.HTTPClientConfig{
			FollowRedirects: true,
			TLSConfig: config.TLSConfig{
				CAFile:   "testdata/valid_ca_file",
				CertFile: "testdata/valid_cert_file",
				KeyFile:  "testdata/valid_key_file",
	ServiceDiscoveryConfigs: discovery.Configs{
		&puppetdb.SDConfig{
			URL:               "https://puppetserver/",
			Query:             "resources { type = \"Package\" and title = \"httpd\" }",
			IncludeParameters: true,
			Port:              80,
			RefreshInterval:   model.Duration(60 * time.Second),
			HTTPClientConfig: config.HTTPClientConfig{
				FollowRedirects: true,
				TLSConfig: config.TLSConfig{
					CAFile:   "testdata/valid_ca_file",
					CertFile: "testdata/valid_cert_file",
					KeyFile:  "testdata/valid_key_file",
				},
			},
		},
	},
},
{
	JobName: "hetzner",

@ -934,6 +943,26 @@ var expectedConf = &Config{
	},
	},
	},
	{
		JobName: "uyuni",

		HonorTimestamps:  true,
		ScrapeInterval:   model.Duration(15 * time.Second),
		ScrapeTimeout:    DefaultGlobalConfig.ScrapeTimeout,
		HTTPClientConfig: config.HTTPClientConfig{FollowRedirects: true},
		MetricsPath:      DefaultScrapeConfig.MetricsPath,
		Scheme:           DefaultScrapeConfig.Scheme,
		ServiceDiscoveryConfigs: discovery.Configs{
			&uyuni.SDConfig{
				Server:          kubernetesSDHostURL(),
				Username:        "gopher",
				Password:        "hole",
				Entitlement:     "monitoring_entitled",
				Separator:       ",",
				RefreshInterval: model.Duration(60 * time.Second),
			},
		},
	},
},
AlertingConfig: AlertingConfig{
	AlertmanagerConfigs: []*AlertmanagerConfig{

@ -960,7 +989,7 @@ var expectedConf = &Config{
}

func TestYAMLRoundtrip(t *testing.T) {
	want, err := LoadFile("testdata/roundtrip.good.yml", false, log.NewNopLogger())
	want, err := LoadFile("testdata/roundtrip.good.yml", false, false, log.NewNopLogger())
	require.NoError(t, err)

	out, err := yaml.Marshal(want)

@ -973,7 +1002,7 @@ func TestYAMLRoundtrip(t *testing.T) {
}

func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
	want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, log.NewNopLogger())
	want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, log.NewNopLogger())
	require.NoError(t, err)

	out, err := yaml.Marshal(want)

@ -989,16 +1018,16 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
func TestLoadConfig(t *testing.T) {
	// Parse a valid file that sets a global scrape timeout. This tests whether parsing
	// an overwritten default field in the global config permanently changes the default.
	_, err := LoadFile("testdata/global_timeout.good.yml", false, log.NewNopLogger())
	_, err := LoadFile("testdata/global_timeout.good.yml", false, false, log.NewNopLogger())
	require.NoError(t, err)

	c, err := LoadFile("testdata/conf.good.yml", false, log.NewNopLogger())
	c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger())
	require.NoError(t, err)
	require.Equal(t, expectedConf, c)
}

func TestScrapeIntervalLarger(t *testing.T) {
	c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, log.NewNopLogger())
	c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, log.NewNopLogger())
	require.NoError(t, err)
	require.Equal(t, 1, len(c.ScrapeConfigs))
	for _, sc := range c.ScrapeConfigs {

@ -1008,7 +1037,7 @@ func TestScrapeIntervalLarger(t *testing.T) {

// YAML marshaling must not reveal authentication credentials.
func TestElideSecrets(t *testing.T) {
	c, err := LoadFile("testdata/conf.good.yml", false, log.NewNopLogger())
	c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger())
	require.NoError(t, err)

	secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`)

@ -1018,38 +1047,38 @@ func TestElideSecrets(t *testing.T) {
	yamlConfig := string(config)

	matches := secretRe.FindAllStringIndex(yamlConfig, -1)
	require.Equal(t, 15, len(matches), "wrong number of secret matches found")
	require.Equal(t, 16, len(matches), "wrong number of secret matches found")
	require.NotContains(t, yamlConfig, "mysecret",
		"yaml marshal reveals authentication credentials.")
}

func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) {
	// Parse a valid file that sets a rule files with an absolute path
	c, err := LoadFile(ruleFilesConfigFile, false, log.NewNopLogger())
	c, err := LoadFile(ruleFilesConfigFile, false, false, log.NewNopLogger())
	require.NoError(t, err)
	require.Equal(t, ruleFilesExpectedConf, c)
}

func TestKubernetesEmptyAPIServer(t *testing.T) {
	_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, log.NewNopLogger())
	_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, log.NewNopLogger())
	require.NoError(t, err)
}

func TestKubernetesWithKubeConfig(t *testing.T) {
	_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, log.NewNopLogger())
	_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, log.NewNopLogger())
	require.NoError(t, err)
}

func TestKubernetesSelectors(t *testing.T) {
	_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, log.NewNopLogger())
	_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, log.NewNopLogger())
	require.NoError(t, err)
	_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, log.NewNopLogger())
	_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, log.NewNopLogger())
	require.NoError(t, err)
	_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, log.NewNopLogger())
	_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, log.NewNopLogger())
	require.NoError(t, err)
	_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, log.NewNopLogger())
	_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, log.NewNopLogger())
	require.NoError(t, err)
	_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, log.NewNopLogger())
	_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, log.NewNopLogger())
	require.NoError(t, err)
}

@ -1060,170 +1089,224 @@ var expectedErrors = []struct {
	{
		filename: "jobname.bad.yml",
		errMsg:   `job_name is empty`,
	}, {
	},
	{
		filename: "jobname_dup.bad.yml",
		errMsg:   `found multiple scrape configs with job name "prometheus"`,
	}, {
	},
	{
		filename: "scrape_interval.bad.yml",
		errMsg:   `scrape timeout greater than scrape interval`,
	}, {
	},
	{
		filename: "labelname.bad.yml",
		errMsg:   `"not$allowed" is not a valid label name`,
	}, {
	},
	{
		filename: "labelname2.bad.yml",
		errMsg:   `"not:allowed" is not a valid label name`,
	}, {
	},
	{
		filename: "labelvalue.bad.yml",
		errMsg:   `"\xff" is not a valid label value`,
	}, {
	},
	{
		filename: "regex.bad.yml",
		errMsg:   "error parsing regexp",
	}, {
	},
	{
		filename: "modulus_missing.bad.yml",
		errMsg:   "relabel configuration for hashmod requires non-zero modulus",
	}, {
	},
	{
		filename: "labelkeep.bad.yml",
		errMsg:   "labelkeep action requires only 'regex', and no other fields",
	}, {
	},
	{
		filename: "labelkeep2.bad.yml",
		errMsg:   "labelkeep action requires only 'regex', and no other fields",
	}, {
	},
	{
		filename: "labelkeep3.bad.yml",
		errMsg:   "labelkeep action requires only 'regex', and no other fields",
	}, {
	},
	{
		filename: "labelkeep4.bad.yml",
		errMsg:   "labelkeep action requires only 'regex', and no other fields",
	}, {
	},
	{
		filename: "labelkeep5.bad.yml",
		errMsg:   "labelkeep action requires only 'regex', and no other fields",
	}, {
	},
	{
		filename: "labeldrop.bad.yml",
		errMsg:   "labeldrop action requires only 'regex', and no other fields",
	}, {
	},
	{
		filename: "labeldrop2.bad.yml",
		errMsg:   "labeldrop action requires only 'regex', and no other fields",
	}, {
	},
	{
		filename: "labeldrop3.bad.yml",
		errMsg:   "labeldrop action requires only 'regex', and no other fields",
	}, {
	},
	{
		filename: "labeldrop4.bad.yml",
		errMsg:   "labeldrop action requires only 'regex', and no other fields",
	}, {
	},
	{
		filename: "labeldrop5.bad.yml",
		errMsg:   "labeldrop action requires only 'regex', and no other fields",
	}, {
	},
	{
		filename: "labelmap.bad.yml",
		errMsg:   "\"l-$1\" is invalid 'replacement' for labelmap action",
	}, {
	},
	{
		filename: "rules.bad.yml",
		errMsg:   "invalid rule file path",
	}, {
	},
	{
		filename: "unknown_attr.bad.yml",
		errMsg:   "field consult_sd_configs not found in type",
	}, {
	},
	{
		filename: "bearertoken.bad.yml",
		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
	}, {
	},
	{
		filename: "bearertoken_basicauth.bad.yml",
		errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
	}, {
	},
	{
		filename: "kubernetes_http_config_without_api_server.bad.yml",
		errMsg:   "to use custom HTTP client configuration please provide the 'api_server' URL explicitly",
	}, {
	},
	{
		filename: "kubernetes_kubeconfig_with_apiserver.bad.yml",
		errMsg:   "cannot use 'kubeconfig_file' and 'api_server' simultaneously",
	}, {
	},
	{
		filename: "kubernetes_kubeconfig_with_http_config.bad.yml",
		errMsg:   "cannot use a custom HTTP client configuration together with 'kubeconfig_file'",
	},
	{
		filename: "kubernetes_bearertoken.bad.yml",
		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
	}, {
	},
	{
		filename: "kubernetes_role.bad.yml",
		errMsg:   "role",
	}, {
	},
	{
		filename: "kubernetes_selectors_endpoints.bad.yml",
		errMsg:   "endpoints role supports only pod, service, endpoints selectors",
	}, {
	},
	{
		filename: "kubernetes_selectors_ingress.bad.yml",
		errMsg:   "ingress role supports only ingress selectors",
	}, {
	},
	{
		filename: "kubernetes_selectors_node.bad.yml",
		errMsg:   "node role supports only node selectors",
	}, {
	},
	{
		filename: "kubernetes_selectors_pod.bad.yml",
		errMsg:   "pod role supports only pod selectors",
	}, {
	},
	{
		filename: "kubernetes_selectors_service.bad.yml",
		errMsg:   "service role supports only service selectors",
	}, {
	},
	{
		filename: "kubernetes_namespace_discovery.bad.yml",
		errMsg:   "field foo not found in type kubernetes.plain",
	}, {
	},
	{
		filename: "kubernetes_selectors_duplicated_role.bad.yml",
		errMsg:   "duplicated selector role: pod",
	}, {
	},
	{
		filename: "kubernetes_selectors_incorrect_selector.bad.yml",
		errMsg:   "invalid selector: 'metadata.status-Running'; can't understand 'metadata.status-Running'",
	}, {
	},
	{
		filename: "kubernetes_bearertoken_basicauth.bad.yml",
		errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
	}, {
	},
	{
		filename: "kubernetes_authorization_basicauth.bad.yml",
		errMsg:   "at most one of basic_auth, oauth2 & authorization must be configured",
	}, {
	},
	{
		filename: "marathon_no_servers.bad.yml",
		errMsg:   "marathon_sd: must contain at least one Marathon server",
	}, {
	},
	{
		filename: "marathon_authtoken_authtokenfile.bad.yml",
		errMsg:   "marathon_sd: at most one of auth_token & auth_token_file must be configured",
	}, {
	},
	{
		filename: "marathon_authtoken_basicauth.bad.yml",
		errMsg:   "marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured",
	}, {
	},
	{
		filename: "marathon_authtoken_bearertoken.bad.yml",
		errMsg:   "marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured",
	}, {
	},
	{
		filename: "marathon_authtoken_authorization.bad.yml",
		errMsg:   "marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured",
	}, {
	},
	{
		filename: "openstack_role.bad.yml",
		errMsg:   "unknown OpenStack SD role",
	}, {
	},
	{
		filename: "openstack_availability.bad.yml",
		errMsg:   "unknown availability invalid, must be one of admin, internal or public",
	}, {
	},
	{
		filename: "url_in_targetgroup.bad.yml",
		errMsg:   "\"http://bad\" is not a valid hostname",
	}, {
	},
	{
		filename: "target_label_missing.bad.yml",
		errMsg:   "relabel configuration for replace action requires 'target_label' value",
	}, {
	},
	{
		filename: "target_label_hashmod_missing.bad.yml",
		errMsg:   "relabel configuration for hashmod action requires 'target_label' value",
	}, {
	},
	{
		filename: "unknown_global_attr.bad.yml",
		errMsg:   "field nonexistent_field not found in type config.plain",
	}, {
	},
	{
		filename: "remote_read_url_missing.bad.yml",
		errMsg:   `url for remote_read is empty`,
	}, {
	},
	{
		filename: "remote_write_header.bad.yml",
		errMsg:   `x-prometheus-remote-write-version is a reserved header. It must not be changed`,
	}, {
	},
	{
		filename: "remote_read_header.bad.yml",
		errMsg:   `x-prometheus-remote-write-version is a reserved header. It must not be changed`,
	}, {
	},
	{
		filename: "remote_write_authorization_header.bad.yml",
		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`,
	}, {
	},
	{
		filename: "remote_write_url_missing.bad.yml",
		errMsg:   `url for remote_write is empty`,
	}, {
	},
	{
		filename: "remote_write_dup.bad.yml",
		errMsg:   `found multiple remote write configs with job name "queue1"`,
	}, {
	},
	{
		filename: "remote_read_dup.bad.yml",
		errMsg:   `found multiple remote read configs with job name "queue1"`,
	},

@ -1355,7 +1438,7 @@ var expectedErrors = []struct {

func TestBadConfigs(t *testing.T) {
	for _, ee := range expectedErrors {
		_, err := LoadFile("testdata/"+ee.filename, false, log.NewNopLogger())
		_, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger())
		require.Error(t, err, "%s", ee.filename)
		require.Contains(t, err.Error(), ee.errMsg,
			"Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)

@ -1389,20 +1472,20 @@ func TestExpandExternalLabels(t *testing.T) {
	// Cleanup any TEST env variable that could exist on the system.
	os.Setenv("TEST", "")

	c, err := LoadFile("testdata/external_labels.good.yml", false, log.NewNopLogger())
	c, err := LoadFile("testdata/external_labels.good.yml", false, false, log.NewNopLogger())
	require.NoError(t, err)
	require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0])
	require.Equal(t, labels.Label{Name: "baz", Value: "foo${TEST}bar"}, c.GlobalConfig.ExternalLabels[1])
	require.Equal(t, labels.Label{Name: "foo", Value: "${TEST}"}, c.GlobalConfig.ExternalLabels[2])

	c, err = LoadFile("testdata/external_labels.good.yml", true, log.NewNopLogger())
	c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
	require.NoError(t, err)
	require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0])
	require.Equal(t, labels.Label{Name: "baz", Value: "foobar"}, c.GlobalConfig.ExternalLabels[1])
	require.Equal(t, labels.Label{Name: "foo", Value: ""}, c.GlobalConfig.ExternalLabels[2])

	os.Setenv("TEST", "TestValue")
	c, err = LoadFile("testdata/external_labels.good.yml", true, log.NewNopLogger())
	c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
	require.NoError(t, err)
	require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0])
	require.Equal(t, labels.Label{Name: "baz", Value: "fooTestValuebar"}, c.GlobalConfig.ExternalLabels[1])
9 config/testdata/conf.good.yml vendored

@ -23,6 +23,9 @@ remote_write:
    client_id: "123"
    client_secret: "456"
    token_url: "http://remote1/auth"
    tls_config:
      cert_file: valid_cert_file
      key_file: valid_key_file

- url: http://remote2/push
  name: rw_tls

@ -349,6 +352,12 @@ scrape_configs:
  - authorization:
      credentials: abcdef

- job_name: uyuni
  uyuni_sd_configs:
    - server: https://localhost:1234
      username: gopher
      password: hole

alerting:
  alertmanagers:
    - scheme: https
@@ -63,13 +63,11 @@ const (
 	ec2LabelSeparator = ","
 )

-var (
-	// DefaultEC2SDConfig is the default EC2 SD configuration.
-	DefaultEC2SDConfig = EC2SDConfig{
-		Port:            80,
-		RefreshInterval: model.Duration(60 * time.Second),
-	}
-)
+// DefaultEC2SDConfig is the default EC2 SD configuration.
+var DefaultEC2SDConfig = EC2SDConfig{
+	Port:            80,
+	RefreshInterval: model.Duration(60 * time.Second),
+}

 func init() {
 	discovery.RegisterConfig(&EC2SDConfig{})
@@ -53,13 +53,11 @@ const (
 	lightsailLabelSeparator = ","
 )

-var (
-	// DefaultLightsailSDConfig is the default Lightsail SD configuration.
-	DefaultLightsailSDConfig = LightsailSDConfig{
-		Port:            80,
-		RefreshInterval: model.Duration(60 * time.Second),
-	}
-)
+// DefaultLightsailSDConfig is the default Lightsail SD configuration.
+var DefaultLightsailSDConfig = LightsailSDConfig{
+	Port:            80,
+	RefreshInterval: model.Duration(60 * time.Second),
+}

 func init() {
 	discovery.RegisterConfig(&LightsailSDConfig{})
@@ -64,6 +64,7 @@ var DefaultSDConfig = SDConfig{
 	RefreshInterval:      model.Duration(5 * time.Minute),
 	Environment:          azure.PublicCloud.Name,
 	AuthenticationMethod: authMethodOAuth,
+	HTTPClientConfig:     config_util.DefaultHTTPClientConfig,
 }

 func init() {

@@ -80,6 +81,8 @@ type SDConfig struct {
 	ClientSecret         config_util.Secret `yaml:"client_secret,omitempty"`
 	RefreshInterval      model.Duration     `yaml:"refresh_interval,omitempty"`
 	AuthenticationMethod string             `yaml:"authentication_method,omitempty"`
+
+	HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
 }

 // Name returns the name of the Config.
@@ -200,19 +203,29 @@ func createAzureClient(cfg SDConfig) (azureClient, error) {
 		}
 	}

+	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "azure_sd")
+	if err != nil {
+		return azureClient{}, err
+	}
+	sender := autorest.DecorateSender(client)
+
 	bearerAuthorizer := autorest.NewBearerAuthorizer(spt)

 	c.vm = compute.NewVirtualMachinesClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
 	c.vm.Authorizer = bearerAuthorizer
+	c.vm.Sender = sender

 	c.nic = network.NewInterfacesClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
 	c.nic.Authorizer = bearerAuthorizer
+	c.nic.Sender = sender

 	c.vmss = compute.NewVirtualMachineScaleSetsClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
 	c.vmss.Authorizer = bearerAuthorizer
+	c.vmss.Sender = sender

 	c.vmssvm = compute.NewVirtualMachineScaleSetVMsClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
 	c.vmssvm.Authorizer = bearerAuthorizer
+	c.vmssvm.Sender = sender

 	return c, nil
 }
@@ -326,7 +339,6 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 			// Get the IP address information via separate call to the network provider.
 			for _, nicID := range vm.NetworkInterfaces {
 				networkInterface, err := client.getNetworkInterfaceByID(ctx, nicID)
-
 				if err != nil {
 					level.Error(d.logger).Log("msg", "Unable to get network interface", "name", nicID, "err", err)
 					ch <- target{labelSet: nil, err: err}
@@ -424,9 +436,8 @@ func (client *azureClient) getScaleSets(ctx context.Context) ([]compute.VirtualM

 func (client *azureClient) getScaleSetVMs(ctx context.Context, scaleSet compute.VirtualMachineScaleSet) ([]virtualMachine, error) {
 	var vms []virtualMachine
-	//TODO do we really need to fetch the resourcegroup this way?
+	// TODO do we really need to fetch the resourcegroup this way?
 	r, err := newAzureResourceFromID(*scaleSet.ID, nil)
-
 	if err != nil {
 		return nil, errors.Wrap(err, "could not parse scale set ID")
 	}
@@ -54,7 +54,7 @@ const (
 	healthLabel = model.MetaLabelPrefix + "consul_health"
 	// serviceAddressLabel is the name of the label containing the (optional) service address.
 	serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address"
-	//servicePortLabel is the name of the label containing the service port.
+	// servicePortLabel is the name of the label containing the service port.
 	servicePortLabel = model.MetaLabelPrefix + "consul_service_port"
 	// datacenterLabel is the name of the label containing the datacenter ID.
 	datacenterLabel = model.MetaLabelPrefix + "consul_dc"
@@ -530,7 +530,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
 	for _, serviceNode := range serviceNodes {
 		// We surround the separated list with the separator as well. This way regular expressions
 		// in relabeling rules don't have to consider tag positions.
-		var tags = srv.tagSeparator + strings.Join(serviceNode.Service.Tags, srv.tagSeparator) + srv.tagSeparator
+		tags := srv.tagSeparator + strings.Join(serviceNode.Service.Tags, srv.tagSeparator) + srv.tagSeparator

 		// If the service address is not empty it should be used instead of the node address
 		// since the service may be registered remotely through a different node.
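The separator-wrapping in the watch loop above exists so a relabel regex can anchor on the separator instead of worrying about a tag's position in the list. A small self-contained illustration of the idea (the tag names here are made up):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	sep := ","
	tags := []string{"http", "primary", "v2"}
	// Wrap the joined list in separators, as the consul watch loop does.
	wrapped := sep + strings.Join(tags, sep) + sep // ",http,primary,v2,"

	// A relabel-style regex can now match any tag with the same pattern,
	// regardless of whether it is first, last, or in the middle.
	re := regexp.MustCompile(`.*,primary,.*`)
	fmt.Println(re.MatchString(wrapped)) // true
}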
@@ -37,9 +37,9 @@ func TestMain(m *testing.M) {

 func TestConfiguredService(t *testing.T) {
 	conf := &SDConfig{
-		Services: []string{"configuredServiceName"}}
+		Services: []string{"configuredServiceName"},
+	}
 	consulDiscovery, err := NewDiscovery(conf, nil)
-
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}

@@ -57,7 +57,6 @@ func TestConfiguredServiceWithTag(t *testing.T) {
 		ServiceTags: []string{"http"},
 	}
 	consulDiscovery, err := NewDiscovery(conf, nil)
-
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}

@@ -153,7 +152,6 @@ func TestConfiguredServiceWithTags(t *testing.T) {

 	for _, tc := range cases {
 		consulDiscovery, err := NewDiscovery(tc.conf, nil)
-
 		if err != nil {
 			t.Errorf("Unexpected error when initializing discovery %v", err)
 		}

@@ -168,7 +166,6 @@ func TestConfiguredServiceWithTags(t *testing.T) {
 func TestNonConfiguredService(t *testing.T) {
 	conf := &SDConfig{}
 	consulDiscovery, err := NewDiscovery(conf, nil)
-
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}
@@ -75,7 +75,8 @@ func (m *SDMock) HandleDropletsList() {
 			panic(err)
 		}
 	}
-	fmt.Fprint(w, []string{`
+	fmt.Fprint(w, []string{
+		`
 {
   "droplets": [
     {
@@ -73,7 +73,7 @@ func (t *testRunner) copyFile(src string) string {
 }

 // copyFileTo atomically copies a file with a different name to the runner's directory.
-func (t *testRunner) copyFileTo(src string, name string) string {
+func (t *testRunner) copyFileTo(src, name string) string {
 	t.Helper()

 	newf, err := ioutil.TempFile(t.dir, "")

@@ -95,7 +95,7 @@ func (t *testRunner) copyFileTo(src string, name string) string {
 }

 // writeString writes atomically a string to a file.
-func (t *testRunner) writeString(file string, data string) {
+func (t *testRunner) writeString(file, data string) {
 	t.Helper()

 	newf, err := ioutil.TempFile(t.dir, "")
@@ -477,6 +477,7 @@ func TestRemoveFile(t *testing.T) {
 			},
 			{
 				Source: fileSource(sdFile, 1),
-			}},
+			},
+		},
 		},
 	)
 }
@@ -78,6 +78,7 @@ func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, er
 	)
 	return d, nil
 }
+
 func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	servers, err := d.client.Server.All(ctx)
 	if err != nil {
@@ -489,8 +489,10 @@ func (m *SDMock) HandleHcloudNetworks() {
 	})
 }

-const robotTestUsername = "my-hetzner"
-const robotTestPassword = "my-password"
+const (
+	robotTestUsername = "my-hetzner"
+	robotTestPassword = "my-password"
+)

 // HandleRobotServers mocks the robot servers list endpoint.
 func (m *SDMock) HandleRobotServers() {
@@ -70,6 +70,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro

 	return d, nil
 }
+
 func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	req, err := http.NewRequest("GET", d.endpoint+"/server", nil)
 	if err != nil {
@@ -24,8 +24,9 @@ import (
 	"github.com/go-kit/log"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
-	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/discovery/targetgroup"
 )

 func TestHTTPValidRefresh(t *testing.T) {

@@ -60,7 +61,6 @@ func TestHTTPValidRefresh(t *testing.T) {
 		},
 	}
 	require.Equal(t, tgs, expectedTargets)
-
 }

 func TestHTTPInvalidCode(t *testing.T) {
@@ -398,5 +398,4 @@ func TestSourceDisappeared(t *testing.T) {
 			require.Equal(t, test.expectedTargets[i], tgs)
 		}
 	}
-
 }
@@ -34,6 +34,7 @@ import (
 	_ "github.com/prometheus/prometheus/discovery/puppetdb"  // register puppetdb
 	_ "github.com/prometheus/prometheus/discovery/scaleway"  // register scaleway
 	_ "github.com/prometheus/prometheus/discovery/triton"    // register triton
+	_ "github.com/prometheus/prometheus/discovery/uyuni"     // register uyuni
 	_ "github.com/prometheus/prometheus/discovery/xds"       // register xds
 	_ "github.com/prometheus/prometheus/discovery/zookeeper" // register zookeeper
 )
@@ -121,9 +121,11 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer
 		clientGoRequestLatencyMetricVec,
 	)
 }
-func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code string, method string, host string) {
+
+func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) {
 	clientGoRequestResultMetricVec.WithLabelValues(code).Inc()
 }
+
 func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) {
 	clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
 }

@@ -146,21 +148,27 @@ func (f *clientGoWorkqueueMetricsProvider) Register(registerer prometheus.Regist
 func (f *clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
 	return clientGoWorkqueueDepthMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric {
 	return clientGoWorkqueueAddsMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric {
 	return clientGoWorkqueueLatencyMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
 	return clientGoWorkqueueWorkDurationMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
 	return clientGoWorkqueueUnfinishedWorkSecondsMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
 	return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name)
 }
+
 func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
 	// Retries are not used so the metric is omitted.
 	return noopMetric{}
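These provider methods are how client-go pulls metrics out of its workqueues; the provider is installed once at startup via workqueue.SetProvider before any queue is created. A rough, self-contained sketch of installing such a provider (this no-op variant is illustrative, not the Prometheus implementation above):

package main

import "k8s.io/client-go/util/workqueue"

// noopMetric satisfies all the workqueue metric interfaces while recording nothing.
type noopMetric struct{}

func (noopMetric) Inc()            {}
func (noopMetric) Dec()            {}
func (noopMetric) Set(float64)     {}
func (noopMetric) Observe(float64) {}

// provider returns a no-op metric for every workqueue metric kind.
type provider struct{}

func (provider) NewDepthMetric(name string) workqueue.GaugeMetric             { return noopMetric{} }
func (provider) NewAddsMetric(name string) workqueue.CounterMetric            { return noopMetric{} }
func (provider) NewLatencyMetric(name string) workqueue.HistogramMetric       { return noopMetric{} }
func (provider) NewWorkDurationMetric(name string) workqueue.HistogramMetric  { return noopMetric{} }
func (provider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return noopMetric{}
}
func (provider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return noopMetric{}
}
func (provider) NewRetriesMetric(name string) workqueue.CounterMetric { return noopMetric{} }

func main() {
	// SetProvider must be called before any workqueue is created.
	workqueue.SetProvider(provider{})
}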
@@ -18,8 +18,6 @@ import (
 	"net"
 	"strconv"

-	"github.com/prometheus/prometheus/util/strutil"
-
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/pkg/errors"

@@ -29,6 +27,7 @@ import (
 	"k8s.io/client-go/util/workqueue"

 	"github.com/prometheus/prometheus/discovery/targetgroup"
+	"github.com/prometheus/prometheus/util/strutil"
 )

 var (

@@ -308,6 +307,14 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
 		}
 	}

+	v := eps.Labels[apiv1.EndpointsOverCapacity]
+	if v == "truncated" {
+		level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
+	}
+	if v == "warning" {
+		level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
+	}
+
 	// For all seen pods, check all container ports. If they were not covered
 	// by one of the service endpoints, generate targets for them.
 	for _, pe := range seenPods {
@@ -27,7 +27,7 @@ import (
 )

 func makeEndpoints() *v1.Endpoints {
-	var nodeName = "foobar"
+	nodeName := "foobar"
 	return &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "testendpoints",
@@ -86,15 +86,18 @@ func (d k8sDiscoveryTest) Run(t *testing.T) {
 	// Ensure that discovery has a discoverer set. This prevents a race
 	// condition where the above go routine may or may not have set a
 	// discoverer yet.
+	lastDiscoverersCount := 0
+	dis := d.discovery.(*Discovery)
 	for {
-		dis := d.discovery.(*Discovery)
 		dis.RLock()
 		l := len(dis.discoverers)
 		dis.RUnlock()
-		if l > 0 {
+		if l > 0 && l == lastDiscoverersCount {
 			break
 		}
-		time.Sleep(10 * time.Millisecond)
+		time.Sleep(100 * time.Millisecond)
+
+		lastDiscoverersCount = l
 	}

 	resChan := make(chan map[string]*targetgroup.Group)
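The loop change above replaces "wait until at least one discoverer exists" with "wait until the discoverer count is non-zero and stops changing between polls", which tolerates discoverers that register asynchronously. The pattern in isolation; the count callback here is hypothetical:

package main

import (
	"fmt"
	"time"
)

// waitUntilStable polls count until it returns the same non-zero value
// on two consecutive iterations, mirroring the test loop above.
func waitUntilStable(count func() int) int {
	last := 0
	for {
		n := count()
		if n > 0 && n == last {
			return n
		}
		time.Sleep(100 * time.Millisecond)
		last = n
	}
}

func main() {
	calls := 0
	fmt.Println(waitUntilStable(func() int {
		calls++
		if calls < 3 {
			return calls // still growing
		}
		return 3 // stabilized
	}))
}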
@@ -171,13 +174,15 @@ type hasSynced interface {
 	hasSynced() bool
 }

-var _ hasSynced = &Discovery{}
-var _ hasSynced = &Node{}
-var _ hasSynced = &Endpoints{}
-var _ hasSynced = &EndpointSlice{}
-var _ hasSynced = &Ingress{}
-var _ hasSynced = &Pod{}
-var _ hasSynced = &Service{}
+var (
+	_ hasSynced = &Discovery{}
+	_ hasSynced = &Node{}
+	_ hasSynced = &Endpoints{}
+	_ hasSynced = &EndpointSlice{}
+	_ hasSynced = &Ingress{}
+	_ hasSynced = &Pod{}
+	_ hasSynced = &Service{}
+)

 func (d *Discovery) hasSynced() bool {
 	d.RLock()
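Grouping these into a single var (...) block is a gofumpt-style cleanup; the declarations themselves are the standard compile-time check that a type satisfies an interface. A self-contained example of the idiom, with a made-up Pod type:

package main

import "fmt"

type hasSynced interface {
	hasSynced() bool
}

type Pod struct{ synced bool }

func (p *Pod) hasSynced() bool { return p.synced }

// The blank-identifier assignment fails to compile if *Pod ever stops
// implementing hasSynced; it costs nothing at runtime.
var _ hasSynced = &Pod{}

func main() {
	fmt.Println((&Pod{synced: true}).hasSynced()) // true
}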
@@ -25,7 +25,7 @@ import (
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )

-func makeNode(name, address string, labels map[string]string, annotations map[string]string) *v1.Node {
+func makeNode(name, address string, labels, annotations map[string]string) *v1.Node {
 	return &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
discovery/legacymanager/manager.go (new file, 357 lines)
@@ -0,0 +1,357 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package legacymanager

import (
	"context"
	"fmt"
	"reflect"
	"sync"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

var (
	failedConfigs = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "prometheus_sd_failed_configs",
			Help: "Current number of service discovery configurations that failed to load.",
		},
		[]string{"name"},
	)
	discoveredTargets = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "prometheus_sd_discovered_targets",
			Help: "Current number of discovered targets.",
		},
		[]string{"name", "config"},
	)
	receivedUpdates = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "prometheus_sd_received_updates_total",
			Help: "Total number of update events received from the SD providers.",
		},
		[]string{"name"},
	)
	delayedUpdates = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "prometheus_sd_updates_delayed_total",
			Help: "Total number of update events that couldn't be sent immediately.",
		},
		[]string{"name"},
	)
	sentUpdates = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "prometheus_sd_updates_total",
			Help: "Total number of update events sent to the SD consumers.",
		},
		[]string{"name"},
	)
)

func RegisterMetrics() {
	prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates)
}

type poolKey struct {
	setName  string
	provider string
}

// provider holds a Discoverer instance, its configuration and its subscribers.
type provider struct {
	name   string
	d      discovery.Discoverer
	subs   []string
	config interface{}
}

// NewManager is the Discovery Manager constructor.
func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager)) *Manager {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	mgr := &Manager{
		logger:         logger,
		syncCh:         make(chan map[string][]*targetgroup.Group),
		targets:        make(map[poolKey]map[string]*targetgroup.Group),
		discoverCancel: []context.CancelFunc{},
		ctx:            ctx,
		updatert:       5 * time.Second,
		triggerSend:    make(chan struct{}, 1),
	}
	for _, option := range options {
		option(mgr)
	}
	return mgr
}

// Name sets the name of the manager.
func Name(n string) func(*Manager) {
	return func(m *Manager) {
		m.mtx.Lock()
		defer m.mtx.Unlock()
		m.name = n
	}
}

// Manager maintains a set of discovery providers and sends each update to a map channel.
// Targets are grouped by the target set name.
type Manager struct {
	logger         log.Logger
	name           string
	mtx            sync.RWMutex
	ctx            context.Context
	discoverCancel []context.CancelFunc

	// Some Discoverers(eg. k8s) send only the updates for a given target group
	// so we use map[tg.Source]*targetgroup.Group to know which group to update.
	targets map[poolKey]map[string]*targetgroup.Group
	// providers keeps track of SD providers.
	providers []*provider
	// The sync channel sends the updates as a map where the key is the job value from the scrape config.
	syncCh chan map[string][]*targetgroup.Group

	// How long to wait before sending updates to the channel. The variable
	// should only be modified in unit tests.
	updatert time.Duration

	// The triggerSend channel signals to the manager that new updates have been received from providers.
	triggerSend chan struct{}
}

// Run starts the background processing
func (m *Manager) Run() error {
	go m.sender()
	for range m.ctx.Done() {
		m.cancelDiscoverers()
		return m.ctx.Err()
	}
	return nil
}

// SyncCh returns a read only channel used by all the clients to receive target updates.
func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group {
	return m.syncCh
}

// ApplyConfig removes all running discovery providers and starts new ones using the provided config.
func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	for pk := range m.targets {
		if _, ok := cfg[pk.setName]; !ok {
			discoveredTargets.DeleteLabelValues(m.name, pk.setName)
		}
	}
	m.cancelDiscoverers()
	m.targets = make(map[poolKey]map[string]*targetgroup.Group)
	m.providers = nil
	m.discoverCancel = nil

	failedCount := 0
	for name, scfg := range cfg {
		failedCount += m.registerProviders(scfg, name)
		discoveredTargets.WithLabelValues(m.name, name).Set(0)
	}
	failedConfigs.WithLabelValues(m.name).Set(float64(failedCount))

	for _, prov := range m.providers {
		m.startProvider(m.ctx, prov)
	}

	return nil
}

// StartCustomProvider is used for sdtool. Only use this if you know what you're doing.
func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker discovery.Discoverer) {
	p := &provider{
		name: name,
		d:    worker,
		subs: []string{name},
	}
	m.providers = append(m.providers, p)
	m.startProvider(ctx, p)
}

func (m *Manager) startProvider(ctx context.Context, p *provider) {
	level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs))
	ctx, cancel := context.WithCancel(ctx)
	updates := make(chan []*targetgroup.Group)

	m.discoverCancel = append(m.discoverCancel, cancel)

	go p.d.Run(ctx, updates)
	go m.updater(ctx, p, updates)
}

func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) {
	for {
		select {
		case <-ctx.Done():
			return
		case tgs, ok := <-updates:
			receivedUpdates.WithLabelValues(m.name).Inc()
			if !ok {
				level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name)
				return
			}

			for _, s := range p.subs {
				m.updateGroup(poolKey{setName: s, provider: p.name}, tgs)
			}

			select {
			case m.triggerSend <- struct{}{}:
			default:
			}
		}
	}
}

func (m *Manager) sender() {
	ticker := time.NewTicker(m.updatert)
	defer ticker.Stop()

	for {
		select {
		case <-m.ctx.Done():
			return
		case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker.
			select {
			case <-m.triggerSend:
				sentUpdates.WithLabelValues(m.name).Inc()
				select {
				case m.syncCh <- m.allGroups():
				default:
					delayedUpdates.WithLabelValues(m.name).Inc()
					level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle")
					select {
					case m.triggerSend <- struct{}{}:
					default:
					}
				}
			default:
			}
		}
	}
}

func (m *Manager) cancelDiscoverers() {
	for _, c := range m.discoverCancel {
		c()
	}
}

func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	if _, ok := m.targets[poolKey]; !ok {
		m.targets[poolKey] = make(map[string]*targetgroup.Group)
	}
	for _, tg := range tgs {
		if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics.
			m.targets[poolKey][tg.Source] = tg
		}
	}
}

func (m *Manager) allGroups() map[string][]*targetgroup.Group {
	m.mtx.RLock()
	defer m.mtx.RUnlock()

	tSets := map[string][]*targetgroup.Group{}
	n := map[string]int{}
	for pkey, tsets := range m.targets {
		for _, tg := range tsets {
			// Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager'
			// to signal that it needs to stop all scrape loops for this target set.
			tSets[pkey.setName] = append(tSets[pkey.setName], tg)
			n[pkey.setName] += len(tg.Targets)
		}
	}
	for setName, v := range n {
		discoveredTargets.WithLabelValues(m.name, setName).Set(float64(v))
	}
	return tSets
}

// registerProviders returns a number of failed SD config.
func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int {
	var (
		failed int
		added  bool
	)
	add := func(cfg discovery.Config) {
		for _, p := range m.providers {
			if reflect.DeepEqual(cfg, p.config) {
				p.subs = append(p.subs, setName)
				added = true
				return
			}
		}
		typ := cfg.Name()
		d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{
			Logger: log.With(m.logger, "discovery", typ),
		})
		if err != nil {
			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ)
			failed++
			return
		}
		m.providers = append(m.providers, &provider{
			name:   fmt.Sprintf("%s/%d", typ, len(m.providers)),
			d:      d,
			config: cfg,
			subs:   []string{setName},
		})
		added = true
	}
	for _, cfg := range cfgs {
		add(cfg)
	}
	if !added {
		// Add an empty target group to force the refresh of the corresponding
		// scrape pool and to notify the receiver that this target set has no
		// current targets.
		// It can happen because the combined set of SD configurations is empty
		// or because we fail to instantiate all the SD configurations.
		add(discovery.StaticConfig{{}})
	}
	return failed
}

// StaticProvider holds a list of target groups that never change.
type StaticProvider struct {
	TargetGroups []*targetgroup.Group
}

// Run implements the Worker interface.
func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	// We still have to consider that the consumer exits right away in which case
	// the context will be canceled.
	select {
	case ch <- sd.TargetGroups:
	case <-ctx.Done():
	}
	close(ch)
}
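The new legacymanager package preserves the pre-refactor discovery manager, in which every ApplyConfig tears down all providers and recreates them. A hedged sketch of how a caller drives it, using only the APIs shown in the file above; the job name and target address are illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/go-kit/log"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/legacymanager"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	m := legacymanager.NewManager(ctx, log.NewNopLogger())
	go m.Run()

	// One scrape job backed by a single static target.
	cfg := map[string]discovery.Configs{
		"prometheus": {discovery.StaticConfig{
			{Targets: []model.LabelSet{{model.AddressLabel: "localhost:9090"}}},
		}},
	}
	if err := m.ApplyConfig(cfg); err != nil {
		panic(err)
	}

	// SyncCh delivers the full target state, keyed by job name.
	tsets := <-m.SyncCh()
	for job, groups := range tsets {
		fmt.Println(job, len(groups), "group(s)")
	}
}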
discovery/legacymanager/manager_test.go (new file, 1140 lines; diff suppressed because it is too large)
discovery/legacymanager/registry.go (new file, 259 lines)
@@ -0,0 +1,259 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package legacymanager

import (
	"fmt"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"sync"

	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

const (
	configFieldPrefix      = "AUTO_DISCOVERY_"
	staticConfigsKey       = "static_configs"
	staticConfigsFieldName = configFieldPrefix + staticConfigsKey
)

var (
	configNames      = make(map[string]discovery.Config)
	configFieldNames = make(map[reflect.Type]string)
	configFields     []reflect.StructField

	configTypesMu sync.Mutex
	configTypes   = make(map[reflect.Type]reflect.Type)

	emptyStructType = reflect.TypeOf(struct{}{})
	configsType     = reflect.TypeOf(discovery.Configs{})
)

// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling.
func RegisterConfig(config discovery.Config) {
	registerConfig(config.Name()+"_sd_configs", reflect.TypeOf(config), config)
}

func init() {
	// N.B.: static_configs is the only Config type implemented by default.
	// All other types are registered at init by their implementing packages.
	elemTyp := reflect.TypeOf(&targetgroup.Group{})
	registerConfig(staticConfigsKey, elemTyp, discovery.StaticConfig{})
}

func registerConfig(yamlKey string, elemType reflect.Type, config discovery.Config) {
	name := config.Name()
	if _, ok := configNames[name]; ok {
		panic(fmt.Sprintf("discovery: Config named %q is already registered", name))
	}
	configNames[name] = config

	fieldName := configFieldPrefix + yamlKey // Field must be exported.
	configFieldNames[elemType] = fieldName

	// Insert fields in sorted order.
	i := sort.Search(len(configFields), func(k int) bool {
		return fieldName < configFields[k].Name
	})
	configFields = append(configFields, reflect.StructField{}) // Add empty field at end.
	copy(configFields[i+1:], configFields[i:])                 // Shift fields to the right.
	configFields[i] = reflect.StructField{                     // Write new field in place.
		Name: fieldName,
		Type: reflect.SliceOf(elemType),
		Tag:  reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`),
	}
}

func getConfigType(out reflect.Type) reflect.Type {
	configTypesMu.Lock()
	defer configTypesMu.Unlock()
	if typ, ok := configTypes[out]; ok {
		return typ
	}
	// Initial exported fields map one-to-one.
	var fields []reflect.StructField
	for i, n := 0, out.NumField(); i < n; i++ {
		switch field := out.Field(i); {
		case field.PkgPath == "" && field.Type != configsType:
			fields = append(fields, field)
		default:
			fields = append(fields, reflect.StructField{
				Name:    "_" + field.Name, // Field must be unexported.
				PkgPath: out.PkgPath(),
				Type:    emptyStructType,
			})
		}
	}
	// Append extra config fields on the end.
	fields = append(fields, configFields...)
	typ := reflect.StructOf(fields)
	configTypes[out] = typ
	return typ
}

// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs
// that have a Configs field that should be inlined.
func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error {
	outVal := reflect.ValueOf(out)
	if outVal.Kind() != reflect.Ptr {
		return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
	}
	outVal = outVal.Elem()
	if outVal.Kind() != reflect.Struct {
		return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
	}
	outTyp := outVal.Type()

	cfgTyp := getConfigType(outTyp)
	cfgPtr := reflect.New(cfgTyp)
	cfgVal := cfgPtr.Elem()

	// Copy shared fields (defaults) to dynamic value.
	var configs *discovery.Configs
	for i, n := 0, outVal.NumField(); i < n; i++ {
		if outTyp.Field(i).Type == configsType {
			configs = outVal.Field(i).Addr().Interface().(*discovery.Configs)
			continue
		}
		if cfgTyp.Field(i).PkgPath != "" {
			continue // Field is unexported: ignore.
		}
		cfgVal.Field(i).Set(outVal.Field(i))
	}
	if configs == nil {
		return fmt.Errorf("discovery: Configs field not found in type: %T", out)
	}

	// Unmarshal into dynamic value.
	if err := unmarshal(cfgPtr.Interface()); err != nil {
		return replaceYAMLTypeError(err, cfgTyp, outTyp)
	}

	// Copy shared fields from dynamic value.
	for i, n := 0, outVal.NumField(); i < n; i++ {
		if cfgTyp.Field(i).PkgPath != "" {
			continue // Field is unexported: ignore.
		}
		outVal.Field(i).Set(cfgVal.Field(i))
	}

	var err error
	*configs, err = readConfigs(cfgVal, outVal.NumField())
	return err
}

func readConfigs(structVal reflect.Value, startField int) (discovery.Configs, error) {
	var (
		configs discovery.Configs
		targets []*targetgroup.Group
	)
	for i, n := startField, structVal.NumField(); i < n; i++ {
		field := structVal.Field(i)
		if field.Kind() != reflect.Slice {
			panic("discovery: internal error: field is not a slice")
		}
		for k := 0; k < field.Len(); k++ {
			val := field.Index(k)
			if val.IsZero() || (val.Kind() == reflect.Ptr && val.Elem().IsZero()) {
				key := configFieldNames[field.Type().Elem()]
				key = strings.TrimPrefix(key, configFieldPrefix)
				return nil, fmt.Errorf("empty or null section in %s", key)
			}
			switch c := val.Interface().(type) {
			case *targetgroup.Group:
				// Add index to the static config target groups for unique identification
				// within scrape pool.
				c.Source = strconv.Itoa(len(targets))
				// Coalesce multiple static configs into a single static config.
				targets = append(targets, c)
			case discovery.Config:
				configs = append(configs, c)
			default:
				panic("discovery: internal error: slice element is not a Config")
			}
		}
	}
	if len(targets) > 0 {
		configs = append(configs, discovery.StaticConfig(targets))
	}
	return configs, nil
}

// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs
// that have a Configs field that should be inlined.
func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) {
	inVal := reflect.ValueOf(in)
	for inVal.Kind() == reflect.Ptr {
		inVal = inVal.Elem()
	}
	inTyp := inVal.Type()

	cfgTyp := getConfigType(inTyp)
	cfgPtr := reflect.New(cfgTyp)
	cfgVal := cfgPtr.Elem()

	// Copy shared fields to dynamic value.
	var configs *discovery.Configs
	for i, n := 0, inTyp.NumField(); i < n; i++ {
		if inTyp.Field(i).Type == configsType {
			configs = inVal.Field(i).Addr().Interface().(*discovery.Configs)
		}
		if cfgTyp.Field(i).PkgPath != "" {
			continue // Field is unexported: ignore.
		}
		cfgVal.Field(i).Set(inVal.Field(i))
	}
	if configs == nil {
		return nil, fmt.Errorf("discovery: Configs field not found in type: %T", in)
	}

	if err := writeConfigs(cfgVal, *configs); err != nil {
		return nil, err
	}

	return cfgPtr.Interface(), nil
}

func writeConfigs(structVal reflect.Value, configs discovery.Configs) error {
	targets := structVal.FieldByName(staticConfigsFieldName).Addr().Interface().(*[]*targetgroup.Group)
	for _, c := range configs {
		if sc, ok := c.(discovery.StaticConfig); ok {
			*targets = append(*targets, sc...)
			continue
		}
		fieldName, ok := configFieldNames[reflect.TypeOf(c)]
		if !ok {
			return fmt.Errorf("discovery: cannot marshal unregistered Config type: %T", c)
		}
		field := structVal.FieldByName(fieldName)
		field.Set(reflect.Append(field, reflect.ValueOf(c)))
	}
	return nil
}

func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
	if e, ok := err.(*yaml.TypeError); ok {
		oldStr := oldTyp.String()
		newStr := newTyp.String()
		for i, s := range e.Errors {
			e.Errors[i] = strings.Replace(s, oldStr, newStr, -1)
		}
	}
	return err
}
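registry.go duplicates the reflection-based YAML machinery so the legacy manager can keep handling <name>_sd_configs sections. For a custom discoverer, registration roughly looks like the sketch below; the mysd package, SDConfig fields, and discoverer behaviour are all hypothetical, only RegisterConfig and the discovery interfaces come from the code above:

package mysd

import (
	"context"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/legacymanager"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// SDConfig is a hypothetical SD configuration; registering it makes
// `mysd_sd_configs` sections unmarshal automatically.
type SDConfig struct {
	Server string `yaml:"server"`
}

func init() {
	legacymanager.RegisterConfig(&SDConfig{})
}

// Name identifies the config type in YAML (yields `mysd_sd_configs`).
func (*SDConfig) Name() string { return "mysd" }

// NewDiscoverer builds the Discoverer for this configuration.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return discoverer{server: c.Server}, nil
}

type discoverer struct{ server string }

// Run sends a single empty group and then waits for cancellation.
func (d discoverer) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	select {
	case ch <- []*targetgroup.Group{{Source: d.server}}:
	case <-ctx.Done():
	}
}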
@@ -65,7 +65,7 @@ var (
 	)
 )

-func init() {
+func RegisterMetrics() {
 	prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates)
 }
@@ -74,12 +74,26 @@ type poolKey struct {
 	provider string
 }

-// provider holds a Discoverer instance, its configuration and its subscribers.
+// provider holds a Discoverer instance, its configuration, cancel func and its subscribers.
 type provider struct {
 	name   string
 	d      Discoverer
-	subs   []string
 	config interface{}

+	cancel context.CancelFunc
+	// done should be called after cleaning up resources associated with cancelled provider.
+	done func()
+
+	mu   sync.RWMutex
+	subs map[string]struct{}
+
+	// newSubs is used to temporary store subs to be used upon config reload completion.
+	newSubs map[string]struct{}
+}
+
+// IsStarted return true if Discoverer is started.
+func (p *provider) IsStarted() bool {
+	return p.cancel != nil
 }

 // NewManager is the Discovery Manager constructor.
@@ -88,13 +102,12 @@ func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager
 		logger = log.NewNopLogger()
 	}
 	mgr := &Manager{
-		logger:         logger,
-		syncCh:         make(chan map[string][]*targetgroup.Group),
-		targets:        make(map[poolKey]map[string]*targetgroup.Group),
-		discoverCancel: []context.CancelFunc{},
-		ctx:            ctx,
-		updatert:       5 * time.Second,
-		triggerSend:    make(chan struct{}, 1),
+		logger:      logger,
+		syncCh:      make(chan map[string][]*targetgroup.Group),
+		targets:     make(map[poolKey]map[string]*targetgroup.Group),
+		ctx:         ctx,
+		updatert:    5 * time.Second,
+		triggerSend: make(chan struct{}, 1),
 	}
 	for _, option := range options {
 		option(mgr)
@@ -114,15 +127,16 @@ func Name(n string) func(*Manager) {
 // Manager maintains a set of discovery providers and sends each update to a map channel.
 // Targets are grouped by the target set name.
 type Manager struct {
-	logger         log.Logger
-	name           string
-	mtx            sync.RWMutex
-	ctx            context.Context
-	discoverCancel []context.CancelFunc
+	logger log.Logger
+	name   string
+	mtx    sync.RWMutex
+	ctx    context.Context

-	// Some Discoverers(eg. k8s) send only the updates for a given target group
+	// Some Discoverers(e.g. k8s) send only the updates for a given target group,
 	// so we use map[tg.Source]*targetgroup.Group to know which group to update.
-	targets map[poolKey]map[string]*targetgroup.Group
+	targets    map[poolKey]map[string]*targetgroup.Group
+	targetsMtx sync.Mutex
+
 	// providers keeps track of SD providers.
 	providers []*provider
 	// The sync channel sends the updates as a map where the key is the job value from the scrape config.

@@ -132,11 +146,14 @@ type Manager struct {
 	// should only be modified in unit tests.
 	updatert time.Duration

-	// The triggerSend channel signals to the manager that new updates have been received from providers.
+	// The triggerSend channel signals to the Manager that new updates have been received from providers.
 	triggerSend chan struct{}
+
+	// lastProvider counts providers registered during Manager's lifetime.
+	lastProvider uint
 }

-// Run starts the background processing
+// Run starts the background processing.
 func (m *Manager) Run() error {
 	go m.sender()
 	for range m.ctx.Done() {
@@ -151,31 +168,80 @@ func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group {
 	return m.syncCh
 }

-// ApplyConfig removes all running discovery providers and starts new ones using the provided config.
+// ApplyConfig checks if discovery provider with supplied config is already running and keeps them as is.
+// Remaining providers are then stopped and new required providers are started using the provided config.
 func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
 	m.mtx.Lock()
 	defer m.mtx.Unlock()

-	for pk := range m.targets {
-		if _, ok := cfg[pk.setName]; !ok {
-			discoveredTargets.DeleteLabelValues(m.name, pk.setName)
-		}
-	}
-	m.cancelDiscoverers()
-	m.targets = make(map[poolKey]map[string]*targetgroup.Group)
-	m.providers = nil
-	m.discoverCancel = nil
-
-	failedCount := 0
+	var failedCount int
 	for name, scfg := range cfg {
 		failedCount += m.registerProviders(scfg, name)
-		discoveredTargets.WithLabelValues(m.name, name).Set(0)
 	}
 	failedConfigs.WithLabelValues(m.name).Set(float64(failedCount))

+	var (
+		wg sync.WaitGroup
+		// keep shows if we keep any providers after reload.
+		keep         bool
+		newProviders []*provider
+	)
 	for _, prov := range m.providers {
-		m.startProvider(m.ctx, prov)
+		// Cancel obsolete providers.
+		if len(prov.newSubs) == 0 {
+			wg.Add(1)
+			prov.done = func() {
+				wg.Done()
+			}
+			prov.cancel()
+			continue
+		}
+		newProviders = append(newProviders, prov)
+		// refTargets keeps reference targets used to populate new subs' targets
+		var refTargets map[string]*targetgroup.Group
+		prov.mu.Lock()
+		for s := range prov.subs {
+			keep = true
+			refTargets = m.targets[poolKey{s, prov.name}]
+			// Remove obsolete subs' targets.
+			if _, ok := prov.newSubs[s]; !ok {
+				m.targetsMtx.Lock()
+				delete(m.targets, poolKey{s, prov.name})
+				m.targetsMtx.Unlock()
+				discoveredTargets.DeleteLabelValues(m.name, s)
+			}
+		}
+		// Set metrics and targets for new subs.
+		for s := range prov.newSubs {
+			if _, ok := prov.subs[s]; !ok {
+				discoveredTargets.WithLabelValues(m.name, s).Set(0)
+			}
+			if l := len(refTargets); l > 0 {
+				m.targets[poolKey{s, prov.name}] = make(map[string]*targetgroup.Group, l)
+				for k, v := range refTargets {
+					m.targets[poolKey{s, prov.name}][k] = v
+				}
+			}
+		}
+		prov.subs = prov.newSubs
+		prov.newSubs = map[string]struct{}{}
+		prov.mu.Unlock()
+		if !prov.IsStarted() {
+			m.startProvider(m.ctx, prov)
+		}
 	}
+	// Currently downstream managers expect full target state upon config reload, so we must oblige.
+	// While startProvider does pull the trigger, it may take some time to do so, therefore
+	// we pull the trigger as soon as possible so that downstream managers can populate their state.
+	// See https://github.com/prometheus/prometheus/pull/8639 for details.
+	if keep {
+		select {
+		case m.triggerSend <- struct{}{}:
+		default:
+		}
+	}
+	m.providers = newProviders
+	wg.Wait()

 	return nil
 }
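Two details in the rewritten ApplyConfig are worth isolating: providers whose config is unchanged survive the reload, so their targets never disappear downstream, and the non-blocking send on triggerSend nudges the sender without ever blocking the reload path. The one-slot trigger idiom on its own:

package main

import "fmt"

func main() {
	// A one-slot channel coalesces any number of triggers into at most
	// one pending signal; senders never block.
	trigger := make(chan struct{}, 1)

	notify := func() {
		select {
		case trigger <- struct{}{}:
		default: // a signal is already pending; drop this one
		}
	}

	notify()
	notify() // coalesced with the first
	<-trigger
	select {
	case <-trigger:
		fmt.Println("unexpected second signal")
	default:
		fmt.Println("exactly one signal delivered")
	}
}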
@@ -185,7 +251,9 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D
 	p := &provider{
 		name: name,
 		d:    worker,
-		subs: []string{name},
+		subs: map[string]struct{}{
+			name: {},
+		},
 	}
 	m.providers = append(m.providers, p)
 	m.startProvider(ctx, p)
@@ -196,13 +264,29 @@ func (m *Manager) startProvider(ctx context.Context, p *provider) {
 	ctx, cancel := context.WithCancel(ctx)
 	updates := make(chan []*targetgroup.Group)

-	m.discoverCancel = append(m.discoverCancel, cancel)
+	p.cancel = cancel

 	go p.d.Run(ctx, updates)
 	go m.updater(ctx, p, updates)
 }

+// cleaner cleans resources associated with provider.
+func (m *Manager) cleaner(p *provider) {
+	m.targetsMtx.Lock()
+	p.mu.RLock()
+	for s := range p.subs {
+		delete(m.targets, poolKey{s, p.name})
+	}
+	p.mu.RUnlock()
+	m.targetsMtx.Unlock()
+	if p.done != nil {
+		p.done()
+	}
+}
+
 func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) {
+	// Ensure targets from this provider are cleaned up.
+	defer m.cleaner(p)
 	for {
 		select {
 		case <-ctx.Done():
@@ -211,12 +295,16 @@ func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targ
 			receivedUpdates.WithLabelValues(m.name).Inc()
 			if !ok {
 				level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name)
+				// Wait for provider cancellation to ensure targets are cleaned up when expected.
+				<-ctx.Done()
 				return
 			}

-			for _, s := range p.subs {
+			p.mu.RLock()
+			for s := range p.subs {
 				m.updateGroup(poolKey{setName: s, provider: p.name}, tgs)
 			}
+			p.mu.RUnlock()

 			select {
 			case m.triggerSend <- struct{}{}:
@@ -234,7 +322,7 @@ func (m *Manager) sender() {
 		select {
 		case <-m.ctx.Done():
 			return
-		case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker.
+		case <-ticker.C: // Some discoverers send updates too often, so we throttle these with the ticker.
 			select {
 			case <-m.triggerSend:
 				sentUpdates.WithLabelValues(m.name).Inc()
@@ -255,14 +343,18 @@ func (m *Manager) sender() {
 }

 func (m *Manager) cancelDiscoverers() {
-	for _, c := range m.discoverCancel {
-		c()
+	m.mtx.RLock()
+	defer m.mtx.RUnlock()
+	for _, p := range m.providers {
+		if p.cancel != nil {
+			p.cancel()
+		}
 	}
 }

 func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
+	m.targetsMtx.Lock()
+	defer m.targetsMtx.Unlock()

 	if _, ok := m.targets[poolKey]; !ok {
 		m.targets[poolKey] = make(map[string]*targetgroup.Group)

@@ -275,11 +367,11 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) {
 }

 func (m *Manager) allGroups() map[string][]*targetgroup.Group {
-	m.mtx.RLock()
-	defer m.mtx.RUnlock()
-
 	tSets := map[string][]*targetgroup.Group{}
 	n := map[string]int{}
+
+	m.targetsMtx.Lock()
+	defer m.targetsMtx.Unlock()
 	for pkey, tsets := range m.targets {
 		for _, tg := range tsets {
 			// Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager'
@@ -303,7 +395,7 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int {
 	add := func(cfg Config) {
 		for _, p := range m.providers {
 			if reflect.DeepEqual(cfg, p.config) {
-				p.subs = append(p.subs, setName)
+				p.newSubs[setName] = struct{}{}
 				added = true
 				return
 			}

@@ -318,11 +410,14 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int {
 			return
 		}
 		m.providers = append(m.providers, &provider{
-			name:   fmt.Sprintf("%s/%d", typ, len(m.providers)),
+			name:   fmt.Sprintf("%s/%d", typ, m.lastProvider),
 			d:      d,
 			config: cfg,
-			subs:   []string{setName},
+			newSubs: map[string]struct{}{
+				setName: {},
+			},
 		})
+		m.lastProvider++
 		added = true
 	}
 	for _, cfg := range cfgs {
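registerProviders dedupes by deep equality of the raw config, so two scrape jobs with identical SD sections share one provider and one set of upstream API calls; newSubs records the subscribing jobs until the reload commits. The dedup check in isolation, with simplified hypothetical types:

package main

import (
	"fmt"
	"reflect"
)

type provider struct {
	config  interface{}
	newSubs map[string]struct{}
}

// addConfig subscribes setName to an existing provider when an equal
// config is already registered, instead of creating a duplicate.
func addConfig(providers []*provider, cfg interface{}, setName string) []*provider {
	for _, p := range providers {
		if reflect.DeepEqual(cfg, p.config) {
			p.newSubs[setName] = struct{}{}
			return providers
		}
	}
	return append(providers, &provider{
		config:  cfg,
		newSubs: map[string]struct{}{setName: {}},
	})
}

func main() {
	type sd struct{ Server string }
	var ps []*provider
	ps = addConfig(ps, sd{"https://example.org"}, "job1")
	ps = addConfig(ps, sd{"https://example.org"}, "job2") // reused
	fmt.Println(len(ps), len(ps[0].newSubs))              // 1 2
}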
@@ -18,6 +18,7 @@ import (
 	"fmt"
 	"sort"
 	"strconv"
+	"sync"
 	"testing"
 	"time"

@@ -36,7 +37,6 @@ func TestMain(m *testing.M) {

 // TestTargetUpdatesOrder checks that the target updates are received in the expected order.
 func TestTargetUpdatesOrder(t *testing.T) {
-
 	// The order by which the updates are send is determined by the interval passed to the mock discovery adapter
 	// Final targets array is ordered alphabetically by the name of the discoverer.
 	// For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge.

@@ -116,7 +116,8 @@ func TestTargetUpdatesOrder(t *testing.T) {
 						{
 							Source:  "tp1_group2",
 							Targets: []model.LabelSet{{"__instance__": "2"}},
-						}},
+						},
+					},
 				},
 			},
 		},

@@ -718,6 +719,31 @@ func staticConfig(addrs ...string) StaticConfig {
 	return cfg
 }

+func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, key, label string, present bool) {
+	t.Helper()
+	if _, ok := tGroups[key]; !ok {
+		t.Fatalf("'%s' should be present in Group map keys: %v", key, tGroups)
+		return
+	}
+	match := false
+	var mergedTargets string
+	for _, targetGroups := range tGroups[key] {
+		for _, l := range targetGroups.Targets {
+			mergedTargets = mergedTargets + " " + l.String()
+			if l.String() == label {
+				match = true
+			}
+		}
+	}
+	if match != present {
+		msg := ""
+		if !present {
+			msg = "not"
+		}
+		t.Fatalf("%q should %s be present in Group labels: %q", label, msg, mergedTargets)
+	}
+}
+
 func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Group, poolKey poolKey, label string, present bool) {
 	t.Helper()
 	if _, ok := tSets[poolKey]; !ok {

@@ -728,14 +754,12 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
 	match := false
 	var mergedTargets string
 	for _, targetGroup := range tSets[poolKey] {
-
 		for _, l := range targetGroup.Targets {
 			mergedTargets = mergedTargets + " " + l.String()
 			if l.String() == label {
 				match = true
 			}
 		}
-
 	}
 	if match != present {
 		msg := ""

@@ -746,7 +770,180 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
 	}
 }

-func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
+func pk(provider, setName string, n int) poolKey {
+	return poolKey{
+		setName:  setName,
+		provider: fmt.Sprintf("%s/%d", provider, n),
+	}
+}
+
+func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager.updatert = 100 * time.Millisecond
+	go discoveryManager.Run()
+
+	c := map[string]Configs{
+		"prometheus": {
+			staticConfig("foo:9090"),
+		},
+	}
+	discoveryManager.ApplyConfig(c)
+
+	syncedTargets := <-discoveryManager.SyncCh()
+	require.Equal(t, 1, len(syncedTargets))
+	verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(syncedTargets["prometheus"]))
+	p := pk("static", "prometheus", 0)
+	verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(discoveryManager.targets))
+
+	discoveryManager.ApplyConfig(c)
+
+	syncedTargets = <-discoveryManager.SyncCh()
+	verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(discoveryManager.targets))
+	require.Equal(t, 1, len(syncedTargets))
+	verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(syncedTargets["prometheus"]))
+}
+
+func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager.updatert = 100 * time.Millisecond
+	go discoveryManager.Run()
+
+	c := map[string]Configs{
+		"prometheus": {
+			staticConfig("foo:9090"),
+		},
+	}
+	discoveryManager.ApplyConfig(c)
+
+	syncedTargets := <-discoveryManager.SyncCh()
+	require.Equal(t, 1, len(syncedTargets))
+	verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(syncedTargets["prometheus"]))
+	p := pk("static", "prometheus", 0)
+	verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(discoveryManager.targets))
+
+	c["prometheus2"] = c["prometheus"]
+	delete(c, "prometheus")
+	discoveryManager.ApplyConfig(c)
+
+	syncedTargets = <-discoveryManager.SyncCh()
+	p = pk("static", "prometheus2", 0)
+	verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(discoveryManager.targets))
+	require.Equal(t, 1, len(syncedTargets))
+	verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(syncedTargets["prometheus2"]))
+}
+
+func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager.updatert = 100 * time.Millisecond
+	go discoveryManager.Run()
+
+	c := map[string]Configs{
+		"prometheus": {
+			staticConfig("foo:9090"),
+		},
+	}
+	discoveryManager.ApplyConfig(c)
+	<-discoveryManager.SyncCh()
+
+	c["prometheus2"] = c["prometheus"]
+	discoveryManager.ApplyConfig(c)
+	syncedTargets := <-discoveryManager.SyncCh()
+	require.Equal(t, 2, len(syncedTargets))
+	verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(syncedTargets["prometheus"]))
+	verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(syncedTargets["prometheus2"]))
+	p := pk("static", "prometheus", 0)
+	verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 2, len(discoveryManager.targets))
+
+	delete(c, "prometheus")
+	discoveryManager.ApplyConfig(c)
+	syncedTargets = <-discoveryManager.SyncCh()
+	p = pk("static", "prometheus2", 0)
+	verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(discoveryManager.targets))
+	require.Equal(t, 1, len(syncedTargets))
+	verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(syncedTargets["prometheus2"]))
+}
+
+func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager.updatert = 100 * time.Millisecond
+	go discoveryManager.Run()
+
+	c := map[string]Configs{
+		"prometheus": {
+			staticConfig("foo:9090"),
+		},
+	}
+	discoveryManager.ApplyConfig(c)
+
+	syncedTargets := <-discoveryManager.SyncCh()
+	require.Equal(t, 1, len(syncedTargets))
+	verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(syncedTargets["prometheus"]))
+
+	var mu sync.Mutex
+	c["prometheus2"] = Configs{
+		lockStaticConfig{
+			mu:     &mu,
+			config: staticConfig("bar:9090"),
+		},
+	}
+	mu.Lock()
+	discoveryManager.ApplyConfig(c)
+
+	// Original targets should be present as soon as possible.
+	syncedTargets = <-discoveryManager.SyncCh()
+	mu.Unlock()
+	require.Equal(t, 1, len(syncedTargets))
+	verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(syncedTargets["prometheus"]))
+
+	// prometheus2 configs should be ready on second sync.
+	syncedTargets = <-discoveryManager.SyncCh()
+	require.Equal(t, 2, len(syncedTargets))
+	verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(syncedTargets["prometheus"]))
+	verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"bar:9090\"}", true)
+	require.Equal(t, 1, len(syncedTargets["prometheus2"]))
+
+	p := pk("static", "prometheus", 0)
+	verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
+	p = pk("lockstatic", "prometheus2", 1)
+	verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true)
+	require.Equal(t, 2, len(discoveryManager.targets))
+
+	// Delete part of config and ensure only original targets exist.
+	delete(c, "prometheus2")
+	discoveryManager.ApplyConfig(c)
+	syncedTargets = <-discoveryManager.SyncCh()
+	require.Equal(t, 1, len(discoveryManager.targets))
+	verifyPresence(t, discoveryManager.targets, pk("static", "prometheus", 0), "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(syncedTargets))
+	verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
+	require.Equal(t, 1, len(syncedTargets["prometheus"]))
+}
+
+func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	discoveryManager := NewManager(ctx, log.NewNopLogger())

@@ -760,18 +957,29 @@ func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
 	}
 	discoveryManager.ApplyConfig(c)

-	<-discoveryManager.SyncCh()
-	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
-	verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true)
+	syncedTargets := <-discoveryManager.SyncCh()
+	p := pk("static", "prometheus", 0)
+	verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
+	verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true)
+	require.Equal(t, 1, len(discoveryManager.targets))
+	require.Equal(t, 1, len(syncedTargets))
|
||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true)
|
||||
require.Equal(t, 2, len(syncedTargets["prometheus"]))
|
||||
|
||||
c["prometheus"] = Configs{
|
||||
staticConfig("foo:9090"),
|
||||
}
|
||||
discoveryManager.ApplyConfig(c)
|
||||
|
||||
<-discoveryManager.SyncCh()
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false)
|
||||
syncedTargets = <-discoveryManager.SyncCh()
|
||||
require.Equal(t, 1, len(discoveryManager.targets))
|
||||
p = pk("static", "prometheus", 1)
|
||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", false)
|
||||
require.Equal(t, 1, len(discoveryManager.targets))
|
||||
require.Equal(t, 1, len(syncedTargets))
|
||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||
require.Equal(t, 1, len(syncedTargets["prometheus"]))
|
||||
}
|
||||
|
||||
func TestDiscovererConfigs(t *testing.T) {
|
||||
|
@ -789,10 +997,18 @@ func TestDiscovererConfigs(t *testing.T) {
|
|||
}
|
||||
discoveryManager.ApplyConfig(c)
|
||||
|
||||
<-discoveryManager.SyncCh()
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/1"}, "{__address__=\"baz:9090\"}", true)
|
||||
syncedTargets := <-discoveryManager.SyncCh()
|
||||
p := pk("static", "prometheus", 0)
|
||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true)
|
||||
p = pk("static", "prometheus", 1)
|
||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"baz:9090\"}", true)
|
||||
require.Equal(t, 2, len(discoveryManager.targets))
|
||||
require.Equal(t, 1, len(syncedTargets))
|
||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true)
|
||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"baz:9090\"}", true)
|
||||
require.Equal(t, 3, len(syncedTargets["prometheus"]))
|
||||
}
|
||||
|
||||
// TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after
|
||||
|
@ -812,20 +1028,23 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
|
|||
}
|
||||
discoveryManager.ApplyConfig(c)
|
||||
|
||||
<-discoveryManager.SyncCh()
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
syncedTargets := <-discoveryManager.SyncCh()
|
||||
p := pk("static", "prometheus", 0)
|
||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
||||
require.Equal(t, 1, len(syncedTargets))
|
||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||
require.Equal(t, 1, len(syncedTargets["prometheus"]))
|
||||
|
||||
c["prometheus"] = Configs{
|
||||
StaticConfig{{}},
|
||||
}
|
||||
discoveryManager.ApplyConfig(c)
|
||||
|
||||
<-discoveryManager.SyncCh()
|
||||
|
||||
pkey := poolKey{setName: "prometheus", provider: "static/0"}
|
||||
targetGroups, ok := discoveryManager.targets[pkey]
|
||||
syncedTargets = <-discoveryManager.SyncCh()
|
||||
p = pk("static", "prometheus", 1)
|
||||
targetGroups, ok := discoveryManager.targets[p]
|
||||
if !ok {
|
||||
t.Fatalf("'%v' should be present in target groups", pkey)
|
||||
t.Fatalf("'%v' should be present in target groups", p)
|
||||
}
|
||||
group, ok := targetGroups[""]
|
||||
if !ok {
|
||||
|
@ -835,6 +1054,11 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
|
|||
if len(group.Targets) != 0 {
|
||||
t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets))
|
||||
}
|
||||
require.Equal(t, 1, len(syncedTargets))
|
||||
require.Equal(t, 1, len(syncedTargets["prometheus"]))
|
||||
if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil {
|
||||
t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
|
||||
|
@ -854,12 +1078,17 @@ func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
|
|||
}
|
||||
discoveryManager.ApplyConfig(c)
|
||||
|
||||
<-discoveryManager.SyncCh()
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus2", provider: "static/0"}, "{__address__=\"foo:9090\"}", true)
|
||||
syncedTargets := <-discoveryManager.SyncCh()
|
||||
verifyPresence(t, discoveryManager.targets, pk("static", "prometheus", 0), "{__address__=\"foo:9090\"}", true)
|
||||
verifyPresence(t, discoveryManager.targets, pk("static", "prometheus2", 0), "{__address__=\"foo:9090\"}", true)
|
||||
if len(discoveryManager.providers) != 1 {
|
||||
t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers))
|
||||
}
|
||||
require.Equal(t, 2, len(syncedTargets))
|
||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||
require.Equal(t, 1, len(syncedTargets["prometheus"]))
|
||||
verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
|
||||
require.Equal(t, 1, len(syncedTargets["prometheus2"]))
|
||||
}
|
||||
|
||||
func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
|
||||
|
@ -891,6 +1120,29 @@ type errorConfig struct{ err error }
|
|||
func (e errorConfig) Name() string { return "error" }
|
||||
func (e errorConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { return nil, e.err }
|
||||
|
||||
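// lockStaticConfig is a test helper: its discoverer blocks on the shared
// mutex before sending its single update, so a test can hold the lock to
// decide exactly when a new provider's targets become visible on SyncCh.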
type lockStaticConfig struct {
|
||||
mu *sync.Mutex
|
||||
config StaticConfig
|
||||
}
|
||||
|
||||
func (s lockStaticConfig) Name() string { return "lockstatic" }
|
||||
func (s lockStaticConfig) NewDiscoverer(options DiscovererOptions) (Discoverer, error) {
|
||||
return (lockStaticDiscoverer)(s), nil
|
||||
}
|
||||
|
||||
type lockStaticDiscoverer lockStaticConfig
|
||||
|
||||
func (s lockStaticDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.Group) {
|
||||
// TODO: existing implementation closes up chan, but documentation explicitly forbids it...?
|
||||
defer close(up)
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case up <- s.config:
|
||||
}
|
||||
}
|
||||
|
||||
func TestGaugeFailedConfigs(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
@ -923,7 +1175,6 @@ func TestGaugeFailedConfigs(t *testing.T) {
|
|||
if failedCount != 0 {
|
||||
t.Fatalf("Expected to get no failed config, got: %v", failedCount)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestCoordinationWithReceiver(t *testing.T) {
|
||||
|
@ -1115,7 +1366,11 @@ func (tp mockdiscoveryProvider) Run(ctx context.Context, upCh chan<- []*targetgr
|
|||
for i := range u.targetGroups {
|
||||
tgs[i] = &u.targetGroups[i]
|
||||
}
|
||||
upCh <- tgs
|
||||
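// The cancellable send below replaces the bare `upCh <- tgs` above: if the
// consumer has stopped reading, a plain send would block forever and leak
// this goroutine once the test's context is cancelled.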
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case upCh <- tgs:
|
||||
}
|
||||
}
|
||||
<-ctx.Done()
|
||||
}
|
||||
|
|
|
@ -478,7 +478,6 @@ func targetsForApp(app *app) []model.LabelSet {
|
|||
|
||||
// Generate a target endpoint string in host:port format.
|
||||
func targetEndpoint(task *task, port uint32, containerNet bool) string {
|
||||
|
||||
var host string
|
||||
|
||||
// Use the task's ipAddress field when it's in a container network
|
||||
|
@ -493,7 +492,6 @@ func targetEndpoint(task *task, port uint32, containerNet bool) string {
|
|||
|
||||
// Get a list of ports and a list of labels from a PortMapping.
|
||||
func extractPortMapping(portMappings []portMapping, containerNet bool) ([]uint32, []map[string]string) {
|
||||
|
||||
ports := make([]uint32, len(portMappings))
|
||||
labels := make([]map[string]string, len(portMappings))
|
||||
|
||||
|
|
|
@ -60,9 +60,7 @@ func TestMarathonSDHandleError(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestMarathonSDEmptyList(t *testing.T) {
|
||||
var (
|
||||
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return &appList{}, nil }
|
||||
)
|
||||
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return &appList{}, nil }
|
||||
tgs, err := testUpdateServices(client)
|
||||
if err != nil {
|
||||
t.Fatalf("Got error: %s", err)
|
||||
|
@ -99,11 +97,9 @@ func marathonTestAppList(labels map[string]string, runningTasks int) *appList {
|
|||
}
|
||||
|
||||
func TestMarathonSDSendGroup(t *testing.T) {
|
||||
var (
|
||||
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppList(marathonValidLabel, 1), nil
|
||||
}
|
||||
)
|
||||
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppList(marathonValidLabel, 1), nil
|
||||
}
|
||||
tgs, err := testUpdateServices(client)
|
||||
if err != nil {
|
||||
t.Fatalf("Got error: %s", err)
|
||||
|
@ -195,11 +191,9 @@ func marathonTestAppListWithMultiplePorts(labels map[string]string, runningTasks
|
|||
}
|
||||
|
||||
func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) {
|
||||
var (
|
||||
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil
|
||||
}
|
||||
)
|
||||
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil
|
||||
}
|
||||
tgs, err := testUpdateServices(client)
|
||||
if err != nil {
|
||||
t.Fatalf("Got error: %s", err)
|
||||
|
@ -254,11 +248,9 @@ func marathonTestZeroTaskPortAppList(labels map[string]string, runningTasks int)
|
|||
}
|
||||
|
||||
func TestMarathonZeroTaskPorts(t *testing.T) {
|
||||
var (
|
||||
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil
|
||||
}
|
||||
)
|
||||
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil
|
||||
}
|
||||
tgs, err := testUpdateServices(client)
|
||||
if err != nil {
|
||||
t.Fatalf("Got error: %s", err)
|
||||
|
@ -331,11 +323,9 @@ func marathonTestAppListWithPortDefinitions(labels map[string]string, runningTas
|
|||
}
|
||||
|
||||
func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) {
|
||||
var (
|
||||
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil
|
||||
}
|
||||
)
|
||||
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil
|
||||
}
|
||||
tgs, err := testUpdateServices(client)
|
||||
if err != nil {
|
||||
t.Fatalf("Got error: %s", err)
|
||||
|
@ -403,11 +393,9 @@ func marathonTestAppListWithPortDefinitionsRequirePorts(labels map[string]string
|
|||
}
|
||||
|
||||
func TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) {
|
||||
var (
|
||||
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil
|
||||
}
|
||||
)
|
||||
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil
|
||||
}
|
||||
tgs, err := testUpdateServices(client)
|
||||
if err != nil {
|
||||
t.Fatalf("Got error: %s", err)
|
||||
|
@ -470,11 +458,9 @@ func marathonTestAppListWithPorts(labels map[string]string, runningTasks int) *a
|
|||
}
|
||||
|
||||
func TestMarathonSDSendGroupWithPorts(t *testing.T) {
|
||||
var (
|
||||
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppListWithPorts(marathonValidLabel, 1), nil
|
||||
}
|
||||
)
|
||||
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppListWithPorts(marathonValidLabel, 1), nil
|
||||
}
|
||||
tgs, err := testUpdateServices(client)
|
||||
if err != nil {
|
||||
t.Fatalf("Got error: %s", err)
|
||||
|
@ -546,11 +532,9 @@ func marathonTestAppListWithContainerPortMappings(labels map[string]string, runn
|
|||
}
|
||||
|
||||
func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) {
|
||||
var (
|
||||
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil
|
||||
}
|
||||
)
|
||||
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil
|
||||
}
|
||||
tgs, err := testUpdateServices(client)
|
||||
if err != nil {
|
||||
t.Fatalf("Got error: %s", err)
|
||||
|
@ -622,11 +606,9 @@ func marathonTestAppListWithDockerContainerPortMappings(labels map[string]string
|
|||
}
|
||||
|
||||
func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) {
|
||||
var (
|
||||
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil
|
||||
}
|
||||
)
|
||||
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil
|
||||
}
|
||||
tgs, err := testUpdateServices(client)
|
||||
if err != nil {
|
||||
t.Fatalf("Got error: %s", err)
|
||||
|
@ -702,11 +684,9 @@ func marathonTestAppListWithContainerNetworkAndPortMappings(labels map[string]st
|
|||
}
|
||||
|
||||
func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) {
|
||||
var (
|
||||
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil
|
||||
}
|
||||
)
|
||||
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
|
||||
return marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil
|
||||
}
|
||||
tgs, err := testUpdateServices(client)
|
||||
if err != nil {
|
||||
t.Fatalf("Got error: %s", err)
|
||||
|
|
|
@ -28,6 +28,7 @@ import (
|
|||
"github.com/go-kit/log"
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
"github.com/prometheus/prometheus/discovery/refresh"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
|
|
|
@ -19,6 +19,7 @@ import (
|
|||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/client"
|
||||
|
||||
"github.com/prometheus/prometheus/util/strutil"
|
||||
)
|
||||
|
||||
|
|
|
@ -51,8 +51,10 @@ type HypervisorDiscovery struct {
|
|||
// newHypervisorDiscovery returns a new hypervisor discovery.
|
||||
func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions,
|
||||
port int, region string, availability gophercloud.Availability, l log.Logger) *HypervisorDiscovery {
|
||||
return &HypervisorDiscovery{provider: provider, authOpts: opts,
|
||||
region: region, port: port, availability: availability, logger: l}
|
||||
return &HypervisorDiscovery{
|
||||
provider: provider, authOpts: opts,
|
||||
region: region, port: port, availability: availability, logger: l,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
||||
|
|
|
@ -47,7 +47,6 @@ func (s *OpenstackSDHypervisorTestSuite) openstackAuthSuccess() (refresher, erro
|
|||
}
|
||||
|
||||
func TestOpenstackSDHypervisorRefresh(t *testing.T) {
|
||||
|
||||
mock := &OpenstackSDHypervisorTestSuite{}
|
||||
mock.SetupTest(t)
|
||||
|
||||
|
|
|
@ -63,8 +63,10 @@ func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gopherclou
|
|||
if l == nil {
|
||||
l = log.NewNopLogger()
|
||||
}
|
||||
return &InstanceDiscovery{provider: provider, authOpts: opts,
|
||||
region: region, port: port, allTenants: allTenants, availability: availability, logger: l}
|
||||
return &InstanceDiscovery{
|
||||
provider: provider, authOpts: opts,
|
||||
region: region, port: port, allTenants: allTenants, availability: availability, logger: l,
|
||||
}
|
||||
}
|
||||
|
||||
type floatingIPKey struct {
|
||||
|
|
|
@ -51,7 +51,6 @@ func (s *OpenstackSDInstanceTestSuite) openstackAuthSuccess() (refresher, error)
|
|||
}
|
||||
|
||||
func TestOpenstackSDInstanceRefresh(t *testing.T) {
|
||||
|
||||
mock := &OpenstackSDInstanceTestSuite{}
|
||||
mock.SetupTest(t)
|
||||
|
||||
|
|
|
@ -54,7 +54,7 @@ func testMethod(t *testing.T, r *http.Request, expected string) {
|
|||
}
|
||||
}
|
||||
|
||||
func testHeader(t *testing.T, r *http.Request, header string, expected string) {
|
||||
func testHeader(t *testing.T, r *http.Request, header, expected string) {
|
||||
if actual := r.Header.Get(header); expected != actual {
|
||||
t.Errorf("Header %s = %s, expected %s", header, actual, expected)
|
||||
}
|
||||
|
|
|
@ -145,7 +145,6 @@ func NewDiscovery(conf *SDConfig, l log.Logger) (*refresh.Discovery, error) {
|
|||
time.Duration(conf.RefreshInterval),
|
||||
r.refresh,
|
||||
), nil
|
||||
|
||||
}
|
||||
|
||||
func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {
|
||||
|
|
|
@ -25,8 +25,9 @@ import (
|
|||
"github.com/go-kit/log"
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
)
|
||||
|
||||
func mockServer(t *testing.T) *httptest.Server {
|
||||
|
|
|
@ -18,6 +18,7 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/util/strutil"
|
||||
)
|
||||
|
||||
|
|
|
@ -25,10 +25,11 @@ import (
|
|||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/version"
|
||||
"github.com/prometheus/prometheus/discovery/refresh"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
"github.com/scaleway/scaleway-sdk-go/api/baremetal/v1"
|
||||
"github.com/scaleway/scaleway-sdk-go/scw"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery/refresh"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
)
|
||||
|
||||
type baremetalDiscovery struct {
|
||||
|
|
|
@ -25,10 +25,11 @@ import (
|
|||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/version"
|
||||
"github.com/prometheus/prometheus/discovery/refresh"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
"github.com/scaleway/scaleway-sdk-go/api/instance/v1"
|
||||
"github.com/scaleway/scaleway-sdk-go/scw"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery/refresh"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
|
@ -24,10 +24,11 @@ import (
|
|||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/scaleway/scaleway-sdk-go/scw"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
"github.com/prometheus/prometheus/discovery/refresh"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
"github.com/scaleway/scaleway-sdk-go/scw"
|
||||
)
|
||||
|
||||
// metaLabelPrefix is the meta prefix used for all meta labels.
|
||||
|
@ -173,8 +174,7 @@ func init() {
|
|||
|
||||
// Discovery periodically performs Scaleway requests. It implements
|
||||
// the Discoverer interface.
|
||||
type Discovery struct {
|
||||
}
|
||||
type Discovery struct{}
|
||||
|
||||
func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) {
|
||||
r, err := newRefresher(conf)
|
||||
|
|
|
@ -38,7 +38,8 @@ func TestTargetGroupStrictJsonUnmarshal(t *testing.T) {
|
|||
expectedReply: nil,
|
||||
expectedGroup: Group{Targets: []model.LabelSet{
|
||||
{"__address__": "localhost:9090"},
|
||||
{"__address__": "localhost:9091"}}, Labels: model.LabelSet{"my": "label"}},
|
||||
{"__address__": "localhost:9091"},
|
||||
}, Labels: model.LabelSet{"my": "label"}},
|
||||
},
|
||||
{
|
||||
json: ` {"label": {},"targets": []}`,
|
||||
|
@ -56,7 +57,6 @@ func TestTargetGroupStrictJsonUnmarshal(t *testing.T) {
|
|||
require.Equal(t, test.expectedReply, actual)
|
||||
require.Equal(t, test.expectedGroup, tg)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestTargetGroupYamlMarshal(t *testing.T) {
|
||||
|
@ -81,10 +81,13 @@ func TestTargetGroupYamlMarshal(t *testing.T) {
|
|||
},
|
||||
{
|
||||
// targets only exposes addresses.
|
||||
group: Group{Targets: []model.LabelSet{
|
||||
{"__address__": "localhost:9090"},
|
||||
{"__address__": "localhost:9091"}},
|
||||
Labels: model.LabelSet{"foo": "bar", "bar": "baz"}},
|
||||
group: Group{
|
||||
Targets: []model.LabelSet{
|
||||
{"__address__": "localhost:9090"},
|
||||
{"__address__": "localhost:9091"},
|
||||
},
|
||||
Labels: model.LabelSet{"foo": "bar", "bar": "baz"},
|
||||
},
|
||||
expectedYaml: "targets:\n- localhost:9090\n- localhost:9091\nlabels:\n bar: baz\n foo: bar\n",
|
||||
expectedErr: nil,
|
||||
},
|
||||
|
@ -120,7 +123,8 @@ func TestTargetGroupYamlUnmarshal(t *testing.T) {
|
|||
expectedReply: nil,
|
||||
expectedGroup: Group{Targets: []model.LabelSet{
|
||||
{"__address__": "localhost:9090"},
|
||||
{"__address__": "localhost:9191"}}, Labels: model.LabelSet{"my": "label"}},
|
||||
{"__address__": "localhost:9191"},
|
||||
}, Labels: model.LabelSet{"my": "label"}},
|
||||
},
|
||||
{
|
||||
// incorrect syntax.
|
||||
|
@ -135,21 +139,25 @@ func TestTargetGroupYamlUnmarshal(t *testing.T) {
|
|||
require.Equal(t, test.expectedReply, actual)
|
||||
require.Equal(t, test.expectedGroup, tg)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestString(t *testing.T) {
|
||||
// String() should return only the source, regardless of other attributes.
|
||||
group1 :=
|
||||
Group{Targets: []model.LabelSet{
|
||||
{"__address__": "localhost:9090"},
|
||||
{"__address__": "localhost:9091"}},
|
||||
Group{
|
||||
Targets: []model.LabelSet{
|
||||
{"__address__": "localhost:9090"},
|
||||
{"__address__": "localhost:9091"},
|
||||
},
|
||||
Source: "<source>",
|
||||
Labels: model.LabelSet{"foo": "bar", "bar": "baz"}}
|
||||
Labels: model.LabelSet{"foo": "bar", "bar": "baz"},
|
||||
}
|
||||
group2 :=
|
||||
Group{Targets: []model.LabelSet{},
|
||||
Source: "<source>",
|
||||
Labels: model.LabelSet{}}
|
||||
Group{
|
||||
Targets: []model.LabelSet{},
|
||||
Source: "<source>",
|
||||
Labels: model.LabelSet{},
|
||||
}
|
||||
require.Equal(t, "<source>", group1.String())
|
||||
require.Equal(t, "<source>", group2.String())
|
||||
require.Equal(t, group1.String(), group2.String())
|
||||
|
|
|
@ -188,9 +188,9 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
|||
case "cn":
|
||||
endpointFormat = "https://%s:%d/v%d/gz/discover"
|
||||
default:
|
||||
return nil, errors.New(fmt.Sprintf("unknown role '%s' in configuration", d.sdConfig.Role))
|
||||
return nil, fmt.Errorf("unknown role '%s' in configuration", d.sdConfig.Role)
|
||||
}
|
||||
var endpoint = fmt.Sprintf(endpointFormat, d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version)
|
||||
endpoint := fmt.Sprintf(endpointFormat, d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version)
|
||||
if len(d.sdConfig.Groups) > 0 {
|
||||
groups := url.QueryEscape(strings.Join(d.sdConfig.Groups, ","))
|
||||
endpoint = fmt.Sprintf("%s?groups=%s", endpoint, groups)
|
||||
|
@ -223,7 +223,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
|||
case "cn":
|
||||
return d.processComputeNodeResponse(data, endpoint)
|
||||
default:
|
||||
return nil, errors.New(fmt.Sprintf("unknown role '%s' in configuration", d.sdConfig.Role))
|
||||
return nil, fmt.Errorf("unknown role '%s' in configuration", d.sdConfig.Role)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -135,8 +135,7 @@ func TestTritonSDRefreshNoTargets(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTritonSDRefreshMultipleTargets(t *testing.T) {
|
||||
var (
|
||||
dstr = `{"containers":[
|
||||
dstr := `{"containers":[
|
||||
{
|
||||
"groups":["foo","bar","baz"],
|
||||
"server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131",
|
||||
|
@ -153,7 +152,6 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) {
|
|||
"vm_uuid":"7b27a514-89d7-11e6-bee6-3f96f367bee7"
|
||||
}]
|
||||
}`
|
||||
)
|
||||
|
||||
tgts := testTritonSDRefresh(t, conf, dstr)
|
||||
require.NotNil(t, tgts)
|
||||
|
@ -161,9 +159,7 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTritonSDRefreshNoServer(t *testing.T) {
|
||||
var (
|
||||
td, _ = newTritonDiscovery(conf)
|
||||
)
|
||||
td, _ := newTritonDiscovery(conf)
|
||||
|
||||
_, err := td.refresh(context.Background())
|
||||
require.Error(t, err)
|
||||
|
@ -171,9 +167,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTritonSDRefreshCancelled(t *testing.T) {
|
||||
var (
|
||||
td, _ = newTritonDiscovery(conf)
|
||||
)
|
||||
td, _ := newTritonDiscovery(conf)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
|
@ -183,8 +177,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
|
||||
var (
|
||||
dstr = `{"cns":[
|
||||
dstr := `{"cns":[
|
||||
{
|
||||
"server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131"
|
||||
},
|
||||
|
@ -192,7 +185,6 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
|
|||
"server_uuid":"a5894692-bd32-4ca1-908a-e2dda3c3a5e6"
|
||||
}]
|
||||
}`
|
||||
)
|
||||
|
||||
tgts := testTritonSDRefresh(t, cnconf, dstr)
|
||||
require.NotNil(t, tgts)
|
||||
|
@ -200,8 +192,7 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTritonSDRefreshCNsWithHostname(t *testing.T) {
|
||||
var (
|
||||
dstr = `{"cns":[
|
||||
dstr := `{"cns":[
|
||||
{
|
||||
"server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131",
|
||||
"server_hostname": "server01"
|
||||
|
@ -211,7 +202,6 @@ func TestTritonSDRefreshCNsWithHostname(t *testing.T) {
|
|||
"server_hostname": "server02"
|
||||
}]
|
||||
}`
|
||||
)
|
||||
|
||||
tgts := testTritonSDRefresh(t, cnconf, dstr)
|
||||
require.NotNil(t, tgts)
|
||||
|
|
332
discovery/uyuni/uyuni.go
Normal file
|
@ -0,0 +1,332 @@
|
|||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package uyuni
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/kolo/xmlrpc"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
"github.com/prometheus/prometheus/discovery/refresh"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
)
|
||||
|
||||
const (
|
||||
uyuniXMLRPCAPIPath = "/rpc/api"
|
||||
|
||||
uyuniMetaLabelPrefix = model.MetaLabelPrefix + "uyuni_"
|
||||
uyuniLabelMinionHostname = uyuniMetaLabelPrefix + "minion_hostname"
|
||||
uyuniLabelPrimaryFQDN = uyuniMetaLabelPrefix + "primary_fqdn"
|
||||
uyuniLablelSystemID = uyuniMetaLabelPrefix + "system_id"
|
||||
uyuniLablelGroups = uyuniMetaLabelPrefix + "groups"
|
||||
uyuniLablelEndpointName = uyuniMetaLabelPrefix + "endpoint_name"
|
||||
uyuniLablelExporter = uyuniMetaLabelPrefix + "exporter"
|
||||
uyuniLabelProxyModule = uyuniMetaLabelPrefix + "proxy_module"
|
||||
uyuniLabelMetricsPath = uyuniMetaLabelPrefix + "metrics_path"
|
||||
uyuniLabelScheme = uyuniMetaLabelPrefix + "scheme"
|
||||
)
|
||||
|
||||
// DefaultSDConfig is the default Uyuni SD configuration.
|
||||
var DefaultSDConfig = SDConfig{
|
||||
Entitlement: "monitoring_entitled",
|
||||
Separator: ",",
|
||||
RefreshInterval: model.Duration(1 * time.Minute),
|
||||
}
|
||||
|
||||
func init() {
|
||||
discovery.RegisterConfig(&SDConfig{})
|
||||
}
|
||||
|
||||
// SDConfig is the configuration for Uyuni based service discovery.
|
||||
type SDConfig struct {
|
||||
Server config.URL `yaml:"server"`
|
||||
Username string `yaml:"username"`
|
||||
Password config.Secret `yaml:"password"`
|
||||
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
|
||||
Entitlement string `yaml:"entitlement,omitempty"`
|
||||
Separator string `yaml:"separator,omitempty"`
|
||||
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
|
||||
}
|
||||
|
||||
type systemGroupID struct {
|
||||
GroupID int `xmlrpc:"id"`
|
||||
GroupName string `xmlrpc:"name"`
|
||||
}
|
||||
|
||||
type networkInfo struct {
|
||||
SystemID int `xmlrpc:"system_id"`
|
||||
Hostname string `xmlrpc:"hostname"`
|
||||
PrimaryFQDN string `xmlrpc:"primary_fqdn"`
|
||||
IP string `xmlrpc:"ip"`
|
||||
}
|
||||
|
||||
type endpointInfo struct {
|
||||
SystemID int `xmlrpc:"system_id"`
|
||||
EndpointName string `xmlrpc:"endpoint_name"`
|
||||
Port int `xmlrpc:"port"`
|
||||
Path string `xmlrpc:"path"`
|
||||
Module string `xmlrpc:"module"`
|
||||
ExporterName string `xmlrpc:"exporter_name"`
|
||||
TLSEnabled bool `xmlrpc:"tls_enabled"`
|
||||
}
|
||||
|
||||
// Discovery periodically performs Uyuni API requests. It implements the Discoverer interface.
|
||||
type Discovery struct {
|
||||
*refresh.Discovery
|
||||
apiURL *url.URL
|
||||
roundTripper http.RoundTripper
|
||||
username string
|
||||
password string
|
||||
entitlement string
|
||||
separator string
|
||||
interval time.Duration
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
// Name returns the name of the Config.
|
||||
func (*SDConfig) Name() string { return "uyuni" }
|
||||
|
||||
// NewDiscoverer returns a Discoverer for the Config.
|
||||
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
|
||||
return NewDiscovery(c, opts.Logger)
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
*c = DefaultSDConfig
|
||||
type plain SDConfig
|
||||
err := unmarshal((*plain)(c))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c.Server.URL == nil {
|
||||
return errors.New("Uyuni SD configuration requires server host")
|
||||
}
|
||||
|
||||
_, err = url.Parse(c.Server.String())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Uyuni Server URL is not valid")
|
||||
}
|
||||
|
||||
if c.Username == "" {
|
||||
return errors.New("Uyuni SD configuration requires a username")
|
||||
}
|
||||
if c.Password == "" {
|
||||
return errors.New("Uyuni SD configuration requires a password")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func login(rpcclient *xmlrpc.Client, user, pass string) (string, error) {
|
||||
var result string
|
||||
err := rpcclient.Call("auth.login", []interface{}{user, pass}, &result)
|
||||
return result, err
|
||||
}
|
||||
|
||||
func logout(rpcclient *xmlrpc.Client, token string) error {
|
||||
return rpcclient.Call("auth.logout", token, nil)
|
||||
}
|
||||
|
||||
func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token, entitlement string) (map[int][]systemGroupID, error) {
|
||||
var systemGroupsInfos []struct {
|
||||
SystemID int `xmlrpc:"id"`
|
||||
SystemGroups []systemGroupID `xmlrpc:"system_groups"`
|
||||
}
|
||||
|
||||
err := rpcclient.Call("system.listSystemGroupsForSystemsWithEntitlement", []interface{}{token, entitlement}, &systemGroupsInfos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := make(map[int][]systemGroupID)
|
||||
for _, systemGroupsInfo := range systemGroupsInfos {
|
||||
result[systemGroupsInfo.SystemID] = systemGroupsInfo.SystemGroups
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func getNetworkInformationForSystems(rpcclient *xmlrpc.Client, token string, systemIDs []int) (map[int]networkInfo, error) {
|
||||
var networkInfos []networkInfo
|
||||
err := rpcclient.Call("system.getNetworkForSystems", []interface{}{token, systemIDs}, &networkInfos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := make(map[int]networkInfo)
|
||||
for _, networkInfo := range networkInfos {
|
||||
result[networkInfo.SystemID] = networkInfo
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func getEndpointInfoForSystems(
|
||||
rpcclient *xmlrpc.Client,
|
||||
token string,
|
||||
systemIDs []int,
|
||||
) ([]endpointInfo, error) {
|
||||
var endpointInfos []endpointInfo
|
||||
err := rpcclient.Call(
|
||||
"system.monitoring.listEndpoints",
|
||||
[]interface{}{token, systemIDs}, &endpointInfos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return endpointInfos, err
|
||||
}
|
||||
|
||||
// NewDiscovery returns a uyuni discovery for the given configuration.
|
||||
func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
|
||||
apiURL := &url.URL{} // allocate before copying; writing through a nil *url.URL would panic
|
||||
*apiURL = *conf.Server.URL
|
||||
apiURL.Path = path.Join(apiURL.Path, uyuniXMLRPCAPIPath)
|
||||
|
||||
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "uyuni_sd")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d := &Discovery{
|
||||
apiURL: apiURL,
|
||||
roundTripper: rt,
|
||||
username: conf.Username,
|
||||
password: string(conf.Password),
|
||||
entitlement: conf.Entitlement,
|
||||
separator: conf.Separator,
|
||||
interval: time.Duration(conf.RefreshInterval),
|
||||
logger: logger,
|
||||
}
|
||||
|
||||
d.Discovery = refresh.NewDiscovery(
|
||||
logger,
|
||||
"uyuni",
|
||||
time.Duration(conf.RefreshInterval),
|
||||
d.refresh,
|
||||
)
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func (d *Discovery) getEndpointLabels(
|
||||
endpoint endpointInfo,
|
||||
systemGroupIDs []systemGroupID,
|
||||
networkInfo networkInfo,
|
||||
) model.LabelSet {
|
||||
var addr, scheme string
|
||||
managedGroupNames := getSystemGroupNames(systemGroupIDs)
|
||||
addr = fmt.Sprintf("%s:%d", networkInfo.Hostname, endpoint.Port)
|
||||
if endpoint.TLSEnabled {
|
||||
scheme = "https"
|
||||
} else {
|
||||
scheme = "http"
|
||||
}
|
||||
|
||||
result := model.LabelSet{
|
||||
model.AddressLabel: model.LabelValue(addr),
|
||||
uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname),
|
||||
uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN),
|
||||
uyuniLablelSystemID: model.LabelValue(fmt.Sprintf("%d", endpoint.SystemID)),
|
||||
uyuniLablelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)),
|
||||
uyuniLablelEndpointName: model.LabelValue(endpoint.EndpointName),
|
||||
uyuniLablelExporter: model.LabelValue(endpoint.ExporterName),
|
||||
uyuniLabelProxyModule: model.LabelValue(endpoint.Module),
|
||||
uyuniLabelMetricsPath: model.LabelValue(endpoint.Path),
|
||||
uyuniLabelScheme: model.LabelValue(scheme),
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func getSystemGroupNames(systemGroupsIDs []systemGroupID) []string {
|
||||
managedGroupNames := make([]string, 0, len(systemGroupsIDs))
|
||||
for _, systemGroupInfo := range systemGroupsIDs {
|
||||
managedGroupNames = append(managedGroupNames, systemGroupInfo.GroupName)
|
||||
}
|
||||
|
||||
return managedGroupNames
|
||||
}
|
||||
|
||||
func (d *Discovery) getTargetsForSystems(
|
||||
rpcClient *xmlrpc.Client,
|
||||
token string,
|
||||
entitlement string,
|
||||
) ([]model.LabelSet, error) {
|
||||
result := make([]model.LabelSet, 0)
|
||||
|
||||
systemGroupIDsBySystemID, err := getSystemGroupsInfoOfMonitoredClients(rpcClient, token, entitlement)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to get the managed system groups information of monitored clients")
|
||||
}
|
||||
|
||||
systemIDs := make([]int, 0, len(systemGroupIDsBySystemID))
|
||||
for systemID := range systemGroupIDsBySystemID {
|
||||
systemIDs = append(systemIDs, systemID)
|
||||
}
|
||||
|
||||
endpointInfos, err := getEndpointInfoForSystems(rpcClient, token, systemIDs)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to get endpoints information")
|
||||
}
|
||||
|
||||
networkInfoBySystemID, err := getNetworkInformationForSystems(rpcClient, token, systemIDs)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to get the systems network information")
|
||||
}
|
||||
|
||||
for _, endpoint := range endpointInfos {
|
||||
systemID := endpoint.SystemID
|
||||
labels := d.getEndpointLabels(
|
||||
endpoint,
|
||||
systemGroupIDsBySystemID[systemID],
|
||||
networkInfoBySystemID[systemID])
|
||||
result = append(result, labels)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
||||
rpcClient, err := xmlrpc.NewClient(d.apiURL.String(), d.roundTripper)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rpcClient.Close()
|
||||
|
||||
token, err := login(rpcClient, d.username, d.password)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to login to Uyuni API")
|
||||
}
|
||||
defer func() {
|
||||
if err := logout(rpcClient, token); err != nil {
|
||||
level.Debug(d.logger).Log("msg", "Failed to log out from Uyuni API", "err", err)
|
||||
}
|
||||
}()
|
||||
|
||||
targetsForSystems, err := d.getTargetsForSystems(rpcClient, token, d.entitlement)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return []*targetgroup.Group{{Targets: targetsForSystems, Source: d.apiURL.String()}}, nil
|
||||
}
|
|
@ -26,22 +26,19 @@ import (
|
|||
"google.golang.org/protobuf/types/known/anypb"
|
||||
)
|
||||
|
||||
var (
|
||||
httpResourceConf = &HTTPResourceClientConfig{
|
||||
HTTPClientConfig: config.HTTPClientConfig{
|
||||
TLSConfig: config.TLSConfig{InsecureSkipVerify: true},
|
||||
},
|
||||
ResourceType: "monitoring",
|
||||
// Some known type.
|
||||
ResourceTypeURL: "type.googleapis.com/envoy.service.discovery.v3.DiscoveryRequest",
|
||||
Server: "http://localhost",
|
||||
ClientID: "test-id",
|
||||
}
|
||||
)
|
||||
var httpResourceConf = &HTTPResourceClientConfig{
|
||||
HTTPClientConfig: config.HTTPClientConfig{
|
||||
TLSConfig: config.TLSConfig{InsecureSkipVerify: true},
|
||||
},
|
||||
ResourceType: "monitoring",
|
||||
// Some known type.
|
||||
ResourceTypeURL: "type.googleapis.com/envoy.service.discovery.v3.DiscoveryRequest",
|
||||
Server: "http://localhost",
|
||||
ClientID: "test-id",
|
||||
}
|
||||
|
||||
func urlMustParse(str string) *url.URL {
|
||||
parsed, err := url.Parse(str)
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -92,7 +89,6 @@ func TestCreateNewHTTPResourceClient(t *testing.T) {
|
|||
|
||||
require.Equal(t, client.endpoint, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1")
|
||||
require.Equal(t, client.client.Timeout, 1*time.Minute)
|
||||
|
||||
}
|
||||
|
||||
func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, protocolVersion ProtocolVersion, responder discoveryResponder) (*HTTPResourceClient, func()) {
|
||||
|
|
|
@ -91,7 +91,6 @@ func getKumaMadsV1DiscoveryResponse(resources ...*MonitoringAssignment) (*v3.Dis
|
|||
serialized := make([]*anypb.Any, len(resources))
|
||||
for i, res := range resources {
|
||||
data, err := proto.Marshal(res)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -288,6 +288,10 @@ serverset_sd_configs:
|
|||
triton_sd_configs:
|
||||
[ - <triton_sd_config> ... ]
|
||||
|
||||
# List of Uyuni service discovery configurations.
|
||||
uyuni_sd_configs:
|
||||
[ - <uyuni_sd_config> ... ]
|
||||
|
||||
# List of labeled statically configured targets for this job.
|
||||
static_configs:
|
||||
[ - <static_config> ... ]
|
||||
|
@ -379,6 +383,10 @@ token_url: <string>
|
|||
# Optional parameters to append to the token URL.
|
||||
endpoint_params:
|
||||
[ <string>: <string> ... ]
|
||||
|
||||
# Configures the token request's TLS settings.
|
||||
tls_config:
|
||||
[ <tls_config> ]
|
||||
```
|
||||
|
||||
### `<azure_sd_config>`
|
||||
|
@ -425,6 +433,42 @@ subscription_id: <string>
|
|||
# The port to scrape metrics from. If using the public IP address, this must
|
||||
# instead be specified in the relabeling rule.
|
||||
[ port: <int> | default = 80 ]
|
||||
|
||||
# Authentication information used to authenticate to the Azure API.
|
||||
# Note that `basic_auth`, `authorization` and `oauth2` options are
|
||||
# mutually exclusive.
|
||||
# `password` and `password_file` are mutually exclusive.
|
||||
|
||||
# Optional HTTP basic authentication information, currently not supported by Azure.
|
||||
basic_auth:
|
||||
[ username: <string> ]
|
||||
[ password: <secret> ]
|
||||
[ password_file: <string> ]
|
||||
|
||||
# Optional `Authorization` header configuration, currently not supported by Azure.
|
||||
authorization:
|
||||
# Sets the authentication type.
|
||||
[ type: <string> | default: Bearer ]
|
||||
# Sets the credentials. It is mutually exclusive with
|
||||
# `credentials_file`.
|
||||
[ credentials: <secret> ]
|
||||
# Sets the credentials to the credentials read from the configured file.
|
||||
# It is mutually exclusive with `credentials`.
|
||||
[ credentials_file: <filename> ]
|
||||
|
||||
# Optional OAuth 2.0 configuration, currently not supported by Azure.
|
||||
oauth2:
|
||||
[ <oauth2> ]
|
||||
|
||||
# Optional proxy URL.
|
||||
[ proxy_url: <string> ]
|
||||
|
||||
# Configure whether HTTP requests follow HTTP 3xx redirects.
|
||||
[ follow_redirects: <bool> | default = true ]
|
||||
|
||||
# TLS configuration.
|
||||
tls_config:
|
||||
[ <tls_config> ]
|
||||
```
|
||||
|
||||
### `<consul_sd_config>`
|
||||
|
@ -2256,6 +2300,79 @@ tls_config:
|
|||
[ <tls_config> ]
|
||||
```
|
||||
|
||||
### `<uyuni_sd_config>`
|
||||
|
||||
Uyuni SD configurations allow retrieving scrape targets from managed systems
|
||||
via the [Uyuni](https://www.uyuni-project.org/) API.
|
||||
|
||||
The following meta labels are available on targets during [relabeling](#relabel_config):
|
||||
|
||||
* `__meta_uyuni_endpoint_name`: the name of the application endpoint
|
||||
* `__meta_uyuni_exporter`: the exporter exposing metrics for the target
|
||||
* `__meta_uyuni_groups`: the system groups of the target
|
||||
* `__meta_uyuni_metrics_path`: metrics path for the target
|
||||
* `__meta_uyuni_minion_hostname`: hostname of the Uyuni client
|
||||
* `__meta_uyuni_primary_fqdn`: primary FQDN of the Uyuni client
|
||||
* `__meta_uyuni_proxy_module`: the module name if _Exporter Exporter_ proxy is
|
||||
configured for the target
|
||||
* `__meta_uyuni_scheme`: the protocol scheme used for requests
|
||||
* `__meta_uyuni_system_id`: the system ID of the client
|
||||
|
||||
See below for the configuration options for Uyuni discovery:
|
||||
|
||||
```yaml
|
||||
# The URL to connect to the Uyuni server.
|
||||
server: <string>
|
||||
|
||||
# Credentials are used to authenticate the requests to the Uyuni API.
|
||||
username: <string>
|
||||
password: <secret>
|
||||
|
||||
# The entitlement string to filter eligible systems.
|
||||
[ entitlement: <string> | default = monitoring_entitled ]
|
||||
|
||||
# The string by which Uyuni group names are joined into the groups label.
|
||||
[ separator: <string> | default = , ]
|
||||
|
||||
# Refresh interval to re-read the managed targets list.
|
||||
[ refresh_interval: <duration> | default = 60s ]
|
||||
|
||||
# Optional HTTP basic authentication information, currently not supported by Uyuni.
|
||||
basic_auth:
|
||||
[ username: <string> ]
|
||||
[ password: <secret> ]
|
||||
[ password_file: <string> ]
|
||||
|
||||
# Optional `Authorization` header configuration, currently not supported by Uyuni.
|
||||
authorization:
|
||||
# Sets the authentication type.
|
||||
[ type: <string> | default: Bearer ]
|
||||
# Sets the credentials. It is mutually exclusive with
|
||||
# `credentials_file`.
|
||||
[ credentials: <secret> ]
|
||||
# Sets the credentials to the credentials read from the configured file.
|
||||
# It is mutually exclusive with `credentials`.
|
||||
[ credentials_file: <filename> ]
|
||||
|
||||
# Optional OAuth 2.0 configuration, currently not supported by Uyuni.
|
||||
# Cannot be used at the same time as basic_auth or authorization.
|
||||
oauth2:
|
||||
[ <oauth2> ]
|
||||
|
||||
# Optional proxy URL.
|
||||
[ proxy_url: <string> ]
|
||||
|
||||
# Configure whether HTTP requests follow HTTP 3xx redirects.
|
||||
[ follow_redirects: <bool> | default = true ]
|
||||
|
||||
# TLS configuration.
|
||||
tls_config:
|
||||
[ <tls_config> ]
|
||||
```
|
||||
|
||||
See [the Prometheus uyuni-sd configuration file](/documentation/examples/prometheus-uyuni.yml)
|
||||
for a practical example of how to set up a Uyuni Prometheus configuration.
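
For contributors who want to exercise the discoverer from Go directly, here is a minimal sketch. It assumes a reachable Uyuni server; the URL and credentials are placeholders, and error handling is reduced to panics:

```go
package main

import (
	"context"
	"fmt"
	"net/url"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/common/config"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/discovery/uyuni"
)

func main() {
	// Hypothetical server and credentials; substitute a real Uyuni API host.
	u, err := url.Parse("https://uyuni.example.com")
	if err != nil {
		panic(err)
	}
	conf := &uyuni.SDConfig{
		Server:          config.URL{URL: u},
		Username:        "admin",
		Password:        config.Secret("secret"),
		Entitlement:     "monitoring_entitled",
		Separator:       ",",
		RefreshInterval: model.Duration(time.Minute),
	}
	d, err := uyuni.NewDiscovery(conf, log.NewNopLogger())
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// The embedded refresh.Discovery pushes one target group per refresh interval.
	ch := make(chan []*targetgroup.Group)
	go d.Run(ctx, ch)
	select {
	case tgs := <-ch:
		for _, tg := range tgs {
			fmt.Printf("source=%s targets=%d\n", tg.Source, len(tg.Targets))
		}
	case <-ctx.Done():
	}
}
```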
|
||||
|
||||
### `<static_config>`
|
||||
|
||||
A `static_config` allows specifying a list of targets and a common label set
|
||||
|
@ -2518,6 +2635,10 @@ serverset_sd_configs:
|
|||
triton_sd_configs:
|
||||
[ - <triton_sd_config> ... ]
|
||||
|
||||
# List of Uyuni service discovery configurations.
|
||||
uyuni_sd_configs:
|
||||
[ - <uyuni_sd_config> ... ]
|
||||
|
||||
# List of labeled statically configured Alertmanagers.
|
||||
static_configs:
|
||||
[ - <static_config> ... ]
|
||||
|
|
|
@ -73,6 +73,30 @@ http_server_config:
|
|||
# Enable HTTP/2 support. Note that HTTP/2 is only supported with TLS.
|
||||
# This can not be changed on the fly.
|
||||
[ http2: <boolean> | default = true ]
|
||||
# List of headers that can be added to HTTP responses.
|
||||
[ headers:
|
||||
# Set the Content-Security-Policy header to HTTP responses.
|
||||
# Unset if blank.
|
||||
[ Content-Security-Policy: <string> ]
|
||||
# Set the X-Frame-Options header to HTTP responses.
|
||||
# Unset if blank. Accepted values are deny and sameorigin.
|
||||
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options
|
||||
[ X-Frame-Options: <string> ]
|
||||
# Set the X-Content-Type-Options header to HTTP responses.
|
||||
# Unset if blank. Accepted value is nosniff.
|
||||
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options
|
||||
[ X-Content-Type-Options: <string> ]
|
||||
# Set the X-XSS-Protection header to HTTP responses.
|
||||
# Unset if blank.
|
||||
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection
|
||||
[ X-XSS-Protection: <string> ]
|
||||
# Set the Strict-Transport-Security header to HTTP responses.
|
||||
# Unset if blank.
|
||||
# Please make sure that you use this with care as this header might force
|
||||
# browsers to load Prometheus and the other applications hosted on the same
|
||||
# domain and subdomains over HTTPS.
|
||||
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security
|
||||
[ Strict-Transport-Security: <string> ] ]
|
||||
|
||||
# Usernames and hashed passwords that have full access to the web
|
||||
# server via basic authentication. If empty, no basic authentication is
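
To make the effect of these header options concrete, here is a small illustrative Go middleware that attaches the same headers by hand; the values are examples chosen for this sketch, not defaults of the Prometheus web configuration:

```go
package main

import "net/http"

// securityHeaders attaches the response headers described above. Prometheus
// fills these from the web configuration file; the values here are just
// illustrative choices.
func securityHeaders(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		h := w.Header()
		h.Set("Content-Security-Policy", "default-src 'self'")
		h.Set("X-Frame-Options", "deny")
		h.Set("X-Content-Type-Options", "nosniff")
		h.Set("X-XSS-Protection", "1; mode=block")
		h.Set("Strict-Transport-Security", "max-age=63072000")
		next.ServeHTTP(w, r)
	})
}

func main() {
	http.Handle("/", securityHeaders(http.NotFoundHandler()))
	_ = http.ListenAndServe(":9090", nil)
}
```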
|
||||
|
|
|
@ -78,8 +78,8 @@ name: <string>
|
|||
# How often rules in the group are evaluated.
|
||||
[ interval: <duration> | default = global.evaluation_interval ]
|
||||
|
||||
# Limit the number of alerts and series individual rules can produce.
|
||||
# 0 is no limit.
|
||||
# Limit the number of alerts an alerting rule and series a recording
|
||||
# rule can produce. 0 is no limit.
|
||||
[ limit: <int> | default = 0 ]
|
||||
|
||||
rules:
|
||||
|
@ -128,3 +128,11 @@ annotations:
|
|||
[ <labelname>: <tmpl_string> ]
|
||||
```
|
||||
|
||||
# Limiting alerts and series
|
||||
|
||||
A limit for alerts produced by alerting rules and series produced by recording rules
|
||||
can be configured per-group. When the limit is exceeded, _all_ series produced
|
||||
by the rule are discarded, and if it's an alerting rule, _all_ alerts for
|
||||
the rule, active, pending, or inactive, are cleared as well. The event will be
|
||||
recorded as an error in the evaluation, and as such no stale markers are
|
||||
written.
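
As a rough sketch of the described behaviour (not the actual rules-engine code), the all-or-nothing check looks like this:

```go
package rules

import "fmt"

// Sample stands in for one series or alert produced by a rule.
type Sample struct{}

// applyGroupLimit mirrors the semantics described above: once a rule's
// output exceeds the group limit, the entire output is dropped and the
// evaluation is recorded as an error, so no stale markers are written.
func applyGroupLimit(out []Sample, limit int) ([]Sample, error) {
	if limit > 0 && len(out) > limit {
		return nil, fmt.Errorf("exceeded limit of %d with %d series", limit, len(out))
	}
	return out, nil
}
```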
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
---
|
||||
title: Feature Flags
|
||||
title: Feature flags
|
||||
sort_rank: 11
|
||||
---
|
||||
|
||||
# Feature Flags
|
||||
# Feature flags
|
||||
|
||||
Here is a list of features that are disabled by default since they are breaking changes or are considered experimental.
|
||||
Their behaviour can change in future releases, which will be communicated via the [release changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md).
|
||||
|
@ -46,7 +46,7 @@ More details can be found [here](querying/basics.md#offset-modifier).
|
|||
|
||||
The remote write receiver allows Prometheus to accept remote write requests from other Prometheus servers. More details can be found [here](storage.md#overview).
|
||||
|
||||
## Exemplars Storage
|
||||
## Exemplars storage
|
||||
|
||||
`--enable-feature=exemplar-storage`
|
||||
|
||||
|
@ -54,7 +54,7 @@ The remote write receiver allows Prometheus to accept remote write requests from
|
|||
|
||||
Exemplar storage is implemented as a fixed-size circular buffer that stores exemplars in memory for all series. Enabling this feature will enable the storage of exemplars scraped by Prometheus. The flag `storage.exemplars.exemplars-limit` can be used to control the size of the circular buffer by the number of exemplars. An exemplar with just a `traceID=<jaeger-trace-id>` uses roughly 100 bytes of memory via the in-memory exemplar storage. If exemplar storage is enabled, the exemplars are also appended to the WAL for local persistence (for the duration of the WAL).
|
||||
|
||||
## Memory Snapshot on Shutdown
|
||||
## Memory snapshot on shutdown
|
||||
|
||||
`--enable-feature=memory-snapshot-on-shutdown`
|
||||
|
||||
|
@ -62,7 +62,7 @@ This takes the snapshot of the chunks that are in memory along with the series i
|
|||
it on disk. This will reduce the startup time since the memory state can be restored with this snapshot and m-mapped
|
||||
chunks, without the need for WAL replay.
|
||||
|
||||
## Extra Scrape Metrics
|
||||
## Extra scrape metrics
|
||||
|
||||
`--enable-feature=extra-scrape-metrics`
|
||||
|
||||
|
@ -71,3 +71,28 @@ When enabled, for each instance scrape, Prometheus stores a sample in the follow
|
|||
- `scrape_timeout_seconds`. The configured `scrape_timeout` for a target. This allows you to measure each target to find out how close they are to timing out with `scrape_duration_seconds / scrape_timeout_seconds`.
|
||||
- `scrape_sample_limit`. The configured `sample_limit` for a target. This allows you to measure each target
|
||||
to find out how close they are to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. Note that `scrape_sample_limit` can be zero if there is no limit configured, which means that the query above can return `+Inf` for targets with no limit (as we divide by zero). If you want to query only for targets that do have a sample limit use this query: `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`.
|
||||
- `scrape_body_size_bytes`. The uncompressed size of the most recent scrape response, if successful. Scrapes failing because `body_size_limit` is exceeded report `-1`, other scrape failures report `0`.
|
||||
|
||||
## New service discovery manager
|
||||
|
||||
`--enable-feature=new-service-discovery-manager`
|
||||
|
||||
When enabled, Prometheus uses a new service discovery manager that does not
|
||||
restart unchanged discoveries upon reloading. This makes reloads faster and reduces
|
||||
pressure on service discoveries' sources.
|
||||
|
||||
Users are encouraged to test the new service discovery manager and report any
|
||||
issues upstream.
|
||||
|
||||
In future releases, this new service discovery manager will become the default and
|
||||
this feature flag will be ignored.
|
||||
|
||||
## Prometheus agent
|
||||
|
||||
`--enable-feature=agent`
|
||||
|
||||
When enabled, Prometheus runs in agent mode. The agent mode is limited to
|
||||
discovery, scrape and remote write.
|
||||
|
||||
This is useful when you do not need to query the Prometheus data locally, but
|
||||
only from a central [remote endpoint](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
|
||||
|
|
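Looking back at the extra scrape metrics added above: the documented ratio `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)` lends itself to an alerting rule. A hypothetical sketch (rule name, threshold, and duration are illustrative):

```yaml
groups:
  - name: scrape-budget
    rules:
      - alert: ApproachingSampleLimit
        # The (scrape_sample_limit > 0) guard keeps only targets that
        # actually configure a sample_limit, avoiding division by zero.
        expr: scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0) > 0.9
        for: 15m
```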
@ -222,7 +222,7 @@ or
both `(label1, label2)` and `(label1, label2,)` are valid syntax.

`without` removes the listed labels from the result vector, while
all other labels are preserved the output. `by` does the opposite and drops
all other labels are preserved in the output. `by` does the opposite and drops
labels that are not listed in the `by` clause, even if their label values are
identical between all elements of the vector.
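To illustrate the contrast this one-word fix documents, a minimal recording-rules sketch (metric and label names are hypothetical):

```yaml
groups:
  - name: aggregation-examples
    rules:
      # without: drops only "instance"; every other label is preserved in the output.
      - record: job:http_requests:rate5m
        expr: sum without (instance) (rate(http_requests_total[5m]))
      # by: keeps only "job"; every other label is dropped.
      - record: job:http_requests:rate5m:by_job
        expr: sum by (job) (rate(http_requests_total[5m]))
```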
@ -50,7 +50,7 @@ var (
    tagsLabel = model.MetaLabelPrefix + "consul_tags"
    // serviceAddressLabel is the name of the label containing the (optional) service address.
    serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address"
    //servicePortLabel is the name of the label containing the service port.
    // servicePortLabel is the name of the label containing the service port.
    servicePortLabel = model.MetaLabelPrefix + "consul_service_port"
    // serviceIDLabel is the name of the label containing the service ID.
    serviceIDLabel = model.MetaLabelPrefix + "consul_service_id"

@ -120,7 +120,7 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target
    for _, node := range nodes {
        // We surround the separated list with the separator as well. This way regular expressions
        // in relabeling rules don't have to consider tag positions.
        var tags = "," + strings.Join(node.ServiceTags, ",") + ","
        tags := "," + strings.Join(node.ServiceTags, ",") + ","

        // If the service address is not empty it should be used instead of the node address
        // since the service may be registered remotely through a different node.

@ -162,7 +162,6 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
    for c := time.Tick(time.Duration(d.refreshInterval) * time.Second); ; {
        var srvs map[string][]string
        resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/services", d.address))

        if err != nil {
            level.Error(d.logger).Log("msg", "Error getting services list", "err", err)
            time.Sleep(time.Duration(d.refreshInterval) * time.Second)
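The comma-surrounding trick in the change above (`tags := "," + strings.Join(...) + ","`) is what lets relabeling match a tag at any position without positional regexes. A hypothetical scrape-config fragment relying on it:

```yaml
relabel_configs:
  # __meta_consul_tags looks like ",web,metrics," here, so one anchored
  # pattern finds the "metrics" tag wherever it sits in the list.
  - source_labels: [__meta_consul_tags]
    regex: .*,metrics,.*
    action: keep
```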
@ -163,7 +163,7 @@ func (a *Adapter) Run() {
}

// NewAdapter creates a new instance of Adapter.
func NewAdapter(ctx context.Context, file string, name string, d discovery.Discoverer, logger log.Logger) *Adapter {
func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger) *Adapter {
    return &Adapter{
        ctx:  ctx,
        disc: d,
documentation/examples/prometheus-agent.yml (new file)

@ -0,0 +1,22 @@
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "prometheus"

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ["localhost:9090"]

# When running prometheus in Agent mode, remote-write is required.
remote_write:
  # Agent is able to run with a invalid remote-write URL, but, of course, will fail to push timeseries.
  - url: "http://remote-write-url"
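As a usage note: combined with the agent feature flag documented earlier in this commit, this example would presumably be started along the lines of `prometheus --enable-feature=agent --config.file=documentation/examples/prometheus-agent.yml`, with the remote-write URL pointed at a real endpoint.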
@ -14,11 +14,10 @@ scrape_configs:

  - job_name: 'puppetdb-scrape-jobs'
    puppetdb_sd_configs:
      # This example uses the Prometheus::Scrape_job
      # exported resources.
      # https://github.com/camptocamp/prometheus-puppetdb-sd
      # This examples is compatible with Prometheus-puppetdb-sd,
      # if the exported Prometheus::Scrape_job only have at most one target.
      # This example uses Prometheus::Scrape_job exported resources.
      # It is compatible with the prometheus-puppetdb-sd
      # (https://github.com/camptocamp/prometheus-puppetdb-sd) if the
      # exported resources have exactly one target.
      - url: https://puppetdb.example.com
        query: 'resources { type = "Prometheus::Scrape_job" and exported = true }'
        include_parameters: true
documentation/examples/prometheus-uyuni.yml (new file)

@ -0,0 +1,36 @@
# A example scrape configuration for running Prometheus with Uyuni.

scrape_configs:

  # Make Prometheus scrape itself for metrics.
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  # Discover Uyuni managed targets to scrape.
  - job_name: 'uyuni'

    # Scrape Uyuni itself to discover new services.
    uyuni_sd_configs:
      - server: http://uyuni-project.org
        username: gopher
        password: hole
    relabel_configs:
      - source_labels: [__meta_uyuni_exporter]
        target_label: exporter
      - source_labels: [__meta_uyuni_groups]
        target_label: groups
      - source_labels: [__meta_uyuni_minion_hostname]
        target_label: hostname
      - source_labels: [__meta_uyuni_primary_fqdn]
        regex: (.+)
        target_label: hostname
      - source_labels: [hostname, __address__]
        regex: (.*);.*:(.*)
        replacement: ${1}:${2}
        target_label: __address__
      - source_labels: [__meta_uyuni_metrics_path]
        regex: (.+)
        target_label: __metrics_path__
      - source_labels: [__meta_uyuni_proxy_module]
        target_label: __param_module
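A note on the relabel chain above: relabeling joins `source_labels` with the default `;` separator before matching, so in the `[hostname, __address__]` rule the pattern `(.*);.*:(.*)` captures the discovered hostname and the port of the originally discovered address, and `replacement: ${1}:${2}` rewrites the scrape address to `<hostname>:<port>`. The two preceding rules make `hostname` prefer the primary FQDN when one is set.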
@ -37,7 +37,7 @@ type Client struct {
}

// NewClient creates a new Client.
func NewClient(logger log.Logger, address string, transport string, timeout time.Duration, prefix string) *Client {
func NewClient(logger log.Logger, address, transport string, timeout time.Duration, prefix string) *Client {
    if logger == nil {
        logger = log.NewNopLogger()
    }
@ -20,13 +20,11 @@ import (
    "github.com/stretchr/testify/require"
)

var (
    metric = model.Metric{
        model.MetricNameLabel: "test:metric",
        "testlabel":           "test:value",
        "many_chars":          "abc!ABC:012-3!45ö67~89./(){},=.\"\\",
    }
)
var metric = model.Metric{
    model.MetricNameLabel: "test:metric",
    "testlabel":           "test:value",
    "many_chars":          "abc!ABC:012-3!45ö67~89./(){},=.\"\\",
}

func TestEscape(t *testing.T) {
    // Can we correctly keep and escape valid chars.
@ -41,7 +41,7 @@ type Client struct {
}

// NewClient creates a new Client.
func NewClient(logger log.Logger, conf influx.HTTPConfig, db string, rp string) *Client {
func NewClient(logger log.Logger, conf influx.HTTPConfig, db, rp string) *Client {
    c, err := influx.NewHTTPClient(conf)
    // Currently influx.NewClient() *should* never return an error.
    if err != nil {
@ -21,13 +21,11 @@ import (
    "github.com/stretchr/testify/require"
)

var (
    metric = model.Metric{
        model.MetricNameLabel: "test:metric",
        "testlabel":           "test:value",
        "many_chars":          "abc!ABC:012-3!45ö67~89./",
    }
)
var metric = model.Metric{
    model.MetricNameLabel: "test:metric",
    "testlabel":           "test:value",
    "many_chars":          "abc!ABC:012-3!45ö67~89./",
}

func TestTagsFromMetric(t *testing.T) {
    expected := map[string]TagValue{
@ -21,5 +21,9 @@ lint: prometheus_alerts.yaml
    promtool check rules prometheus_alerts.yaml

.PHONY: jb_install
jb_install:
    jb install

clean:
    rm -rf dashboards_out prometheus_alerts.yaml
@ -1,8 +0,0 @@
module github.com/prometheus/prometheus/documentation/prometheus-mixin

go 1.15

require (
    github.com/google/go-jsonnet v0.16.0
    github.com/jsonnet-bundler/jsonnet-bundler v0.4.0
)
@ -1,49 +0,0 @@
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/google/go-jsonnet v0.16.0 h1:Nb4EEOp+rdeGGyB1rQ5eisgSAqrTnhf9ip+X6lzZbY0=
github.com/google/go-jsonnet v0.16.0/go.mod h1:sOcuej3UW1vpPTZOr8L7RQimqai1a57bt5j22LzGZCw=
github.com/jsonnet-bundler/jsonnet-bundler v0.4.0 h1:4BKZ6LDqPc2wJDmaKnmYD/vDjUptJtnUpai802MibFc=
github.com/jsonnet-bundler/jsonnet-bundler v0.4.0/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@ -1,26 +0,0 @@
// Copyright 2020 The prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build tools
// +build tools

// Package tools tracks dependencies for tools that used in the build process.
// See https://github.com/golang/go/issues/25922
package tools

import (
    _ "github.com/google/go-jsonnet/cmd/jsonnet"
    _ "github.com/google/go-jsonnet/cmd/jsonnetfmt"
    _ "github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb"
)
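For context: the deleted file above is an instance of the "tools package" pattern from golang/go#25922, where blank imports guarded by a `tools` build tag pin tool versions in a module's go.mod. With the mixin's dedicated module removed in this commit, `jsonnet`, `jsonnetfmt`, and `jb` are presumably expected to be installed out of band, while the new `jb_install` Makefile target only fetches the mixin's jsonnet dependencies via `jb install`.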
go.mod (61 changed lines)

@ -3,39 +3,40 @@ module github.com/prometheus/prometheus
go 1.14

require (
    github.com/Azure/azure-sdk-for-go v57.1.0+incompatible
    github.com/Azure/go-autorest/autorest v0.11.20
    github.com/Azure/go-autorest/autorest/adal v0.9.15
    github.com/Azure/azure-sdk-for-go v58.2.0+incompatible
    github.com/Azure/go-autorest/autorest v0.11.21
    github.com/Azure/go-autorest/autorest/adal v0.9.16
    github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
    github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
    github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922
    github.com/aws/aws-sdk-go v1.40.37
    github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a
    github.com/aws/aws-sdk-go v1.41.7
    github.com/cespare/xxhash/v2 v2.1.2
    github.com/containerd/containerd v1.5.4 // indirect
    github.com/containerd/containerd v1.5.7 // indirect
    github.com/dennwc/varint v1.0.0
    github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245
    github.com/digitalocean/godo v1.65.0
    github.com/docker/docker v20.10.8+incompatible
    github.com/digitalocean/godo v1.69.1
    github.com/docker/docker v20.10.9+incompatible
    github.com/docker/go-connections v0.4.0 // indirect
    github.com/edsrzf/mmap-go v1.0.0
    github.com/envoyproxy/go-control-plane v0.9.9
    github.com/envoyproxy/protoc-gen-validate v0.6.1
    github.com/go-kit/log v0.1.0
    github.com/go-kit/log v0.2.0
    github.com/go-logfmt/logfmt v0.5.1
    github.com/go-openapi/strfmt v0.20.2
    github.com/go-openapi/strfmt v0.20.3
    github.com/go-zookeeper/zk v1.0.2
    github.com/gogo/protobuf v1.3.2
    github.com/golang/snappy v0.0.4
    github.com/google/pprof v0.0.0-20210827144239-02619b876842
    github.com/gophercloud/gophercloud v0.20.0
    github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0
    github.com/gophercloud/gophercloud v0.22.0
    github.com/grpc-ecosystem/grpc-gateway v1.16.0
    github.com/hashicorp/consul/api v1.10.1
    github.com/hashicorp/consul/api v1.11.0
    github.com/hetznercloud/hcloud-go v1.32.0
    github.com/influxdata/influxdb v1.9.3
    github.com/json-iterator/go v1.1.11
    github.com/linode/linodego v0.32.0
    github.com/influxdata/influxdb v1.9.5
    github.com/json-iterator/go v1.1.12
    github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b
    github.com/linode/linodego v1.1.0
    github.com/miekg/dns v1.1.43
    github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
    github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
    github.com/morikuni/aec v1.0.0 // indirect
    github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
    github.com/oklog/run v1.1.0

@ -46,9 +47,9 @@ require (
    github.com/prometheus/alertmanager v0.23.0
    github.com/prometheus/client_golang v1.11.0
    github.com/prometheus/client_model v0.2.0
    github.com/prometheus/common v0.31.1
    github.com/prometheus/common v0.32.1
    github.com/prometheus/common/sigv4 v0.1.0
    github.com/prometheus/exporter-toolkit v0.6.1
    github.com/prometheus/exporter-toolkit v0.7.0
    github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44
    github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
    github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546

@ -56,25 +57,25 @@ require (
    github.com/uber/jaeger-client-go v2.29.1+incompatible
    github.com/uber/jaeger-lib v2.4.1+incompatible
    go.uber.org/atomic v1.9.0
    go.uber.org/goleak v1.1.10
    golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f
    golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
    go.uber.org/goleak v1.1.12
    golang.org/x/net v0.0.0-20211020060615-d418f374d309
    golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1
    golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
    golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34
    golang.org/x/sys v0.0.0-20211020174200-9d6173849985
    golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
    golang.org/x/tools v0.1.5
    google.golang.org/api v0.56.0
    google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83
    golang.org/x/tools v0.1.7
    google.golang.org/api v0.59.0
    google.golang.org/genproto v0.0.0-20211020151524-b7c3a969101a
    google.golang.org/protobuf v1.27.1
    gopkg.in/alecthomas/kingpin.v2 v2.2.6
    gopkg.in/fsnotify/fsnotify.v1 v1.4.7
    gopkg.in/yaml.v2 v2.4.0
    gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
    k8s.io/api v0.22.1
    k8s.io/apimachinery v0.22.1
    k8s.io/client-go v0.22.1
    k8s.io/api v0.22.2
    k8s.io/apimachinery v0.22.2
    k8s.io/client-go v0.22.2
    k8s.io/klog v1.0.0
    k8s.io/klog/v2 v2.10.0
    k8s.io/klog/v2 v2.20.0
)

replace (
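One substantive note amid the routine bumps above: `github.com/kolo/xmlrpc` is the only brand-new direct dependency, presumably pulled in for the Uyuni service discovery that the new prometheus-uyuni.yml example exercises (Uyuni exposes an XML-RPC API).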
180
go.sum
180
go.sum
|
@ -24,8 +24,10 @@ cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAV
|
|||
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
|
||||
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
|
||||
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
|
||||
cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio=
|
||||
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
|
||||
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
|
||||
cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8=
|
||||
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
|
@ -49,33 +51,32 @@ collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
|
|||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-sdk-for-go v57.1.0+incompatible h1:TKQ3ieyB0vVKkF6t9dsWbMjq56O1xU3eh3Ec09v6ajM=
|
||||
github.com/Azure/azure-sdk-for-go v57.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
github.com/Azure/azure-sdk-for-go v58.2.0+incompatible h1:iCb2tuoEm3N7ZpUDOvu1Yxl1B3iOVDmaD6weaRuIPzs=
|
||||
github.com/Azure/azure-sdk-for-go v58.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
|
||||
github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
|
||||
github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
|
||||
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
|
||||
github.com/Azure/go-autorest/autorest v0.11.9/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
|
||||
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
|
||||
github.com/Azure/go-autorest/autorest v0.11.20 h1:s8H1PbCZSqg/DH7JMlOz6YMig6htWLNPsjDdlLqCx3M=
|
||||
github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY=
|
||||
github.com/Azure/go-autorest/autorest v0.11.21 h1:w77zY/9RnUAWcIQyDC0Fc89mCvwftR8F+zsR/OH6enk=
|
||||
github.com/Azure/go-autorest/autorest v0.11.21/go.mod h1:Do/yuMSW/13ayUkcVREpsMHGG+MvV81uzSCFgYPj4tM=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.5.3/go.mod h1:4bJZhUhcq8LB20TruwHbAQsmUs2Xh+QR7utuJpLXX3A=
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM=
|
||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
||||
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
|
||||
|
@ -125,7 +126,7 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
|
|||
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
|
||||
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
|
||||
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
|
||||
github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
|
||||
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
|
||||
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
|
||||
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
|
@ -152,14 +153,15 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
|
|||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922 h1:8ypNbf5sd3Sm3cKJ9waOGoQv6dKAFiFty9L6NP1AqJ4=
|
||||
github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a h1:E/8AP5dFtMhl5KPJz66Kt9G0n+7Sn41Fy1wv9/jHOrc=
|
||||
github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
|
||||
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
|
||||
github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
|
||||
github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0=
|
||||
github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
|
@ -185,8 +187,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
|
|||
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
||||
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.40.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go v1.40.37 h1:I+Q6cLctkFyMMrKukcDnj+i2kjrQ37LGiOM6xmsxC48=
|
||||
github.com/aws/aws-sdk-go v1.40.37/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go v1.41.7 h1:vlpR8Cky3ZxUVNINgeRZS6N0p6zmFvu/ZqRRwrTI25U=
|
||||
github.com/aws/aws-sdk-go v1.41.7/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
|
||||
github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps=
|
||||
|
@ -197,6 +199,7 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
|||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
|
||||
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
|
@ -222,6 +225,7 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
|
|||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
|
||||
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
|
@ -229,6 +233,7 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE
|
|||
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
|
||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
||||
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
|
@ -272,8 +277,8 @@ github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo
|
|||
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
|
||||
github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
|
||||
github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
|
||||
github.com/containerd/containerd v1.5.4 h1:uPF0og3ByFzDnaStfiQj3fVGTEtaSNyU+bW7GR/nqGA=
|
||||
github.com/containerd/containerd v1.5.4/go.mod h1:sx18RgvW6ABJ4iYUw7Q5x7bgFOAB9B6G7+yO0XBc4zw=
|
||||
github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM=
|
||||
github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
|
||||
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
|
@ -335,6 +340,7 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7
|
|||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
|
@ -360,14 +366,13 @@ github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgz
|
|||
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
|
||||
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4=
|
||||
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/digitalocean/godo v1.65.0 h1:3SywGJBC18HaYtPQF+T36jYzXBi+a6eIMonSjDll7TA=
|
||||
github.com/digitalocean/godo v1.65.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
|
||||
github.com/digitalocean/godo v1.69.1 h1:aCyfwth8R3DeOaWB9J9E8v7cjlDIlF19eXTt8R3XhTE=
|
||||
github.com/digitalocean/godo v1.69.1/go.mod h1:epPuOzTOOJujNo0nduDj2D5O1zu8cSpp9R+DdN0W9I0=
|
||||
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
|
||||
github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
|
||||
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
|
@ -375,8 +380,8 @@ github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TT
|
|||
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v20.10.8+incompatible h1:RVqD337BgQicVCzYrrlhLDWhq6OAD2PJDUg2LsEUvKM=
|
||||
github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v20.10.9+incompatible h1:JlsVnETOjM2RLQa0Cc1XCIspUdXW3Zenq9P54uXBm6k=
|
||||
github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
|
@ -453,8 +458,9 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
|
|||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
|
||||
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
|
||||
github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw=
|
||||
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
|
@ -539,8 +545,8 @@ github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk
|
|||
github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
|
||||
github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
|
||||
github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
|
||||
github.com/go-openapi/strfmt v0.20.2 h1:6XZL+fF4VZYFxKQGLAUB358hOrRh/wS51uWEtlONADE=
|
||||
github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
|
||||
github.com/go-openapi/strfmt v0.20.3 h1:YVG4ZgPZ00km/lRHrIf7c6cKL5/4FAUtG2T9RxWAgDY=
|
||||
github.com/go-openapi/strfmt v0.20.3/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
|
||||
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
|
||||
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
|
||||
github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
|
||||
|
@ -602,6 +608,7 @@ github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblf
|
|||
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
|
||||
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
|
||||
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
|
@ -616,6 +623,8 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
|
|||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=
|
||||
github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
|
||||
github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o=
|
||||
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
|
||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
|
@ -707,8 +716,8 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe
|
|||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210827144239-02619b876842 h1:JCrt5MIE1fHQtdy1825HwJ45oVQaqHE6lgssRhjcg/o=
|
||||
github.com/google/pprof v0.0.0-20210827144239-02619b876842/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0 h1:zHs+jv3LO743/zFGcByu2KmpbliCU2AhjcGgrdTwSG4=
|
||||
github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
|
@ -717,8 +726,9 @@ github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
|
|||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso=
|
||||
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
|
||||
github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU=
|
||||
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
|
||||
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
|
||||
|
@ -727,8 +737,8 @@ github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9
|
|||
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
|
||||
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
|
||||
github.com/gophercloud/gophercloud v0.10.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss=
|
||||
github.com/gophercloud/gophercloud v0.20.0 h1:1+4jrsjVhdX5omlAo4jkmFc6ftLbuXLzgFo4i6lH+Gk=
|
||||
github.com/gophercloud/gophercloud v0.20.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
|
||||
github.com/gophercloud/gophercloud v0.22.0 h1:9lFISNLafZcecT0xUveIMt3IafexC6DIV9ek1SZdSMw=
|
||||
github.com/gophercloud/gophercloud v0.22.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
|
||||
|
@ -751,8 +761,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4
|
|||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
|
||||
github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU=
|
||||
github.com/hashicorp/consul/api v1.10.1 h1:MwZJp86nlnL+6+W1Zly4JUuVn9YHhMggBirMpHGD7kw=
|
||||
github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
|
||||
github.com/hashicorp/consul/api v1.11.0 h1:Hw/G8TtRvOElqxVIhBzXciiSTbapq8hZ2XKZsXk5ZCE=
|
||||
github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
|
||||
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
|
||||
github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU=
|
||||
|
@ -814,26 +824,28 @@ github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmK
|
|||
github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
|
||||
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
|
||||
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY=
|
||||
github.com/influxdata/flux v0.120.1/go.mod h1:pGSAvyAA5d3et7SSzajaYShWYXmnRnJJq2qWi+WWZ2I=
|
||||
github.com/influxdata/flux v0.131.0/go.mod h1:CKvnYe6FHpTj/E0YGI7TcOZdGiYHoToOPSnoa12RtKI=
|
||||
github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA=
|
||||
github.com/influxdata/influxdb v1.8.0/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ=
|
||||
github.com/influxdata/influxdb v1.9.3 h1:60F7eqotCxogyuZAjNglNRG9D6WY65KR9mcmugBx6cs=
|
||||
github.com/influxdata/influxdb v1.9.3/go.mod h1:xD4ZjAgEJQO9/bX3NhFrssKtdNPi+ki1kjrttJRDhGc=
|
||||
github.com/influxdata/influxdb v1.9.5 h1:4O7AC5jOA9RoqtDuD2rysXbumcEwaqWlWXmwuyK+a2s=
|
||||
github.com/influxdata/influxdb v1.9.5/go.mod h1:4uPVvcry9KWQVWLxyT9641qpkRXUBN+xa0MJFFNNLKo=
|
||||
github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo=
|
||||
github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
|
||||
github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
|
||||
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
|
||||
github.com/influxdata/pkg-config v0.2.7/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk=
|
||||
github.com/influxdata/pkg-config v0.2.8/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk=
|
||||
github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
|
||||
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
|
||||
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
|
||||
|
@ -861,8 +873,9 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
|
|||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
|
||||
|
@ -886,6 +899,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY
|
|||
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
|
||||
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg=
|
||||
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
|
@ -907,8 +922,8 @@ github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdA
|
|||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linode/linodego v0.32.0 h1:IK04cx2b/IwAAd6XLruf1Dl/n3dRXj87Uw/5qo6afVU=
github.com/linode/linodego v0.32.0/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM=
github.com/linode/linodego v1.1.0 h1:ZiFVUptlzuExtUbHZtXiN7I0dAOFQAyirBKb/6/n9n4=
github.com/linode/linodego v1.1.0/go.mod h1:x/7+BoaKd4unViBmS2umdjYyVAmpFtBtEXZ0wou7FYQ=
github.com/lyft/protoc-gen-star v0.5.1/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkVUl+SrEe+ZORU=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=

@ -982,14 +997,15 @@ github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2J
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=

@ -1053,14 +1069,17 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w=
github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w=

@ -1141,12 +1160,13 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.31.1 h1:d18hG4PkHnNAKNMOmFuXFaiY8Us0nird/2m60uS1AMs=
github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/exporter-toolkit v0.6.1 h1:Aqk75wQD92N9CqmTlZwjKwq6272nOGrWIbc8Z7+xQO0=
github.com/prometheus/exporter-toolkit v0.6.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g=
github.com/prometheus/exporter-toolkit v0.7.0 h1:XtYeVeeC5daG4txbc9+mieKq+/AK4gtIBLl9Mulrjnk=
github.com/prometheus/exporter-toolkit v0.7.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=

@ -1205,12 +1225,13 @@ github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/snowflakedb/gosnowflake v1.3.4/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE=
github.com/snowflakedb/gosnowflake v1.3.13/go.mod h1:6nfka9aTXkUNha1p1cjeeyjDvcyh7jfjp0l8kGpDBok=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=

@ -1308,6 +1329,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=

@ -1346,8 +1368,8 @@ go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=

@ -1382,6 +1404,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=

@ -1497,8 +1520,9 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f h1:w6wWR0H+nyVpbSAQbzVEIACVyr/h8l/BEkY6Sokc7Eg=
golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211020060615-d418f374d309 h1:A0lJIi+hcTR6aajJH4YqKWwohY4aW9RO7oRMcdv+HKI=
golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@ -1513,8 +1537,9 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 h1:B333XXssMuKQeBwiNODx4TupZy7bf4sxFZnN2ZOcvUE=
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

@ -1604,7 +1629,6 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

@ -1626,6 +1650,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

@ -1634,9 +1659,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 h1:GkvMjFtXUmahfDtashnc1mnrCtuBVcwse5QV2lUk/tI=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211020174200-9d6173849985 h1:LOlKVhfDyahgmqa97awczplwkjzNaELFg3zRIJ13RYo=
golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=

@ -1695,7 +1723,6 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=

@ -1739,8 +1766,9 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@ -1781,8 +1809,11 @@ google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtuk
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
google.golang.org/api v0.56.0 h1:08F9XVYTLOGeSQb3xI9C0gXMuQanhdGed0cWFhDozbI=
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
google.golang.org/api v0.59.0 h1:fPfFO7gttlXYo2ALuD3HxJzh8vaF++4youI0BkFL6GE=
google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

@ -1852,8 +1883,13 @@ google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKr
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 h1:3V2dxSZpz4zozWWUq36vUxXEKnSYitEH2LdsAx+RUmg=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211020151524-b7c3a969101a h1:8maMHMQp9NroHXhc3HelFX9Ay2lWlXLcdH5mw5Biz0s=
google.golang.org/genproto v0.0.0-20211020151524-b7c3a969101a/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=

@ -1964,14 +2000,14 @@ k8s.io/api v0.17.5/go.mod h1:0zV5/ungglgy2Rlm3QK8fbxkXVs+BSJWpJP/+8gUVLY=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY=
k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw=
k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8=
k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM=
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk=
k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=

@ -1979,8 +2015,8 @@ k8s.io/client-go v0.17.5/go.mod h1:S8uZpBpjJJdEH/fEyxcqg7Rn0P5jH+ilkgBHjriSmNo=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw=
k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc=
k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=

@ -1998,8 +2034,8 @@ k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM=
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
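Each module touched above carries up to two go.sum entries: an `h1:` hash over the module's extracted file tree and a second hash over its `go.mod` alone; `go mod verify` recomputes both against the module cache. As a side note (not part of this commit), an `h1:` value can be reproduced with the `golang.org/x/mod/sumdb/dirhash` package; the directory and module path below are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// Hash1 is the "h1:" algorithm recorded in go.sum: a SHA-256 over a
	// sorted listing of per-file SHA-256 hashes inside the module tree.
	h, err := dirhash.HashDir("/path/to/module", "github.com/linode/linodego@v1.1.0", dirhash.Hash1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h) // prints an "h1:..." string comparable to the entries above
}
```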
@ -303,7 +303,6 @@ func (n *Manager) nextBatch() []*Alert {

// Run dispatches notifications continuously.
func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) {
	for {
		select {
		case <-n.ctx.Done():

@ -602,7 +601,7 @@ func (n *Manager) Stop() {
	n.cancel()
}

// alertmanager holds Alertmanager endpoint information.
// Alertmanager holds Alertmanager endpoint information.
type alertmanager interface {
	url() *url.URL
}

@ -654,7 +653,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) {
	allDroppedAms := []alertmanager{}

	for _, tg := range tgs {
		ams, droppedAms, err := alertmanagerFromGroup(tg, s.cfg)
		ams, droppedAms, err := AlertmanagerFromGroup(tg, s.cfg)
		if err != nil {
			level.Error(s.logger).Log("msg", "Creating discovered Alertmanagers failed", "err", err)
			continue

@ -691,9 +690,9 @@ func postPath(pre string, v config.AlertmanagerAPIVersion) string {
	return path.Join("/", pre, alertPushEndpoint)
}

// alertmanagerFromGroup extracts a list of alertmanagers from a target group
// AlertmanagerFromGroup extracts a list of alertmanagers from a target group
// and an associated AlertmanagerConfig.
func alertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) {
func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) {
	var res []alertmanager
	var droppedAlertManagers []alertmanager
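The hunks above export `alertmanagerFromGroup` as `AlertmanagerFromGroup`, so code outside the `notifier` package can derive Alertmanager endpoints from a discovered target group. A minimal sketch of the call shape, mirroring the test usage below; the empty `AlertmanagerConfig` and the target address are illustrative only, not taken from this commit:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/notifier"
)

func main() {
	tg := &targetgroup.Group{
		Targets: []model.LabelSet{{model.AddressLabel: "alertmanager:9093"}},
	}
	// Returns the active and dropped endpoints after applying the
	// config's relabeling rules to the group's targets.
	active, dropped, err := notifier.AlertmanagerFromGroup(tg, &config.AlertmanagerConfig{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d active, %d dropped\n", len(active), len(dropped))
}
```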
@ -40,7 +40,7 @@ import (
)

func TestPostPath(t *testing.T) {
	var cases = []struct {
	cases := []struct {
		in, out string
	}{
		{

@ -447,7 +447,7 @@ func (a alertmanagerMock) url() *url.URL {

func TestLabelSetNotReused(t *testing.T) {
	tg := makeInputTargetGroup()
	_, _, err := alertmanagerFromGroup(tg, &config.AlertmanagerConfig{})
	_, _, err := AlertmanagerFromGroup(tg, &config.AlertmanagerConfig{})

	require.NoError(t, err)

@ -456,7 +456,7 @@ func TestLabelSetNotReused(t *testing.T) {
}

func TestReload(t *testing.T) {
	var tests = []struct {
	tests := []struct {
		in  *targetgroup.Group
		out string
	}{

@ -500,11 +500,10 @@ alerting:
		require.Equal(t, tt.out, res)
	}

}

func TestDroppedAlertmanagers(t *testing.T) {
	var tests = []struct {
	tests := []struct {
		in  *targetgroup.Group
		out string
	}{
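The `var cases = ...` to `cases := ...` rewrites in these test hunks match the stricter gofumpt formatting this commit adopts: a local variable with an initializer should use the short declaration form. A standalone sketch of the rule (hypothetical names, not from this diff):

```go
package main

import "fmt"

func main() {
	// Flagged by gofumpt: "var" with an initializer inside a function body.
	var before = []struct{ in, out string }{{"in", "out"}}

	// Preferred: the short declaration form, as rewritten above.
	after := []struct{ in, out string }{{"in", "out"}}

	fmt.Println(before, after)
}
```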
3
pkg/README.md
Normal file
@ -0,0 +1,3 @@
The `pkg` directory is deprecated.
Please do not add new packages to this directory.
Existing packages will be moved elsewhere eventually.
@ -704,7 +704,7 @@ func BenchmarkLabels_Hash(b *testing.B) {
		lbls: func() Labels {
			lbls := make(Labels, 10)
			for i := 0; i < len(lbls); i++ {
				//Label ~50B name, 50B value.
				// Label ~50B name, 50B value.
				lbls[i] = Label{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)}
			}
			return lbls
@ -21,11 +21,9 @@ import (
	"github.com/pkg/errors"
)

var (
	timestampFormat = log.TimestampFormat(
		func() time.Time { return time.Now().UTC() },
		"2006-01-02T15:04:05.000Z07:00",
	)
var timestampFormat = log.TimestampFormat(
	func() time.Time { return time.Now().UTC() },
	"2006-01-02T15:04:05.000Z07:00",
)

// JSONFileLogger represents a logger that writes JSON to a file.
@ -40,7 +38,7 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) {
		return nil, nil
	}

	f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
	f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666)
	if err != nil {
		return nil, errors.Wrap(err, "can't create json logger")
	}
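The `0666` to `0o666` change is another gofumpt rule: both literals denote the same permission bits, but the `0o` prefix (valid since Go 1.13) makes the octal base explicit. A quick standalone check:

```go
package main

import "fmt"

func main() {
	// Identical values; only the spelling of the octal base differs.
	fmt.Println(0666 == 0o666) // true
	fmt.Printf("%o\n", 0o666)  // 666
}
```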
@ -83,7 +83,7 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
		set[g.Name] = struct{}{}

		for i, r := range g.Rules {
			for _, node := range r.Validate() {
			for _, node := range g.Rules[i].Validate() {
				var ruleName yaml.Node
				if r.Alert.Value != "" {
					ruleName = r.Alert
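Switching from `r.Validate()` to `g.Rules[i].Validate()` is presumably motivated by Go's range-copy semantics: `range` hands each iteration a copy of the slice element, so a method that records state on the rule would only touch the copy, while indexing operates on the element in place. A standalone sketch of those semantics (hypothetical types, not the rulefmt code):

```go
package main

import "fmt"

type rule struct{ validated bool }

func (r *rule) Validate() { r.validated = true }

func main() {
	rules := []rule{{}, {}}

	for _, r := range rules {
		r.Validate() // mutates the per-iteration copy only
	}
	fmt.Println(rules) // [{false} {false}]

	for i := range rules {
		rules[i].Validate() // mutates the slice element itself
	}
	fmt.Println(rules) // [{true} {true}]
}
```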
Some files were not shown because too many files have changed in this diff.