mirror of https://github.com/prometheus/prometheus.git (synced 2025-03-05 20:59:13 -08:00)

Merge remote-tracking branch 'remotes/prometheus/main' into sync-upstream

This commit is contained in: f955b69b3b

.github/workflows/lock.yml (vendored): 2 changes
@@ -16,7 +16,7 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'prometheus'
     steps:
-      - uses: dessant/lock-threads@be8aa5be94131386884a6da4189effda9b14aa21 # v4.0.1
+      - uses: dessant/lock-threads@1bf7ec25051fe7c00bdd17e6a7cf3d7bfb7dc771 # v5.0.1
        with:
          process-only: 'issues'
          issue-inactive-days: '180'
@@ -23,6 +23,7 @@ linters:
   - nolintlint
   - predeclared
   - revive
+  - testifylint
   - unconvert
   - unused
@@ -35,13 +36,9 @@ issues:
     - path: _test.go
       linters:
         - errcheck
-    - path: tsdb/
+    - path: "tsdb/head_wal.go"
       linters:
         - errorlint
-    - path: tsdb/
-      text: "import 'github.com/pkg/errors' is not allowed"
-      linters:
-        - depguard
     - linters:
         - godot
       source: "^// ==="
@@ -81,8 +78,55 @@ linters-settings:
   gofumpt:
     extra-rules: true
   revive:
+    # By default, revive will enable only the linting rules that are named in the configuration file.
+    # So, it's needed to explicitly set in configuration all required rules.
+    # The following configuration enables all the rules from the defaults.toml
+    # https://github.com/mgechev/revive/blob/master/defaults.toml
     rules:
-      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter
-      - name: unused-parameter
-        severity: warning
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
+      - name: blank-imports
+      - name: context-as-argument
+        arguments:
+          # allow functions with test or bench signatures
+          - allowTypesBefore: "*testing.T,testing.TB"
+      - name: context-keys-type
+      - name: dot-imports
+      # A lot of false positives: incorrectly identifies channel draining as "empty code block".
+      # See https://github.com/mgechev/revive/issues/386
+      - name: empty-block
         disabled: true
+      - name: error-naming
+      - name: error-return
+      - name: error-strings
+      - name: errorf
+      - name: exported
+      - name: increment-decrement
+      - name: indent-error-flow
+      - name: package-comments
+      - name: range
+      - name: receiver-naming
+      - name: redefines-builtin-id
+      - name: superfluous-else
+      - name: time-naming
+      - name: unexported-return
+      - name: unreachable-code
+      - name: unused-parameter
+        disabled: true
+      - name: var-declaration
+      - name: var-naming
+  testifylint:
+    disable:
+      - float-compare
+      - go-require
+    enable:
+      - bool-compare
+      - compares
+      - empty
+      - error-is-as
+      - error-nil
+      - expected-actual
+      - len
+      - require-error
+      - suite-dont-use-pkg
+      - suite-extra-assert-call
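The new testifylint linter is what drives the assertion rewrites further down in this commit (require.Equal on a len(...) becoming require.Len, and similar). A minimal sketch of the kind of rewrite its `len` rule enforces, using only the standard testify API:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestLenAssertions(t *testing.T) {
	got := []int{42}

	// Flagged by testifylint's "len" rule (the pattern rewritten throughout
	// this commit's test files):
	//   require.Equal(t, 1, len(got))

	// Preferred form, which also yields a clearer failure message:
	require.Len(t, got, 1)
}
```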
CHANGELOG.md: 18 changes
@@ -1,21 +1,16 @@
 # Changelog
 
-## 2.48.0-rc.2 / 2023-11-02
+## unreleased
 
-* [ENHANCEMENT] Scraping: Add configuration option for tracking staleness of scraped timestamps. #13060
-* [BUGFIX] Storage: Fix crash caused by incorrect mixed samples handling. #13055
-* [BUGFIX] TSDB: Fix compactor failures by adding min time to histogram chunks. #13062
+* [ENHANCEMENT] TSDB: Make the wlog watcher read segments synchronously when not tailing. #13224
+* [BUGFIX] Agent: Participate in notify calls. #13223
 
-## 2.48.0-rc.1 / 2023-10-24
-
-* [BUGFIX] PromQL: Reduce inefficiency introduced by warnings/annotations and temporarily remove possible non-counter warnings. #13012
-
-## 2.48.0-rc.0 / 2023-10-10
+## 2.48.0 / 2023-11-16
 
 * [CHANGE] Remote-write: respect Retry-After header on 5xx errors. #12677
 * [FEATURE] Alerting: Add AWS SigV4 authentication support for Alertmanager endpoints. #12774
 * [FEATURE] Promtool: Add support for histograms in the TSDB dump command. #12775
-* [FEATURE] PromQL: Add warnings (and annotations) to PromQL query results. #12152 #12982 #12988
+* [FEATURE] PromQL: Add warnings (and annotations) to PromQL query results. #12152 #12982 #12988 #13012
 * [FEATURE] Remote-write: Add Azure AD OAuth authentication support for remote write requests. #12572
 * [ENHANCEMENT] Remote-write: Add a header to count retried remote write requests. #12729
 * [ENHANCEMENT] TSDB: Improve query performance by re-using iterator when moving between series. #12757
@@ -31,6 +26,7 @@
 * [ENHANCEMENT] Scraping: Save memory when scraping by delaying creation of buffer. #12953
 * [ENHANCEMENT] Agent: Allow ingestion of out-of-order samples. #12897
 * [ENHANCEMENT] Promtool: Improve support for native histograms in TSDB analyze command. #12869
+* [ENHANCEMENT] Scraping: Add configuration option for tracking staleness of scraped timestamps. #13060
 * [BUGFIX] SD: Ensure that discovery managers are properly canceled. #10569
 * [BUGFIX] TSDB: Fix PostingsForMatchers race with creating new series. #12558
 * [BUGFIX] TSDB: Fix handling of explicit counter reset header in histograms. #12772
@@ -40,6 +36,8 @@
 * [BUGFIX] Promtool: Fix errors not being reported in check rules command. #12715
 * [BUGFIX] TSDB: Avoid panics reported in logs when head initialization takes a long time. #12876
 * [BUGFIX] TSDB: Ensure that WBL is repaired when possible. #12406
+* [BUGFIX] Storage: Fix crash caused by incorrect mixed samples handling. #13055
+* [BUGFIX] TSDB: Fix compactor failures by adding min time to histogram chunks. #13062
 
 ## 2.47.1 / 2023-10-04
 
Makefile: 18 changes
@@ -93,9 +93,9 @@ endif
 # If we only want to only test go code we have to change the test target
 # which is called by all.
 ifeq ($(GO_ONLY),1)
-test: common-test
+test: common-test check-go-mod-version
 else
-test: common-test ui-build-module ui-test ui-lint
+test: common-test ui-build-module ui-test ui-lint check-go-mod-version
 endif
 
 .PHONY: npm_licenses
@@ -138,3 +138,17 @@ bench_tsdb: $(PROMU)
 cli-documentation:
 	$(GO) run ./cmd/prometheus/ --write-documentation > docs/command-line/prometheus.md
 	$(GO) run ./cmd/promtool/ write-documentation > docs/command-line/promtool.md
+
+.PHONY: check-go-mod-version
+check-go-mod-version:
+	@echo ">> checking go.mod version matching"
+	@./scripts/check-go-mod-version.sh
+
+.PHONY: update-all-go-deps
+update-all-go-deps:
+	@$(MAKE) update-go-deps
+	@echo ">> updating Go dependencies in ./documentation/examples/remote_storage/"
+	@cd ./documentation/examples/remote_storage/ && for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
+		$(GO) get -d $$m; \
+	done
+	@cd ./documentation/examples/remote_storage/ && $(GO) mod tidy
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.54.2
+GOLANGCI_LINT_VERSION ?= v1.55.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
RELEASE.md: 11 changes
@@ -53,7 +53,8 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.46 | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) |
 | v2.47 | 2023-08-23 | Bryan Boreham (GitHub: @bboreham) |
 | v2.48 | 2023-10-04 | Levi Harrison (GitHub: @LeviHarrison) |
-| v2.49 | 2023-11-15 | **searching for volunteer** |
+| v2.49 | 2023-12-05 | Bartek Plotka (GitHub: @bwplotka) |
+| v2.50 | 2024-01-16 | **searching for volunteer** |
 
 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
@@ -108,14 +109,16 @@ This is also a good time to consider any experimental features and feature
 flags for promotion to stable or for deprecation or ultimately removal. Do any
 of these in pull requests, one per feature.
 
+> NOTE: As a validation step check if all security alerts are closed after this step: https://github.com/prometheus/prometheus/security/dependabot. Sometimes it's ok
+> if not critical and e.g. fix is not released yet (or it does not relate to
+> upgrading) or when we are unaffected.
+
 #### Manually updating Go dependencies
 
 This is usually only needed for `+incompatible` and `v0.0.0` non-semver updates.
 
 ```bash
-make update-go-deps
-git add go.mod go.sum
-git commit -m "Update dependencies"
+make update-all-go-deps
 ```
 
 #### Manually updating React dependencies
@@ -206,9 +206,15 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		case "native-histograms":
 			c.tsdb.EnableNativeHistograms = true
 			// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
-			config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultNativeHistogramScrapeProtocols
-			config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultNativeHistogramScrapeProtocols
+			config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
+			config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
 			level.Info(logger).Log("msg", "Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
+		case "created-timestamp-zero-ingestion":
+			c.scrape.EnableCreatedTimestampZeroIngestion = true
+			// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
+			config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
+			config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
+			level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
 		case "":
 			continue
 		case "promql-at-modifier", "promql-negative-offset":
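Both feature-flag branches mutate the same package-level defaults. A minimal sketch of the effect, assuming the identifiers introduced in this diff (config.DefaultProtoFirstScrapeProtocols) are available at this commit:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	fmt.Println("default order:    ", config.DefaultGlobalConfig.ScrapeProtocols)

	// What both feature-flag branches above do: swap the process-wide default
	// so that targets without an explicit scrape_protocols setting negotiate
	// PrometheusProto first.
	config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
	fmt.Println("proto-first order:", config.DefaultGlobalConfig.ScrapeProtocols)
}
```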
@@ -614,14 +620,52 @@ func main() {
 		discoveryManagerNotify  discoveryManager
 	)
 
+	// Kubernetes client metrics are used by Kubernetes SD.
+	// They are registered here in the main function, because SD mechanisms
+	// can only register metrics specific to a SD instance.
+	// Kubernetes client metrics are the same for the whole process -
+	// they are not specific to an SD instance.
+	err = discovery.RegisterK8sClientMetricsWithPrometheus(prometheus.DefaultRegisterer)
+	if err != nil {
+		level.Error(logger).Log("msg", "failed to register Kubernetes client metrics", "err", err)
+		os.Exit(1)
+	}
 	if cfg.enableNewSDManager {
-		discovery.RegisterMetrics()
-		discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), discovery.Name("scrape"))
-		discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), discovery.Name("notify"))
+		{
+			discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, discovery.Name("scrape"))
+			if discMgr == nil {
+				level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
+				os.Exit(1)
+			}
+			discoveryManagerScrape = discMgr
+		}
+
+		{
+			discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, discovery.Name("notify"))
+			if discMgr == nil {
+				level.Error(logger).Log("msg", "failed to create a discovery manager notify")
+				os.Exit(1)
+			}
+			discoveryManagerNotify = discMgr
+		}
 	} else {
-		legacymanager.RegisterMetrics()
-		discoveryManagerScrape = legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), legacymanager.Name("scrape"))
-		discoveryManagerNotify = legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), legacymanager.Name("notify"))
+		{
+			discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, legacymanager.Name("scrape"))
+			if discMgr == nil {
+				level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
+				os.Exit(1)
+			}
+			discoveryManagerScrape = discMgr
+		}
+
+		{
+			discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, legacymanager.Name("notify"))
+			if discMgr == nil {
+				level.Error(logger).Log("msg", "failed to create a discovery manager notify")
+				os.Exit(1)
+			}
+			discoveryManagerNotify = discMgr
+		}
 	}
 
 	scrapeManager, err := scrape.NewManager(
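For callers outside main(), a compiling sketch of the new discovery manager constructor shape: the registerer argument follows the logger, and a nil result must now be handled. Names here are illustrative; the signature and nil check are taken from this diff:

```go
package main

import (
	"context"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/discovery"
)

func main() {
	// main() above passes prometheus.DefaultRegisterer; an isolated
	// registry works too.
	m := discovery.NewManager(
		context.Background(),
		log.NewNopLogger(),
		prometheus.NewRegistry(),
		discovery.Name("example"),
	)
	// NewManager can now return nil, which the code above treats as fatal.
	if m == nil {
		panic("discovery manager construction failed")
	}
}
```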
@@ -1127,6 +1171,7 @@ func main() {
 		)
 
 		localStorage.Set(db, 0)
+		db.SetWriteNotified(remoteStorage)
 		close(dbOpen)
 		<-cancel
 		return nil
@@ -1448,6 +1493,10 @@ func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels,
 	return 0, tsdb.ErrNotReady
 }
 
+func (n notReadyAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) {
+	return 0, tsdb.ErrNotReady
+}
+
 func (n notReadyAppender) Commit() error { return tsdb.ErrNotReady }
 
 func (n notReadyAppender) Rollback() error { return tsdb.ErrNotReady }
@@ -1586,7 +1635,6 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
 		RetentionDuration:          int64(time.Duration(opts.RetentionDuration) / time.Millisecond),
 		MaxBytes:                   int64(opts.MaxBytes),
 		NoLockfile:                 opts.NoLockfile,
-		AllowOverlappingCompaction: true,
 		WALCompression:             wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType),
 		HeadChunksWriteQueueSize:   opts.HeadChunksWriteQueueSize,
 		SamplesPerChunk:            opts.SamplesPerChunk,
@@ -346,7 +346,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
 			continue
 		}
 
-		require.Equal(t, 1, len(g.GetMetric()))
+		require.Len(t, g.GetMetric(), 1)
 		if _, ok := res[m]; ok {
 			t.Error("expected only one metric family for", m)
 			t.FailNow()
@@ -284,7 +284,7 @@ func (p *queryLogTest) run(t *testing.T) {
 
 	if !p.enabledAtStart {
 		p.query(t)
-		require.Equal(t, 0, len(readQueryLog(t, queryLogFile.Name())))
+		require.Empty(t, readQueryLog(t, queryLogFile.Name()))
 		p.setQueryLog(t, queryLogFile.Name())
 		p.reloadConfig(t)
 	}
@@ -309,7 +309,7 @@ func (p *queryLogTest) run(t *testing.T) {
 	p.query(t)
 
 	ql = readQueryLog(t, queryLogFile.Name())
-	require.Equal(t, qc, len(ql))
+	require.Len(t, ql, qc)
 
 	qc = len(ql)
 	p.setQueryLog(t, queryLogFile.Name())
@@ -320,7 +320,7 @@ func (p *queryLogTest) run(t *testing.T) {
 
 	ql = readQueryLog(t, queryLogFile.Name())
 	if p.exactQueryCount() {
-		require.Equal(t, qc, len(ql))
+		require.Len(t, ql, qc)
 	} else {
 		require.Greater(t, len(ql), qc, "no queries logged")
 	}
@@ -340,7 +340,7 @@ func (p *queryLogTest) run(t *testing.T) {
 	require.NoError(t, os.Rename(queryLogFile.Name(), newFile.Name()))
 	ql = readQueryLog(t, newFile.Name())
 	if p.exactQueryCount() {
-		require.Equal(t, qc, len(ql))
+		require.Len(t, ql, qc)
 	}
 	p.validateLastQuery(t, ql)
 	qc = len(ql)
@@ -351,7 +351,7 @@ func (p *queryLogTest) run(t *testing.T) {
 
 	ql = readQueryLog(t, newFile.Name())
 	if p.exactQueryCount() {
-		require.Equal(t, qc, len(ql))
+		require.Len(t, ql, qc)
 	} else {
 		require.Greater(t, len(ql), qc, "no queries logged")
 	}
@@ -61,7 +61,7 @@ func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMa
 
 func testBlocks(t *testing.T, db *tsdb.DB, expectedMinTime, expectedMaxTime, expectedBlockDuration int64, expectedSamples []backfillSample, expectedNumBlocks int) {
 	blocks := db.Blocks()
-	require.Equal(t, expectedNumBlocks, len(blocks), "did not create correct number of blocks")
+	require.Len(t, blocks, expectedNumBlocks, "did not create correct number of blocks")
 
 	for i, block := range blocks {
 		require.Equal(t, block.MinTime()/expectedBlockDuration, (block.MaxTime()-1)/expectedBlockDuration, "block %d contains data outside of one aligned block duration", i)
@@ -36,6 +36,7 @@ import (
 	"github.com/google/pprof/profile"
 	"github.com/prometheus/client_golang/api"
 	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/prometheus/client_golang/prometheus/testutil/promlint"
 	config_util "github.com/prometheus/common/config"
@@ -198,6 +199,7 @@ func main() {
 	testCmd := app.Command("test", "Unit testing.")
 	testRulesCmd := testCmd.Command("rules", "Unit tests for rules.")
+	testRulesRun := testRulesCmd.Flag("run", "If set, will only run test groups whose names match the regular expression. Can be specified multiple times.").Strings()
 	testRulesFiles := testRulesCmd.Arg(
 		"test-rule-file",
 		"The unit test file.",
@@ -316,7 +318,7 @@ func main() {
 	switch parsedCmd {
 	case sdCheckCmd.FullCommand():
-		os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort))
+		os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort, prometheus.DefaultRegisterer))
 
 	case checkConfigCmd.FullCommand():
 		os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...))
|
||||||
EnableAtModifier: true,
|
EnableAtModifier: true,
|
||||||
EnableNegativeOffset: true,
|
EnableNegativeOffset: true,
|
||||||
},
|
},
|
||||||
|
*testRulesRun,
|
||||||
*testRulesFiles...),
|
*testRulesFiles...),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@@ -411,7 +414,7 @@ func checkExperimental(f bool) {
 	}
 }
 
-var lintError = fmt.Errorf("lint error")
+var errLint = fmt.Errorf("lint error")
 
 type lintConfig struct {
 	all bool
|
||||||
fmt.Fprintln(os.Stderr, " FAILED:")
|
fmt.Fprintln(os.Stderr, " FAILED:")
|
||||||
for _, e := range errs {
|
for _, e := range errs {
|
||||||
fmt.Fprintln(os.Stderr, e.Error())
|
fmt.Fprintln(os.Stderr, e.Error())
|
||||||
hasErrors = hasErrors || !errors.Is(e, lintError)
|
hasErrors = hasErrors || !errors.Is(e, errLint)
|
||||||
}
|
}
|
||||||
if hasErrors {
|
if hasErrors {
|
||||||
return failed, hasErrors
|
return failed, hasErrors
|
||||||
|
@@ -776,7 +779,7 @@ func checkRulesFromStdin(ls lintConfig) (bool, bool) {
 		}
 		failed = true
 		for _, err := range errs {
-			hasErrors = hasErrors || !errors.Is(err, lintError)
+			hasErrors = hasErrors || !errors.Is(err, errLint)
 		}
 	} else {
 		fmt.Printf("  SUCCESS: %d rules found\n", n)
@@ -797,7 +800,7 @@ func checkRules(files []string, ls lintConfig) (bool, bool) {
 		fmt.Fprintln(os.Stderr, "  FAILED:")
 		for _, e := range errs {
 			fmt.Fprintln(os.Stderr, e.Error())
-			hasErrors = hasErrors || !errors.Is(e, lintError)
+			hasErrors = hasErrors || !errors.Is(e, errLint)
 		}
 		if hasErrors {
 			continue
@@ -810,7 +813,7 @@ func checkRules(files []string, ls lintConfig) (bool, bool) {
 		}
 		failed = true
 		for _, err := range errs {
-			hasErrors = hasErrors || !errors.Is(err, lintError)
+			hasErrors = hasErrors || !errors.Is(err, errLint)
 		}
 	} else {
 		fmt.Printf("  SUCCESS: %d rules found\n", n)
@@ -837,7 +840,7 @@ func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []e
 			})
 		}
 		errMessage += "Might cause inconsistency while recording expressions"
-		return 0, []error{fmt.Errorf("%w %s", lintError, errMessage)}
+		return 0, []error{fmt.Errorf("%w %s", errLint, errMessage)}
 	}
 }
@@ -56,7 +56,7 @@ func TestQueryRange(t *testing.T) {
 	defer s.Close()
 
 	urlObject, err := url.Parse(s.URL)
-	require.Equal(t, nil, err)
+	require.NoError(t, err)
 
 	p := &promqlPrinter{}
 	exitCode := QueryRange(urlObject, http.DefaultTransport, map[string]string{}, "up", "0", "300", 0, p)
@@ -79,7 +79,7 @@ func TestQueryInstant(t *testing.T) {
 	defer s.Close()
 
 	urlObject, err := url.Parse(s.URL)
-	require.Equal(t, nil, err)
+	require.NoError(t, err)
 
 	p := &promqlPrinter{}
 	exitCode := QueryInstant(urlObject, http.DefaultTransport, "up", "300", p)
@@ -91,13 +91,13 @@ func TestBackfillRuleIntegration(t *testing.T) {
 			for _, err := range errs {
 				require.NoError(t, err)
 			}
-			require.Equal(t, 3, len(ruleImporter.groups))
+			require.Len(t, ruleImporter.groups, 3)
 			group1 := ruleImporter.groups[path1+";group0"]
 			require.NotNil(t, group1)
 			const defaultInterval = 60
 			require.Equal(t, defaultInterval*time.Second, group1.Interval())
 			gRules := group1.Rules()
-			require.Equal(t, 1, len(gRules))
+			require.Len(t, gRules, 1)
 			require.Equal(t, "rule1", gRules[0].Name())
 			require.Equal(t, "ruleExpr", gRules[0].Query().String())
 			require.Equal(t, 1, gRules[0].Labels().Len())
@@ -106,7 +106,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
 			require.NotNil(t, group2)
 			require.Equal(t, defaultInterval*time.Second, group2.Interval())
 			g2Rules := group2.Rules()
-			require.Equal(t, 2, len(g2Rules))
+			require.Len(t, g2Rules, 2)
 			require.Equal(t, "grp2_rule1", g2Rules[0].Name())
 			require.Equal(t, "grp2_rule1_expr", g2Rules[0].Query().String())
 			require.Equal(t, 0, g2Rules[0].Labels().Len())
@@ -122,7 +122,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
 			require.NoError(t, err)
 
 			blocks := db.Blocks()
-			require.Equal(t, (i+1)*tt.expectedBlockCount, len(blocks))
+			require.Len(t, blocks, (i+1)*tt.expectedBlockCount)
 
 			q, err := db.Querier(math.MinInt64, math.MaxInt64)
 			require.NoError(t, err)
@@ -22,6 +22,7 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery"
@@ -37,7 +38,7 @@ type sdCheckResult struct {
 }
 
 // CheckSD performs service discovery for the given job name and reports the results.
-func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool) int {
+func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool, registerer prometheus.Registerer) int {
 	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
 
 	cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
@@ -77,7 +78,7 @@ func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefault
 	defer cancel()
 
 	for _, cfg := range scrapeConfig.ServiceDiscoveryConfigs {
-		d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger})
+		d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger, Registerer: registerer})
 		if err != nil {
 			fmt.Fprintln(os.Stderr, "Could not create new discoverer", err)
 			return failureExitCode
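For third-party callers, the registerer now travels through discovery.DiscovererOptions. A hedged sketch of the calling convention, using file SD purely as a stand-in for any discovery mechanism (the field names are from this diff; the target file path is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/file"
)

func main() {
	// An isolated registry keeps one-shot runs (like promtool's SD check)
	// from colliding with anything registered globally.
	reg := prometheus.NewRegistry()

	cfg := &file.SDConfig{Files: []string{"targets.json"}}
	d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{
		Logger:     log.NewNopLogger(),
		Registerer: reg, // promtool itself passes prometheus.DefaultRegisterer
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("created discoverer: %T\n", d)
}
```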
@@ -35,7 +35,7 @@ func TestSDCheckResult(t *testing.T) {
 	}}
 
 	reg, err := relabel.NewRegexp("(.*)")
-	require.Nil(t, err)
+	require.NoError(t, err)
 
 	scrapeConfig := &config.ScrapeConfig{
 		ScrapeInterval: model.Duration(1 * time.Minute),
cmd/promtool/testdata/rules_run.yml (vendored, new file): 30 lines
@@ -0,0 +1,30 @@
+rule_files:
+  - rules.yml
+
+evaluation_interval: 1m
+
+# Minimal test cases to check focus on a rule group.
+tests:
+  - name: correct test
+    input_series:
+      - series: test
+        values: 1
+
+    promql_expr_test:
+      - expr: test
+        eval_time: 0
+        exp_samples:
+          - value: 1
+            labels: test
+
+  - name: wrong test
+    input_series:
+      - series: test
+        values: 0
+
+    promql_expr_test:
+      - expr: test
+        eval_time: 0
+        exp_samples:
+          - value: 1
+            labels: test
@@ -26,6 +26,7 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
+	"github.com/grafana/regexp"
 	"github.com/prometheus/common/model"
 	"gopkg.in/yaml.v2"
@@ -39,11 +40,16 @@ import (
 // RulesUnitTest does unit testing of rules based on the unit testing files provided.
 // More info about the file format can be found in the docs.
-func RulesUnitTest(queryOpts promql.LazyLoaderOpts, files ...string) int {
+func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, files ...string) int {
 	failed := false
 
+	var run *regexp.Regexp
+	if runStrings != nil {
+		run = regexp.MustCompile(strings.Join(runStrings, "|"))
+	}
+
 	for _, f := range files {
-		if errs := ruleUnitTest(f, queryOpts); errs != nil {
+		if errs := ruleUnitTest(f, queryOpts, run); errs != nil {
 			fmt.Fprintln(os.Stderr, "  FAILED:")
 			for _, e := range errs {
 				fmt.Fprintln(os.Stderr, e.Error())
@@ -61,7 +67,7 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, files ...string) int {
 	return successExitCode
 }
 
-func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts) []error {
+func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts, run *regexp.Regexp) []error {
 	fmt.Println("Unit Testing: ", filename)
 
 	b, err := os.ReadFile(filename)
|
||||||
// Testing.
|
// Testing.
|
||||||
var errs []error
|
var errs []error
|
||||||
for _, t := range unitTestInp.Tests {
|
for _, t := range unitTestInp.Tests {
|
||||||
|
if !matchesRun(t.TestGroupName, run) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
if t.Interval == 0 {
|
if t.Interval == 0 {
|
||||||
t.Interval = unitTestInp.EvaluationInterval
|
t.Interval = unitTestInp.EvaluationInterval
|
||||||
}
|
}
|
||||||
|
@@ -111,6 +121,14 @@ func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts) []error {
 	return nil
 }
 
+func matchesRun(name string, run *regexp.Regexp) bool {
+	if run == nil {
+		return true
+	}
+
+	return run.MatchString(name)
+}
+
 // unitTestFile holds the contents of a single unit test file.
 type unitTestFile struct {
 	RuleFiles          []string `yaml:"rule_files"`
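The run-filter semantics are worth spelling out: all --run values are joined with | into a single regular expression, and a nil pattern matches every group. A small self-contained sketch (stdlib regexp here; the repo imports github.com/grafana/regexp, an API-compatible drop-in):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same nil-means-everything contract as the matchesRun helper above.
func matchesRun(name string, run *regexp.Regexp) bool {
	if run == nil {
		return true
	}
	return run.MatchString(name)
}

func main() {
	// Multiple --run values are OR-ed into one pattern, as in RulesUnitTest.
	run := regexp.MustCompile(strings.Join([]string{"correct", "wrong"}, "|"))

	fmt.Println(matchesRun("correct test", run)) // true
	fmt.Println(matchesRun("other group", run))  // false
	fmt.Println(matchesRun("other group", nil))  // true: no --run given
}
```

Given the new testdata file above, an invocation like `promtool test rules --run correct ./testdata/rules_run.yml` should therefore execute only the passing group, which is exactly what the new test below asserts.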
@@ -125,7 +125,60 @@ func TestRulesUnitTest(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if got := RulesUnitTest(tt.queryOpts, tt.args.files...); got != tt.want {
+			if got := RulesUnitTest(tt.queryOpts, nil, tt.args.files...); got != tt.want {
+				t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestRulesUnitTestRun(t *testing.T) {
+	type args struct {
+		run   []string
+		files []string
+	}
+	tests := []struct {
+		name      string
+		args      args
+		queryOpts promql.LazyLoaderOpts
+		want      int
+	}{
+		{
+			name: "Test all without run arg",
+			args: args{
+				run:   nil,
+				files: []string{"./testdata/rules_run.yml"},
+			},
+			want: 1,
+		},
+		{
+			name: "Test all with run arg",
+			args: args{
+				run:   []string{"correct", "wrong"},
+				files: []string{"./testdata/rules_run.yml"},
+			},
+			want: 1,
+		},
+		{
+			name: "Test correct",
+			args: args{
+				run:   []string{"correct"},
+				files: []string{"./testdata/rules_run.yml"},
+			},
+			want: 0,
+		},
+		{
+			name: "Test wrong",
+			args: args{
+				run:   []string{"wrong"},
+				files: []string{"./testdata/rules_run.yml"},
+			},
+			want: 1,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := RulesUnitTest(tt.queryOpts, tt.args.run, tt.args.files...); got != tt.want {
 				t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
 			}
 		})
@@ -454,12 +454,19 @@ var (
 		OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0",
 	}
 
+	// DefaultScrapeProtocols is the set of scrape protocols that will be proposed
+	// to scrape target, ordered by priority.
 	DefaultScrapeProtocols = []ScrapeProtocol{
 		OpenMetricsText1_0_0,
 		OpenMetricsText0_0_1,
 		PrometheusText0_0_4,
 	}
-	DefaultNativeHistogramScrapeProtocols = []ScrapeProtocol{
+
+	// DefaultProtoFirstScrapeProtocols is like DefaultScrapeProtocols, but it
+	// favors protobuf Prometheus exposition format.
+	// Used by default for certain feature-flags like
+	// "native-histograms" and "created-timestamp-zero-ingestion".
+	DefaultProtoFirstScrapeProtocols = []ScrapeProtocol{
 		PrometheusProto,
 		OpenMetricsText1_0_0,
 		OpenMetricsText0_0_1,
@@ -568,6 +568,7 @@ var expectedConf = &Config{
 			ServiceDiscoveryConfigs: discovery.Configs{
 				&xds.KumaSDConfig{
 					Server:           "http://kuma-control-plane.kuma-system.svc:5676",
+					ClientID:         "main-prometheus",
 					HTTPClientConfig: config.DefaultHTTPClientConfig,
 					RefreshInterval:  model.Duration(15 * time.Second),
 					FetchTimeout:     model.Duration(2 * time.Minute),
@@ -1457,8 +1458,8 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
 	got := &Config{}
 	require.NoError(t, yaml.UnmarshalStrict(out, got))
 
-	require.Equal(t, true, got.RemoteWriteConfigs[0].QueueConfig.RetryOnRateLimit)
-	require.Equal(t, false, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit)
+	require.True(t, got.RemoteWriteConfigs[0].QueueConfig.RetryOnRateLimit)
+	require.False(t, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit)
 }
 
 func TestLoadConfig(t *testing.T) {
@@ -1475,9 +1476,9 @@ func TestLoadConfig(t *testing.T) {
 func TestScrapeIntervalLarger(t *testing.T) {
 	c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, log.NewNopLogger())
 	require.NoError(t, err)
-	require.Equal(t, 1, len(c.ScrapeConfigs))
+	require.Len(t, c.ScrapeConfigs, 1)
 	for _, sc := range c.ScrapeConfigs {
-		require.Equal(t, true, sc.ScrapeInterval >= sc.ScrapeTimeout)
+		require.GreaterOrEqual(t, sc.ScrapeInterval, sc.ScrapeTimeout)
 	}
 }
@@ -1493,7 +1494,7 @@ func TestElideSecrets(t *testing.T) {
 	yamlConfig := string(config)
 
 	matches := secretRe.FindAllStringIndex(yamlConfig, -1)
-	require.Equal(t, 22, len(matches), "wrong number of secret matches found")
+	require.Len(t, matches, 22, "wrong number of secret matches found")
 	require.NotContains(t, yamlConfig, "mysecret",
 		"yaml marshal reveals authentication credentials.")
 }
@@ -2063,7 +2064,7 @@ func TestAgentMode(t *testing.T) {
 	c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, log.NewNopLogger())
 	require.NoError(t, err)
-	require.Len(t, c.RemoteWriteConfigs, 0)
+	require.Empty(t, c.RemoteWriteConfigs)
 
 	c, err = LoadFile("testdata/agent_mode.good.yml", true, false, log.NewNopLogger())
 	require.NoError(t, err)
@@ -2257,5 +2258,5 @@ func TestScrapeConfigDisableCompression(t *testing.T) {
 	got := &Config{}
 	require.NoError(t, yaml.UnmarshalStrict(out, got))
 
-	require.Equal(t, false, got.ScrapeConfigs[0].EnableCompression)
+	require.False(t, got.ScrapeConfigs[0].EnableCompression)
 }
config/testdata/conf.good.yml (vendored): 1 change
@@ -221,6 +221,7 @@ scrape_configs:
 
     kuma_sd_configs:
       - server: http://kuma-control-plane.kuma-system.svc:5676
+        client_id: main-prometheus
 
   - job_name: service-marathon
     marathon_sd_configs:
config/testdata/roundtrip.good.yml (vendored): 1 change
@@ -108,6 +108,7 @@ scrape_configs:
 
     kuma_sd_configs:
       - server: http://kuma-control-plane.kuma-system.svc:5676
+        client_id: main-prometheus
 
     marathon_sd_configs:
       - servers:
@@ -234,6 +234,11 @@ type Config interface {
 
 type DiscovererOptions struct {
 	Logger log.Logger
+
+	// A registerer for the Discoverer's metrics.
+	Registerer prometheus.Registerer
+
+	HTTPClientOptions []config.HTTPClientOption
 }
 ```
@@ -30,6 +30,7 @@ import (
 	"github.com/aws/aws-sdk-go/service/ec2"
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -101,7 +102,7 @@ func (*EC2SDConfig) Name() string { return "ec2" }
 
 // NewDiscoverer returns a Discoverer for the EC2 Config.
 func (c *EC2SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewEC2Discovery(c, opts.Logger), nil
+	return NewEC2Discovery(c, opts.Logger, opts.Registerer), nil
 }
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface for the EC2 Config.
@@ -147,7 +148,7 @@ type EC2Discovery struct {
 }
 
 // NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets.
-func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery {
+func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, reg prometheus.Registerer) *EC2Discovery {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -156,10 +157,13 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery {
 		cfg: conf,
 	}
 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"ec2",
-		time.Duration(d.cfg.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "ec2",
+			Interval: time.Duration(d.cfg.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: reg,
+		},
 	)
 	return d
 }
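The same mechanical migration (positional arguments folding into a refresh.Options literal) repeats for Lightsail and Azure below. A minimal compiling sketch of the new call shape, assuming the Options fields shown in this diff, with a no-op refresher and an isolated registry as placeholder values:

```go
package main

import (
	"context"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// A do-nothing refresher, just to show the constructor shape.
func noopRefresh(ctx context.Context) ([]*targetgroup.Group, error) {
	return nil, nil
}

func main() {
	_ = refresh.NewDiscovery(
		refresh.Options{
			Logger:   log.NewNopLogger(),
			Mech:     "example", // the old 2nd positional argument (mechanism name)
			Interval: 30 * time.Second,
			RefreshF: noopRefresh,
			Registry: prometheus.NewRegistry(), // new: where refresh metrics register
		},
	)
}
```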
@@ -29,6 +29,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/lightsail"
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -84,7 +85,7 @@ func (*LightsailSDConfig) Name() string { return "lightsail" }
 
 // NewDiscoverer returns a Discoverer for the Lightsail Config.
 func (c *LightsailSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewLightsailDiscovery(c, opts.Logger), nil
+	return NewLightsailDiscovery(c, opts.Logger, opts.Registerer), nil
 }
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface for the Lightsail Config.
@@ -121,7 +122,7 @@ type LightsailDiscovery struct {
 }
 
 // NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets.
-func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger) *LightsailDiscovery {
+func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger, reg prometheus.Registerer) *LightsailDiscovery {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -129,10 +130,13 @@ func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger) *Lightsai
 		cfg: conf,
 	}
 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"lightsail",
-		time.Duration(d.cfg.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "lightsail",
+			Interval: time.Duration(d.cfg.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: reg,
+		},
 	)
 	return d
 }
@@ -28,9 +28,10 @@ import (
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
 	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
-	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4"
-	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
 	cache "github.com/Code-Hex/go-generics-cache"
 	"github.com/Code-Hex/go-generics-cache/policy/lru"
 	"github.com/go-kit/log"
@@ -78,17 +79,6 @@ var (
 		AuthenticationMethod: authMethodOAuth,
 		HTTPClientConfig:     config_util.DefaultHTTPClientConfig,
 	}
-
-	failuresCount = prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Name: "prometheus_sd_azure_failures_total",
-			Help: "Number of Azure service discovery refresh failures.",
-		})
-	cacheHitCount = prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Name: "prometheus_sd_azure_cache_hit_total",
-			Help: "Number of cache hit during refresh.",
-		})
 )
 
 var environments = map[string]cloud.Configuration{
@@ -105,7 +95,7 @@ func CloudConfigurationFromName(name string) (cloud.Configuration, error) {
 	name = strings.ToUpper(name)
 	env, ok := environments[name]
 	if !ok {
-		return env, fmt.Errorf("There is no cloud configuration matching the name %q", name)
+		return env, fmt.Errorf("there is no cloud configuration matching the name %q", name)
 	}
 
 	return env, nil
@@ -113,8 +103,6 @@ func CloudConfigurationFromName(name string) (cloud.Configuration, error) {
 
 func init() {
 	discovery.RegisterConfig(&SDConfig{})
-	prometheus.MustRegister(failuresCount)
-	prometheus.MustRegister(cacheHitCount)
 }
 
 // SDConfig is the configuration for Azure based service discovery.
@@ -137,7 +125,7 @@ func (*SDConfig) Name() string { return "azure" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger), nil
+	return NewDiscovery(c, opts.Logger, opts.Registerer)
 }
 
 func validateAuthParam(param, name string) error {
@@ -180,14 +168,16 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 
 type Discovery struct {
 	*refresh.Discovery
 	logger log.Logger
 	cfg    *SDConfig
 	port   int
 	cache  *cache.Cache[string, *armnetwork.Interface]
+	failuresCount prometheus.Counter
+	cacheHitCount prometheus.Counter
 }
 
 // NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets.
-func NewDiscovery(cfg *SDConfig, logger log.Logger) *Discovery {
+func NewDiscovery(cfg *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@ -197,16 +187,30 @@ func NewDiscovery(cfg *SDConfig, logger log.Logger) *Discovery {
|
||||||
port: cfg.Port,
|
port: cfg.Port,
|
||||||
logger: logger,
|
logger: logger,
|
||||||
cache: l,
|
cache: l,
|
||||||
|
failuresCount: prometheus.NewCounter(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "prometheus_sd_azure_failures_total",
|
||||||
|
Help: "Number of Azure service discovery refresh failures.",
|
||||||
|
}),
|
||||||
|
cacheHitCount: prometheus.NewCounter(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "prometheus_sd_azure_cache_hit_total",
|
||||||
|
Help: "Number of cache hit during refresh.",
|
||||||
|
}),
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Discovery = refresh.NewDiscovery(
|
d.Discovery = refresh.NewDiscovery(
|
||||||
logger,
|
refresh.Options{
|
||||||
"azure",
|
Logger: logger,
|
||||||
time.Duration(cfg.RefreshInterval),
|
Mech: "azure",
|
||||||
d.refresh,
|
Interval: time.Duration(cfg.RefreshInterval),
|
||||||
|
RefreshF: d.refresh,
|
||||||
|
Registry: reg,
|
||||||
|
Metrics: []prometheus.Collector{d.failuresCount, d.cacheHitCount},
|
||||||
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
return d
|
return d, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// azureClient represents multiple Azure Resource Manager providers.
|
// azureClient represents multiple Azure Resource Manager providers.
|
||||||
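
The two counters above move from package globals into the Discovery struct, and the constructor now takes a prometheus.Registerer and can fail. A minimal sketch of the new construction path, assuming a populated *SDConfig named cfg (the helper name and the isolated-registry choice are illustrative, not part of the patch):

    // newIsolatedAzureSD wires the discoverer's counters into its own registry
    // instead of the process-global one.
    func newIsolatedAzureSD(cfg *SDConfig) (*Discovery, *prometheus.Registry, error) {
    	reg := prometheus.NewRegistry()
    	d, err := NewDiscovery(cfg, log.NewNopLogger(), reg)
    	if err != nil {
    		return nil, nil, err
    	}
    	// d.failuresCount and d.cacheHitCount are handed to refresh.NewDiscovery
    	// together with reg, so two discoverer instances no longer collide the
    	// way two prometheus.MustRegister calls on the same names would.
    	return d, reg, nil
    }
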
|
@ -304,6 +308,7 @@ type virtualMachine struct {
|
||||||
Location string
|
Location string
|
||||||
OsType string
|
OsType string
|
||||||
ScaleSet string
|
ScaleSet string
|
||||||
|
InstanceID string
|
||||||
Tags map[string]*string
|
Tags map[string]*string
|
||||||
NetworkInterfaces []string
|
NetworkInterfaces []string
|
||||||
Size string
|
Size string
|
||||||
|
@ -328,14 +333,14 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
||||||
|
|
||||||
client, err := createAzureClient(*d.cfg)
|
client, err := createAzureClient(*d.cfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
failuresCount.Inc()
|
d.failuresCount.Inc()
|
||||||
return nil, fmt.Errorf("could not create Azure client: %w", err)
|
return nil, fmt.Errorf("could not create Azure client: %w", err)
|
||||||
}
|
}
|
||||||
client.logger = d.logger
|
client.logger = d.logger
|
||||||
|
|
||||||
machines, err := client.getVMs(ctx, d.cfg.ResourceGroup)
|
machines, err := client.getVMs(ctx, d.cfg.ResourceGroup)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
failuresCount.Inc()
|
d.failuresCount.Inc()
|
||||||
return nil, fmt.Errorf("could not get virtual machines: %w", err)
|
return nil, fmt.Errorf("could not get virtual machines: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -344,14 +349,14 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
||||||
// Load the vms managed by scale sets.
|
// Load the vms managed by scale sets.
|
||||||
scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup)
|
scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
failuresCount.Inc()
|
d.failuresCount.Inc()
|
||||||
return nil, fmt.Errorf("could not get virtual machine scale sets: %w", err)
|
return nil, fmt.Errorf("could not get virtual machine scale sets: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, scaleSet := range scaleSets {
|
for _, scaleSet := range scaleSets {
|
||||||
scaleSetVms, err := client.getScaleSetVMs(ctx, scaleSet)
|
scaleSetVms, err := client.getScaleSetVMs(ctx, scaleSet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
failuresCount.Inc()
|
d.failuresCount.Inc()
|
||||||
return nil, fmt.Errorf("could not get virtual machine scale set vms: %w", err)
|
return nil, fmt.Errorf("could not get virtual machine scale set vms: %w", err)
|
||||||
}
|
}
|
||||||
machines = append(machines, scaleSetVms...)
|
machines = append(machines, scaleSetVms...)
|
@@ -402,9 +407,13 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 			var networkInterface *armnetwork.Interface
 			if v, ok := d.getFromCache(nicID); ok {
 				networkInterface = v
-				cacheHitCount.Add(1)
+				d.cacheHitCount.Add(1)
 			} else {
-				networkInterface, err = client.getNetworkInterfaceByID(ctx, nicID)
+				if vm.ScaleSet == "" {
+					networkInterface, err = client.getVMNetworkInterfaceByID(ctx, nicID)
+				} else {
+					networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID)
+				}
 				if err != nil {
 					if errors.Is(err, errorNotFound) {
 						level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err)
@@ -461,7 +470,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	var tg targetgroup.Group
 	for tgt := range ch {
 		if tgt.err != nil {
-			failuresCount.Inc()
+			d.failuresCount.Inc()
 			return nil, fmt.Errorf("unable to complete Azure service discovery: %w", tgt.err)
 		}
 		if tgt.labelSet != nil {
@@ -622,6 +631,7 @@ func mapFromVMScaleSetVM(vm armcompute.VirtualMachineScaleSetVM, scaleSetName st
 		Location:          *(vm.Location),
 		OsType:            osType,
 		ScaleSet:          scaleSetName,
+		InstanceID:        *(vm.InstanceID),
 		Tags:              tags,
 		NetworkInterfaces: networkInterfaces,
 		Size:              size,
@@ -630,21 +640,41 @@ func mapFromVMScaleSetVM(vm armcompute.VirtualMachineScaleSetVM, scaleSetName st

 var errorNotFound = errors.New("network interface does not exist")

-// getNetworkInterfaceByID gets the network interface.
+// getVMNetworkInterfaceByID gets the network interface.
 // If a 404 is returned from the Azure API, `errorNotFound` is returned.
-func (client *azureClient) getNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) {
+func (client *azureClient) getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) {
 	r, err := newAzureResourceFromID(networkInterfaceID, client.logger)
 	if err != nil {
 		return nil, fmt.Errorf("could not parse network interface ID: %w", err)
 	}

-	resp, err := client.nic.Get(ctx, r.ResourceGroupName, r.Name, nil)
+	resp, err := client.nic.Get(ctx, r.ResourceGroupName, r.Name, &armnetwork.InterfacesClientGetOptions{Expand: to.Ptr("IPConfigurations/PublicIPAddress")})
 	if err != nil {
 		var responseError *azcore.ResponseError
 		if errors.As(err, &responseError) && responseError.StatusCode == http.StatusNotFound {
 			return nil, errorNotFound
 		}
-		return nil, fmt.Errorf("Failed to retrieve Interface %v with error: %w", networkInterfaceID, err)
+		return nil, fmt.Errorf("failed to retrieve Interface %v with error: %w", networkInterfaceID, err)
+	}
+
+	return &resp.Interface, nil
+}
+
+// getVMScaleSetVMNetworkInterfaceByID gets the network interface.
+// If a 404 is returned from the Azure API, `errorNotFound` is returned.
+func (client *azureClient) getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error) {
+	r, err := newAzureResourceFromID(networkInterfaceID, client.logger)
+	if err != nil {
+		return nil, fmt.Errorf("could not parse network interface ID: %w", err)
+	}
+
+	resp, err := client.nic.GetVirtualMachineScaleSetNetworkInterface(ctx, r.ResourceGroupName, scaleSetName, instanceID, r.Name, &armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions{Expand: to.Ptr("IPConfigurations/PublicIPAddress")})
+	if err != nil {
+		var responseError *azcore.ResponseError
+		if errors.As(err, &responseError) && responseError.StatusCode == http.StatusNotFound {
+			return nil, errorNotFound
+		}
+		return nil, fmt.Errorf("failed to retrieve Interface %v with error: %w", networkInterfaceID, err)
 	}

 	return &resp.Interface, nil
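
Both getters request `IPConfigurations/PublicIPAddress` via the Expand option, so the public IP configuration comes back inline with the NIC, and both translate an HTTP 404 into the package's errorNotFound sentinel. A hedged caller-side sketch of that contract (the helper function is hypothetical, not from the patch):

    // lookupNIC treats a NIC that disappeared between listing and lookup as
    // absent rather than as a fatal refresh error.
    func lookupNIC(ctx context.Context, client *azureClient, nicID string) (*armnetwork.Interface, bool, error) {
    	nic, err := client.getVMNetworkInterfaceByID(ctx, nicID)
    	if errors.Is(err, errorNotFound) {
    		return nil, false, nil // deleted concurrently; caller can skip the VM
    	}
    	if err != nil {
    		return nil, false, err
    	}
    	return nic, true, nil
    }
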

@@ -17,7 +17,7 @@ import (
 	"testing"

 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
-	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
 )
@@ -142,6 +142,7 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
 	vmSize := armcompute.VirtualMachineSizeTypes(size)
 	osType := armcompute.OperatingSystemTypesLinux
 	vmType := "type"
+	instanceID := "123"
 	location := "westeurope"
 	computerName := "computer_name"
 	networkProfile := armcompute.NetworkProfile{
@@ -166,6 +167,7 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
 		ID:         &id,
 		Name:       &name,
 		Type:       &vmType,
+		InstanceID: &instanceID,
 		Location:   &location,
 		Tags:       nil,
 		Properties: properties,
@@ -182,6 +184,7 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
 		Tags:              map[string]*string{},
 		NetworkInterfaces: []string{},
 		ScaleSet:          scaleSet,
+		InstanceID:        instanceID,
 		Size:              size,
 	}

@@ -197,6 +200,7 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
 	vmSize := armcompute.VirtualMachineSizeTypes(size)
 	osType := armcompute.OperatingSystemTypesLinux
 	vmType := "type"
+	instanceID := "123"
 	location := "westeurope"
 	computerName := "computer_name"
 	tags := map[string]*string{
@@ -224,6 +228,7 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
 		ID:         &id,
 		Name:       &name,
 		Type:       &vmType,
+		InstanceID: &instanceID,
 		Location:   &location,
 		Tags:       tags,
 		Properties: properties,
@@ -240,6 +245,7 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
 		Tags:              tags,
 		NetworkInterfaces: []string{},
 		ScaleSet:          scaleSet,
+		InstanceID:        instanceID,
 		Size:              size,
 	}

@@ -269,7 +275,7 @@ func TestNewAzureResourceFromID(t *testing.T) {
 		},
 	} {
 		actual, err := newAzureResourceFromID(tc.id, nil)
-		require.Nil(t, err)
+		require.NoError(t, err)
 		require.Equal(t, tc.expected.Name, actual.Name)
 		require.Equal(t, tc.expected.ResourceGroupName, actual.ResourceGroupName)
 	}

@@ -71,41 +71,18 @@ const (
 	namespace = "prometheus"
 )

-var (
-	rpcFailuresCount = prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Namespace: namespace,
-			Name:      "sd_consul_rpc_failures_total",
-			Help:      "The number of Consul RPC call failures.",
-		})
-	rpcDuration = prometheus.NewSummaryVec(
-		prometheus.SummaryOpts{
-			Namespace:  namespace,
-			Name:       "sd_consul_rpc_duration_seconds",
-			Help:       "The duration of a Consul RPC call in seconds.",
-			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
-		},
-		[]string{"endpoint", "call"},
-	)
-
-	// Initialize metric vectors.
-	servicesRPCDuration = rpcDuration.WithLabelValues("catalog", "services")
-	serviceRPCDuration  = rpcDuration.WithLabelValues("catalog", "service")
-
-	// DefaultSDConfig is the default Consul SD configuration.
-	DefaultSDConfig = SDConfig{
-		TagSeparator:     ",",
-		Scheme:           "http",
-		Server:           "localhost:8500",
-		AllowStale:       true,
-		RefreshInterval:  model.Duration(30 * time.Second),
-		HTTPClientConfig: config.DefaultHTTPClientConfig,
-	}
-)
+// DefaultSDConfig is the default Consul SD configuration.
+var DefaultSDConfig = SDConfig{
+	TagSeparator:     ",",
+	Scheme:           "http",
+	Server:           "localhost:8500",
+	AllowStale:       true,
+	RefreshInterval:  model.Duration(30 * time.Second),
+	HTTPClientConfig: config.DefaultHTTPClientConfig,
+}

 func init() {
 	discovery.RegisterConfig(&SDConfig{})
-	prometheus.MustRegister(rpcFailuresCount, rpcDuration)
 }

 // SDConfig is the configuration for Consul service discovery.
@@ -147,7 +124,7 @@ func (*SDConfig) Name() string { return "consul" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger)
+	return NewDiscovery(c, opts.Logger, opts.Registerer)
 }

 // SetDirectory joins any relative file paths with dir.
@@ -184,22 +161,27 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 // Discovery retrieves target information from a Consul server
 // and updates them via watches.
 type Discovery struct {
 	client           *consul.Client
 	clientDatacenter string
 	clientNamespace  string
 	clientPartition  string
 	tagSeparator     string
 	watchedServices  []string // Set of services which will be discovered.
 	watchedTags      []string // Tags used to filter instances of a service.
 	watchedNodeMeta  map[string]string
 	allowStale       bool
 	refreshInterval  time.Duration
 	finalizer        func()
 	logger           log.Logger
+	rpcFailuresCount    prometheus.Counter
+	rpcDuration         *prometheus.SummaryVec
+	servicesRPCDuration prometheus.Observer
+	serviceRPCDuration  prometheus.Observer
+	metricRegisterer    discovery.MetricRegisterer
 }

 // NewDiscovery returns a new Discovery for the given config.
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -237,7 +219,35 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 		clientPartition: conf.Partition,
 		finalizer:       wrapper.CloseIdleConnections,
 		logger:          logger,
+		rpcFailuresCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Name:      "sd_consul_rpc_failures_total",
+				Help:      "The number of Consul RPC call failures.",
+			}),
+		rpcDuration: prometheus.NewSummaryVec(
+			prometheus.SummaryOpts{
+				Namespace:  namespace,
+				Name:       "sd_consul_rpc_duration_seconds",
+				Help:       "The duration of a Consul RPC call in seconds.",
+				Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+			},
+			[]string{"endpoint", "call"},
+		),
 	}

+	cd.metricRegisterer = discovery.NewMetricRegisterer(
+		reg,
+		[]prometheus.Collector{
+			cd.rpcFailuresCount,
+			cd.rpcDuration,
+		},
+	)
+
+	// Initialize metric vectors.
+	cd.servicesRPCDuration = cd.rpcDuration.WithLabelValues("catalog", "services")
+	cd.serviceRPCDuration = cd.rpcDuration.WithLabelValues("catalog", "service")
+
 	return cd, nil
 }

@@ -293,7 +303,7 @@ func (d *Discovery) getDatacenter() error {
 	info, err := d.client.Agent().Self()
 	if err != nil {
 		level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err)
-		rpcFailuresCount.Inc()
+		d.rpcFailuresCount.Inc()
 		return err
 	}

@@ -334,6 +344,13 @@ func (d *Discovery) initialize(ctx context.Context) {

 // Run implements the Discoverer interface.
 func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
+	err := d.metricRegisterer.RegisterMetrics()
+	if err != nil {
+		level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error())
+		return
+	}
+	defer d.metricRegisterer.UnregisterMetrics()
+
 	if d.finalizer != nil {
 		defer d.finalizer()
 	}
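
Run now owns the metric lifecycle: collectors are registered when the discoverer starts and unregistered when it returns. Only two methods of discovery.MetricRegisterer appear in this patch, so the contract they imply is roughly the following (an illustrative restatement, not the package's actual declaration):

    type MetricRegisterer interface {
    	RegisterMetrics() error // attach the collectors to the configured Registerer
    	UnregisterMetrics()     // detach them again when the discoverer stops
    }
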
@@ -382,7 +399,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
 	t0 := time.Now()
 	srvs, meta, err := catalog.Services(opts.WithContext(ctx))
 	elapsed := time.Since(t0)
-	servicesRPCDuration.Observe(elapsed.Seconds())
+	d.servicesRPCDuration.Observe(elapsed.Seconds())

 	// Check the context before in order to exit early.
 	select {
@@ -393,7 +410,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.

 	if err != nil {
 		level.Error(d.logger).Log("msg", "Error refreshing service list", "err", err)
-		rpcFailuresCount.Inc()
+		d.rpcFailuresCount.Inc()
 		time.Sleep(retryInterval)
 		return
 	}
@@ -449,13 +466,15 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.

 // consulService contains data belonging to the same service.
 type consulService struct {
 	name         string
 	tags         []string
 	labels       model.LabelSet
 	discovery    *Discovery
 	client       *consul.Client
 	tagSeparator string
 	logger       log.Logger
+	rpcFailuresCount   prometheus.Counter
+	serviceRPCDuration prometheus.Observer
 }

 // Start watching a service.
@@ -469,8 +488,10 @@ func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.G
 			serviceLabel:    model.LabelValue(name),
 			datacenterLabel: model.LabelValue(d.clientDatacenter),
 		},
 		tagSeparator: d.tagSeparator,
 		logger:       d.logger,
+		rpcFailuresCount:   d.rpcFailuresCount,
+		serviceRPCDuration: d.serviceRPCDuration,
 	}

 	go func() {
@@ -508,7 +529,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
 	t0 := time.Now()
 	serviceNodes, meta, err := health.ServiceMultipleTags(srv.name, srv.tags, false, opts.WithContext(ctx))
 	elapsed := time.Since(t0)
-	serviceRPCDuration.Observe(elapsed.Seconds())
+	srv.serviceRPCDuration.Observe(elapsed.Seconds())

 	// Check the context before in order to exit early.
 	select {
@@ -520,7 +541,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr

 	if err != nil {
 		level.Error(srv.logger).Log("msg", "Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err)
-		rpcFailuresCount.Inc()
+		srv.rpcFailuresCount.Inc()
 		time.Sleep(retryInterval)
 		return
 	}
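
Note that copying d.rpcFailuresCount into each consulService does not fork the metric: prometheus.Counter is an interface value wrapping a shared implementation, so every watcher increments the same series. Illustratively, with d being the Discovery from the hunks above:

    srv := &consulService{rpcFailuresCount: d.rpcFailuresCount}
    srv.rpcFailuresCount.Inc() // same underlying counter as d.rpcFailuresCount
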

@@ -22,6 +22,7 @@ import (
 	"time"

 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
@@ -39,7 +40,7 @@ func TestConfiguredService(t *testing.T) {
 	conf := &SDConfig{
 		Services: []string{"configuredServiceName"},
 	}
-	consulDiscovery, err := NewDiscovery(conf, nil)
+	consulDiscovery, err := NewDiscovery(conf, nil, prometheus.NewRegistry())
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}
@@ -56,7 +57,7 @@ func TestConfiguredServiceWithTag(t *testing.T) {
 		Services:    []string{"configuredServiceName"},
 		ServiceTags: []string{"http"},
 	}
-	consulDiscovery, err := NewDiscovery(conf, nil)
+	consulDiscovery, err := NewDiscovery(conf, nil, prometheus.NewRegistry())
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}
@@ -151,7 +152,7 @@ func TestConfiguredServiceWithTags(t *testing.T) {
 	}

 	for _, tc := range cases {
-		consulDiscovery, err := NewDiscovery(tc.conf, nil)
+		consulDiscovery, err := NewDiscovery(tc.conf, nil, prometheus.NewRegistry())
 		if err != nil {
 			t.Errorf("Unexpected error when initializing discovery %v", err)
 		}
@@ -165,7 +166,7 @@ func TestConfiguredServiceWithTags(t *testing.T) {

 func TestNonConfiguredService(t *testing.T) {
 	conf := &SDConfig{}
-	consulDiscovery, err := NewDiscovery(conf, nil)
+	consulDiscovery, err := NewDiscovery(conf, nil, prometheus.NewRegistry())
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}
@@ -262,19 +263,19 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {

 func newDiscovery(t *testing.T, config *SDConfig) *Discovery {
 	logger := log.NewNopLogger()
-	d, err := NewDiscovery(config, logger)
+	d, err := NewDiscovery(config, logger, prometheus.NewRegistry())
 	require.NoError(t, err)
 	return d
 }

 func checkOneTarget(t *testing.T, tg []*targetgroup.Group) {
-	require.Equal(t, 1, len(tg))
+	require.Len(t, tg, 1)
 	target := tg[0]
 	require.Equal(t, "test-dc", string(target.Labels["__meta_consul_dc"]))
 	require.Equal(t, target.Source, string(target.Labels["__meta_consul_service"]))
 	if target.Source == "test" {
 		// test service should have one node.
-		require.Greater(t, len(target.Targets), 0, "Test service should have one node")
+		require.NotEmpty(t, target.Targets, "Test service should have one node")
 	}
 }

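The assertion rewrites in this file match the forms preferred by the testifylint linter: length and emptiness checks go through require.Len, require.Empty and require.NotEmpty, and require.Equal puts the expected value first. For example (illustrative):

    require.Equal(t, 1, len(tg)) // old form: a failure prints only the two ints
    require.Len(t, tg, 1)        // new form: a failure also prints the slice contents
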
@@ -313,7 +314,7 @@ func TestNoTargets(t *testing.T) {
 	}()

 	targets := (<-ch)[0].Targets
-	require.Equal(t, 0, len(targets))
+	require.Empty(t, targets)
 	cancel()
 	<-ch
 }
@@ -484,7 +485,7 @@ oauth2:
 			return
 		}

-		require.Equal(t, config, test.expected)
+		require.Equal(t, test.expected, config)
 		})
 	}
 }

@@ -24,6 +24,7 @@ import (

 	"github.com/digitalocean/godo"
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/version"
@@ -75,7 +76,7 @@ func (*SDConfig) Name() string { return "digitalocean" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger)
+	return NewDiscovery(c, opts.Logger, opts.Registerer)
 }

 // SetDirectory joins any relative file paths with dir.
@@ -103,7 +104,7 @@ type Discovery struct {
 }

 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
 	d := &Discovery{
 		port: conf.Port,
 	}
@@ -125,10 +126,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 	}

 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"digitalocean",
-		time.Duration(conf.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "digitalocean",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: reg,
+		},
 	)
 	return d, nil
 }

@@ -20,6 +20,7 @@ import (
 	"testing"

 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 )
@@ -46,7 +47,7 @@ func TestDigitalOceanSDRefresh(t *testing.T) {

 	cfg := DefaultSDConfig
 	cfg.HTTPClientConfig.BearerToken = tokenID
-	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
 	require.NoError(t, err)
 	endpoint, err := url.Parse(sdmock.Mock.Endpoint())
 	require.NoError(t, err)
@@ -56,12 +57,12 @@ func TestDigitalOceanSDRefresh(t *testing.T) {
 	tgs, err := d.refresh(ctx)
 	require.NoError(t, err)

-	require.Equal(t, 1, len(tgs))
+	require.Len(t, tgs, 1)

 	tg := tgs[0]
 	require.NotNil(t, tg)
 	require.NotNil(t, tg.Targets)
-	require.Equal(t, 4, len(tg.Targets))
+	require.Len(t, tg.Targets, 4)

 	for i, lbls := range []model.LabelSet{
 		{

@@ -18,6 +18,7 @@ import (
 	"reflect"

 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"

 	"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -42,6 +43,15 @@ type Discoverer interface {
 type DiscovererOptions struct {
 	Logger log.Logger

+	// A registerer for the Discoverer's metrics.
+	// Some Discoverers may ignore this registerer and use the global one instead.
+	// For now this will work, because the Prometheus `main` function uses the global registry.
+	// However, in the future the Prometheus `main` function will be updated to not use the global registry.
+	// Hence, if a discoverer wants its metrics to be visible via the Prometheus executable's
+	// `/metrics` endpoint, it should use this explicit registerer.
+	// TODO(ptodev): Update this comment once the Prometheus `main` function does not use the global registry.
+	Registerer prometheus.Registerer
+
 	// Extra HTTP client options to expose to Discoverers. This field may be
 	// ignored; Discoverer implementations must opt-in to reading it.
 	HTTPClientOptions []config.HTTPClientOption
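
A sketch of a caller populating the new field; the helper function and the choice of prometheus.DefaultRegisterer are assumptions for illustration, not code from this patch:

    func newDiscoverer(cfg discovery.Config, logger log.Logger) (discovery.Discoverer, error) {
    	return cfg.NewDiscoverer(discovery.DiscovererOptions{
    		Logger:     logger,
    		Registerer: prometheus.DefaultRegisterer, // or a per-mechanism registry
    	})
    }
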

@@ -42,35 +42,21 @@ const (
 	dnsSrvRecordPortLabel  = dnsSrvRecordPrefix + "port"
 	dnsMxRecordPrefix      = model.MetaLabelPrefix + "dns_mx_record_"
 	dnsMxRecordTargetLabel = dnsMxRecordPrefix + "target"
+	dnsNsRecordPrefix      = model.MetaLabelPrefix + "dns_ns_record_"
+	dnsNsRecordTargetLabel = dnsNsRecordPrefix + "target"

 	// Constants for instrumentation.
 	namespace = "prometheus"
 )

-var (
-	dnsSDLookupsCount = prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Namespace: namespace,
-			Name:      "sd_dns_lookups_total",
-			Help:      "The number of DNS-SD lookups.",
-		})
-	dnsSDLookupFailuresCount = prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Namespace: namespace,
-			Name:      "sd_dns_lookup_failures_total",
-			Help:      "The number of DNS-SD lookup failures.",
-		})
-
-	// DefaultSDConfig is the default DNS SD configuration.
-	DefaultSDConfig = SDConfig{
-		RefreshInterval: model.Duration(30 * time.Second),
-		Type:            "SRV",
-	}
-)
+// DefaultSDConfig is the default DNS SD configuration.
+var DefaultSDConfig = SDConfig{
+	RefreshInterval: model.Duration(30 * time.Second),
+	Type:            "SRV",
+}

 func init() {
 	discovery.RegisterConfig(&SDConfig{})
-	prometheus.MustRegister(dnsSDLookupFailuresCount, dnsSDLookupsCount)
 }

 // SDConfig is the configuration for DNS based service discovery.
@@ -86,7 +72,7 @@ func (*SDConfig) Name() string { return "dns" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(*c, opts.Logger), nil
+	return NewDiscovery(*c, opts.Logger, opts.Registerer)
 }

 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -102,7 +88,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	}
 	switch strings.ToUpper(c.Type) {
 	case "SRV":
-	case "A", "AAAA", "MX":
+	case "A", "AAAA", "MX", "NS":
 		if c.Port == 0 {
 			return errors.New("a port is required in DNS-SD configs for all record types except SRV")
 		}
@@ -116,16 +102,18 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 // the Discoverer interface.
 type Discovery struct {
 	*refresh.Discovery
 	names  []string
 	port   int
 	qtype  uint16
 	logger log.Logger
+	dnsSDLookupsCount        prometheus.Counter
+	dnsSDLookupFailuresCount prometheus.Counter

 	lookupFn func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error)
 }

 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery {
+func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -140,6 +128,8 @@ func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery {
 		qtype = dns.TypeSRV
 	case "MX":
 		qtype = dns.TypeMX
+	case "NS":
+		qtype = dns.TypeNS
 	}
 	d := &Discovery{
 		names: conf.Names,
@@ -147,14 +137,32 @@ func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery {
 		port:     conf.Port,
 		logger:   logger,
 		lookupFn: lookupWithSearchPath,
+		dnsSDLookupsCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Name:      "sd_dns_lookups_total",
+				Help:      "The number of DNS-SD lookups.",
+			}),
+		dnsSDLookupFailuresCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Name:      "sd_dns_lookup_failures_total",
+				Help:      "The number of DNS-SD lookup failures.",
+			}),
 	}

 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"dns",
-		time.Duration(conf.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "dns",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: prometheus.NewRegistry(),
+			Metrics:  []prometheus.Collector{d.dnsSDLookupsCount, d.dnsSDLookupFailuresCount},
+		},
 	)
-	return d
+
+	return d, nil
 }

 func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
@@ -187,9 +195,9 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {

 func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targetgroup.Group) error {
 	response, err := d.lookupFn(name, d.qtype, d.logger)
-	dnsSDLookupsCount.Inc()
+	d.dnsSDLookupsCount.Inc()
 	if err != nil {
-		dnsSDLookupFailuresCount.Inc()
+		d.dnsSDLookupFailuresCount.Inc()
 		return err
 	}

@@ -199,7 +207,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
 	}

 	for _, record := range response.Answer {
-		var target, dnsSrvRecordTarget, dnsSrvRecordPort, dnsMxRecordTarget model.LabelValue
+		var target, dnsSrvRecordTarget, dnsSrvRecordPort, dnsMxRecordTarget, dnsNsRecordTarget model.LabelValue

 		switch addr := record.(type) {
 		case *dns.SRV:
@@ -217,6 +225,13 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
 			addr.Mx = strings.TrimRight(addr.Mx, ".")

 			target = hostPort(addr.Mx, d.port)
+		case *dns.NS:
+			dnsNsRecordTarget = model.LabelValue(addr.Ns)
+
+			// Remove the final dot from rooted DNS names to make them look more usual.
+			addr.Ns = strings.TrimRight(addr.Ns, ".")
+
+			target = hostPort(addr.Ns, d.port)
 		case *dns.A:
 			target = hostPort(addr.A.String(), d.port)
 		case *dns.AAAA:
@@ -234,6 +249,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
 			dnsSrvRecordTargetLabel: dnsSrvRecordTarget,
 			dnsSrvRecordPortLabel:   dnsSrvRecordPort,
 			dnsMxRecordTargetLabel:  dnsMxRecordTarget,
+			dnsNsRecordTargetLabel:  dnsNsRecordTarget,
 		})
 	}

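With the NS branch in place, an NS answer produces one target whose `__address__` reuses the configured port, while the rooted record name is kept in the new meta label (it is captured before the trailing dot is trimmed). An illustrative label set, assuming a configured port of 53 and a hypothetical nameserver:

    var nsTarget = model.LabelSet{
    	"__address__":                  "ns1.example.com:53", // hostPort(trimmed name, port)
    	"__meta_dns_ns_record_target":  "ns1.example.com.",   // rooted name, dot intact
    	"__meta_dns_srv_record_target": "",
    	"__meta_dns_srv_record_port":   "",
    	"__meta_dns_mx_record_target":  "",
    }
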

@@ -22,6 +22,7 @@ import (

 	"github.com/go-kit/log"
 	"github.com/miekg/dns"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
@@ -81,6 +82,7 @@ func TestDNS(t *testing.T) {
 						"__meta_dns_srv_record_target": "",
 						"__meta_dns_srv_record_port":   "",
 						"__meta_dns_mx_record_target":  "",
+						"__meta_dns_ns_record_target":  "",
 					},
 				},
 			},
@@ -112,6 +114,7 @@ func TestDNS(t *testing.T) {
 						"__meta_dns_srv_record_target": "",
 						"__meta_dns_srv_record_port":   "",
 						"__meta_dns_mx_record_target":  "",
+						"__meta_dns_ns_record_target":  "",
 					},
 				},
 			},
@@ -143,6 +146,7 @@ func TestDNS(t *testing.T) {
 						"__meta_dns_srv_record_target": "db1.example.com.",
 						"__meta_dns_srv_record_port":   "3306",
 						"__meta_dns_mx_record_target":  "",
+						"__meta_dns_ns_record_target":  "",
 					},
 					{
 						"__address__": "db2.example.com:3306",
@@ -150,6 +154,7 @@ func TestDNS(t *testing.T) {
 						"__meta_dns_srv_record_target": "db2.example.com.",
 						"__meta_dns_srv_record_port":   "3306",
 						"__meta_dns_mx_record_target":  "",
+						"__meta_dns_ns_record_target":  "",
 					},
 				},
 			},
@@ -180,6 +185,7 @@ func TestDNS(t *testing.T) {
 						"__meta_dns_srv_record_target": "db1.example.com.",
 						"__meta_dns_srv_record_port":   "3306",
 						"__meta_dns_mx_record_target":  "",
+						"__meta_dns_ns_record_target":  "",
 					},
 				},
 			},
@@ -227,6 +233,7 @@ func TestDNS(t *testing.T) {
 						"__meta_dns_srv_record_target": "",
 						"__meta_dns_srv_record_port":   "",
 						"__meta_dns_mx_record_target":  "smtp1.example.com.",
+						"__meta_dns_ns_record_target":  "",
 					},
 					{
 						"__address__": "smtp2.example.com:25",
@@ -234,6 +241,7 @@ func TestDNS(t *testing.T) {
 						"__meta_dns_srv_record_target": "",
 						"__meta_dns_srv_record_port":   "",
 						"__meta_dns_mx_record_target":  "smtp2.example.com.",
+						"__meta_dns_ns_record_target":  "",
 					},
 				},
 			},
@@ -245,7 +253,8 @@ func TestDNS(t *testing.T) {
 		tc := tc
 		t.Run(tc.name, func(t *testing.T) {
 			t.Parallel()
-			sd := NewDiscovery(tc.config, nil)
+			sd, err := NewDiscovery(tc.config, nil, prometheus.NewRegistry())
+			require.NoError(t, err)
 			sd.lookupFn = tc.lookup

 			tgs, err := sd.refresh(context.Background())

@@ -184,17 +184,17 @@ func TestFetchApps(t *testing.T) {
 	apps, err := fetchApps(context.TODO(), ts.URL, &http.Client{})
 	require.NoError(t, err)

-	require.Equal(t, len(apps.Applications), 2)
-	require.Equal(t, apps.Applications[0].Name, "CONFIG-SERVICE")
-	require.Equal(t, apps.Applications[1].Name, "META-SERVICE")
+	require.Len(t, apps.Applications, 2)
+	require.Equal(t, "CONFIG-SERVICE", apps.Applications[0].Name)
+	require.Equal(t, "META-SERVICE", apps.Applications[1].Name)

-	require.Equal(t, len(apps.Applications[1].Instances), 2)
-	require.Equal(t, apps.Applications[1].Instances[0].InstanceID, "meta-service002.test.com:meta-service:8080")
-	require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[0].XMLName.Local, "project")
-	require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[0].Content, "meta-service")
-	require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[1].XMLName.Local, "management.port")
-	require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[1].Content, "8090")
-	require.Equal(t, apps.Applications[1].Instances[1].InstanceID, "meta-service001.test.com:meta-service:8080")
+	require.Len(t, apps.Applications[1].Instances, 2)
+	require.Equal(t, "meta-service002.test.com:meta-service:8080", apps.Applications[1].Instances[0].InstanceID)
+	require.Equal(t, "project", apps.Applications[1].Instances[0].Metadata.Items[0].XMLName.Local)
+	require.Equal(t, "meta-service", apps.Applications[1].Instances[0].Metadata.Items[0].Content)
+	require.Equal(t, "management.port", apps.Applications[1].Instances[0].Metadata.Items[1].XMLName.Local)
+	require.Equal(t, "8090", apps.Applications[1].Instances[0].Metadata.Items[1].Content)
+	require.Equal(t, "meta-service001.test.com:meta-service:8080", apps.Applications[1].Instances[1].InstanceID)
 }

 func Test500ErrorHttpResponse(t *testing.T) {

@@ -23,6 +23,7 @@ import (
 	"time"

 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"

@@ -80,7 +81,7 @@ func (*SDConfig) Name() string { return "eureka" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger)
+	return NewDiscovery(c, opts.Logger, opts.Registerer)
 }

 // SetDirectory joins any relative file paths with dir.
@@ -117,7 +118,7 @@ type Discovery struct {
 }

 // NewDiscovery creates a new Eureka discovery for the given role.
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
 	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd")
 	if err != nil {
 		return nil, err
@@ -128,10 +129,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 		server: conf.Server,
 	}
 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"eureka",
-		time.Duration(conf.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "eureka",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: reg,
+		},
 	)
 	return d, nil
 }

@@ -20,6 +20,7 @@ import (
 	"net/http/httptest"
 	"testing"

+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"

@@ -35,7 +36,7 @@ func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, err
 		Server: ts.URL,
 	}

-	md, err := NewDiscovery(&conf, nil)
+	md, err := NewDiscovery(&conf, nil, prometheus.NewRegistry())
 	if err != nil {
 		return nil, err
 	}
@@ -55,7 +56,7 @@ func TestEurekaSDHandleError(t *testing.T) {
 	tgs, err := testUpdateServices(respHandler)

 	require.EqualError(t, err, errTesting)
-	require.Equal(t, len(tgs), 0)
+	require.Empty(t, tgs)
 }

 func TestEurekaSDEmptyList(t *testing.T) {
@@ -72,7 +73,7 @@ func TestEurekaSDEmptyList(t *testing.T) {
 	)
 	tgs, err := testUpdateServices(respHandler)
 	require.NoError(t, err)
-	require.Equal(t, len(tgs), 1)
+	require.Len(t, tgs, 1)
 }

 func TestEurekaSDSendGroup(t *testing.T) {
@@ -232,11 +233,11 @@ func TestEurekaSDSendGroup(t *testing.T) {

 	tgs, err := testUpdateServices(respHandler)
 	require.NoError(t, err)
-	require.Equal(t, len(tgs), 1)
+	require.Len(t, tgs, 1)

 	tg := tgs[0]
-	require.Equal(t, tg.Source, "eureka")
-	require.Equal(t, len(tg.Targets), 4)
+	require.Equal(t, "eureka", tg.Source)
+	require.Len(t, tg.Targets, 4)

 	tgt := tg.Targets[0]
 	require.Equal(t, tgt[model.AddressLabel], model.LabelValue("config-service001.test.com:8080"))
||||||
|
|
|
@@ -39,24 +39,6 @@ import (
 )
 
 var (
-	fileSDReadErrorsCount = prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Name: "prometheus_sd_file_read_errors_total",
-			Help: "The number of File-SD read errors.",
-		})
-	fileSDScanDuration = prometheus.NewSummary(
-		prometheus.SummaryOpts{
-			Name:       "prometheus_sd_file_scan_duration_seconds",
-			Help:       "The duration of the File-SD scan in seconds.",
-			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
-		})
-	fileSDTimeStamp        = NewTimestampCollector()
-	fileWatcherErrorsCount = prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Name: "prometheus_sd_file_watcher_errors_total",
-			Help: "The number of File-SD errors caused by filesystem watch failures.",
-		})
-
 	patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$`)
 
 	// DefaultSDConfig is the default file SD configuration.
@@ -67,7 +49,6 @@ var (
 
 func init() {
 	discovery.RegisterConfig(&SDConfig{})
-	prometheus.MustRegister(fileSDReadErrorsCount, fileSDScanDuration, fileSDTimeStamp, fileWatcherErrorsCount)
 }
 
 // SDConfig is the configuration for file based discovery.
@@ -81,7 +62,7 @@ func (*SDConfig) Name() string { return "file" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger), nil
+	return NewDiscovery(c, opts.Logger, opts.Registerer)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -187,10 +168,17 @@ type Discovery struct {
 	// This is used to detect deleted target groups.
 	lastRefresh map[string]int
 	logger      log.Logger
 
+	fileSDReadErrorsCount  prometheus.Counter
+	fileSDScanDuration     prometheus.Summary
+	fileWatcherErrorsCount prometheus.Counter
+	fileSDTimeStamp        *TimestampCollector
+
+	metricRegisterer discovery.MetricRegisterer
 }
 
 // NewDiscovery returns a new file discovery for the given paths.
-func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery {
+func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -200,9 +188,35 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery {
 		interval:   time.Duration(conf.RefreshInterval),
 		timestamps: make(map[string]float64),
 		logger:     logger,
+		fileSDReadErrorsCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Name: "prometheus_sd_file_read_errors_total",
+				Help: "The number of File-SD read errors.",
+			}),
+		fileSDScanDuration: prometheus.NewSummary(
+			prometheus.SummaryOpts{
+				Name:       "prometheus_sd_file_scan_duration_seconds",
+				Help:       "The duration of the File-SD scan in seconds.",
+				Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+			}),
+		fileWatcherErrorsCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Name: "prometheus_sd_file_watcher_errors_total",
+				Help: "The number of File-SD errors caused by filesystem watch failures.",
+			}),
+		fileSDTimeStamp: NewTimestampCollector(),
 	}
-	fileSDTimeStamp.addDiscoverer(disc)
-	return disc
+	disc.fileSDTimeStamp.addDiscoverer(disc)
+
+	disc.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
+		disc.fileSDReadErrorsCount,
+		disc.fileSDScanDuration,
+		disc.fileWatcherErrorsCount,
+		disc.fileSDTimeStamp,
+	})
+
+	return disc, nil
 }
 
 // listFiles returns a list of all files that match the configured patterns.
@@ -239,10 +253,17 @@ func (d *Discovery) watchFiles() {
 
 // Run implements the Discoverer interface.
 func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
+	err := d.metricRegisterer.RegisterMetrics()
+	if err != nil {
+		level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error())
+		return
+	}
+	defer d.metricRegisterer.UnregisterMetrics()
+
 	watcher, err := fsnotify.NewWatcher()
 	if err != nil {
 		level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err)
-		fileWatcherErrorsCount.Inc()
+		d.fileWatcherErrorsCount.Inc()
 		return
 	}
 	d.watcher = watcher
@@ -306,7 +327,7 @@ func (d *Discovery) stop() {
 	done := make(chan struct{})
 	defer close(done)
 
-	fileSDTimeStamp.removeDiscoverer(d)
+	d.fileSDTimeStamp.removeDiscoverer(d)
 
 	// Closing the watcher will deadlock unless all events and errors are drained.
 	go func() {
@@ -332,13 +353,13 @@ func (d *Discovery) stop() {
 func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	t0 := time.Now()
 	defer func() {
-		fileSDScanDuration.Observe(time.Since(t0).Seconds())
+		d.fileSDScanDuration.Observe(time.Since(t0).Seconds())
 	}()
 	ref := map[string]int{}
 	for _, p := range d.listFiles() {
 		tgroups, err := d.readFile(p)
 		if err != nil {
-			fileSDReadErrorsCount.Inc()
+			d.fileSDReadErrorsCount.Inc()
 
 			level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err)
 			// Prevent deletion down below.

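Aside: the file-SD hunks above carry the core pattern of this refactor — collectors move from package-level vars registered in init() to per-instance struct fields that Run registers on start and unregisters on exit. A minimal sketch of that lifecycle using plain client_golang calls; the discovery.MetricRegisterer seen in the diff wraps the same Register/Unregister pair, and all names below are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// fileSD mimics the refactored Discovery: the counter lives on the
// struct instead of in a package-level var registered via init().
type fileSD struct {
	readErrors prometheus.Counter
	reg        prometheus.Registerer
}

func newFileSD(reg prometheus.Registerer) *fileSD {
	return &fileSD{
		reg: reg,
		readErrors: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_sd_file_read_errors_total",
			Help: "The number of File-SD read errors.",
		}),
	}
}

// Run registers the metric for the lifetime of the discoverer and
// unregisters it on exit, mirroring RegisterMetrics/UnregisterMetrics.
func (d *fileSD) Run() error {
	if err := d.reg.Register(d.readErrors); err != nil {
		return fmt.Errorf("unable to register metrics: %w", err)
	}
	defer d.reg.Unregister(d.readErrors)

	d.readErrors.Inc() // the actual discovery loop would run here
	return nil
}

func main() {
	if err := newFileSD(prometheus.NewRegistry()).Run(); err != nil {
		fmt.Println(err)
	}
}
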
@@ -24,6 +24,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
@@ -143,7 +144,7 @@ func (t *testRunner) run(files ...string) {
 	ctx, cancel := context.WithCancel(context.Background())
 	t.cancelSD = cancel
 	go func() {
-		NewDiscovery(
+		d, err := NewDiscovery(
 			&SDConfig{
 				Files: files,
 				// Setting a high refresh interval to make sure that the tests only
@@ -151,7 +152,11 @@ func (t *testRunner) run(files ...string) {
 				RefreshInterval: model.Duration(1 * time.Hour),
 			},
 			nil,
-		).Run(ctx, t.ch)
+			prometheus.NewRegistry(),
+		)
+		require.NoError(t, err)
+
+		d.Run(ctx, t.ch)
 	}()
 }
 

@@ -23,6 +23,7 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"golang.org/x/oauth2/google"
 	"google.golang.org/api/compute/v1"
@@ -86,7 +87,7 @@ func (*SDConfig) Name() string { return "gce" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(*c, opts.Logger)
+	return NewDiscovery(*c, opts.Logger, opts.Registerer)
 }
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -121,7 +122,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
+func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
 	d := &Discovery{
 		project: conf.Project,
 		zone:    conf.Zone,
@@ -141,10 +142,13 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
 	d.isvc = compute.NewInstancesService(d.svc)
 
 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"gce",
-		time.Duration(conf.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "gce",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: reg,
+		},
 	)
 	return d, nil
 }

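Aside: refresh.NewDiscovery switches from positional arguments to an Options struct, as the gce hunk above shows. A hedged usage sketch — refreshFn and the one-minute interval are illustrative stand-ins, not taken from the diff:

package main

import (
	"context"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// refreshFn stands in for an SD-specific refresh implementation such
// as the gce Discovery's d.refresh above.
func refreshFn(ctx context.Context) ([]*targetgroup.Group, error) {
	return []*targetgroup.Group{{Source: "example"}}, nil
}

func main() {
	// The Options struct replaces the old positional arguments; the
	// Registry field is what lets each instance own its metrics.
	d := refresh.NewDiscovery(refresh.Options{
		Logger:   log.NewNopLogger(),
		Mech:     "example",
		Interval: time.Minute,
		RefreshF: refreshFn,
		Registry: prometheus.NewRegistry(),
	})
	_ = d
}
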
@@ -48,12 +48,12 @@ func TestHCloudSDRefresh(t *testing.T) {
 
 	targetGroups, err := d.refresh(context.Background())
 	require.NoError(t, err)
-	require.Equal(t, 1, len(targetGroups))
+	require.Len(t, targetGroups, 1)
 
 	targetGroup := targetGroups[0]
 	require.NotNil(t, targetGroup, "targetGroup should not be nil")
 	require.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil")
-	require.Equal(t, 3, len(targetGroup.Targets))
+	require.Len(t, targetGroup.Targets, 3)
 
 	for i, labelSet := range []model.LabelSet{
 		{

@@ -21,6 +21,7 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/hetznercloud/hcloud-go/v2/hcloud"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 
@@ -67,7 +68,7 @@ func (*SDConfig) Name() string { return "hetzner" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger)
+	return NewDiscovery(c, opts.Logger, opts.Registerer)
 }
 
 type refresher interface {
@@ -127,17 +128,20 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) {
 	r, err := newRefresher(conf, logger)
 	if err != nil {
 		return nil, err
 	}
 
 	return refresh.NewDiscovery(
-		logger,
-		"hetzner",
-		time.Duration(conf.RefreshInterval),
-		r.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "hetzner",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: r.refresh,
+			Registry: reg,
+		},
 	), nil
 }
 

@@ -47,12 +47,12 @@ func TestRobotSDRefresh(t *testing.T) {
 
 	targetGroups, err := d.refresh(context.Background())
 	require.NoError(t, err)
-	require.Equal(t, 1, len(targetGroups))
+	require.Len(t, targetGroups, 1)
 
 	targetGroup := targetGroups[0]
 	require.NotNil(t, targetGroup, "targetGroup should not be nil")
 	require.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil")
-	require.Equal(t, 2, len(targetGroup.Targets))
+	require.Len(t, targetGroup.Targets, 2)
 
 	for i, labelSet := range []model.LabelSet{
 		{
@@ -98,5 +98,5 @@ func TestRobotSDRefreshHandleError(t *testing.T) {
 	require.Error(t, err)
 	require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error())
 
-	require.Equal(t, 0, len(targetGroups))
+	require.Empty(t, targetGroups)
 }

@@ -45,17 +45,10 @@ var (
 	}
 	userAgent        = fmt.Sprintf("Prometheus/%s", version.Version)
 	matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`)
-
-	failuresCount = prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Name: "prometheus_sd_http_failures_total",
-			Help: "Number of HTTP service discovery refresh failures.",
-		})
 )
 
 func init() {
 	discovery.RegisterConfig(&SDConfig{})
-	prometheus.MustRegister(failuresCount)
 }
 
 // SDConfig is the configuration for HTTP based discovery.
@@ -70,7 +63,7 @@ func (*SDConfig) Name() string { return "http" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.HTTPClientOptions)
+	return NewDiscovery(c, opts.Logger, opts.HTTPClientOptions, opts.Registerer)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -112,10 +105,11 @@ type Discovery struct {
 	client          *http.Client
 	refreshInterval time.Duration
 	tgLastLength    int
+	failuresCount   prometheus.Counter
 }
 
 // NewDiscovery returns a new HTTP discovery for the given config.
-func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption, reg prometheus.Registerer) (*Discovery, error) {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -130,13 +124,22 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPCli
 		url:             conf.URL,
 		client:          client,
 		refreshInterval: time.Duration(conf.RefreshInterval), // Stored to be sent as headers.
+		failuresCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Name: "prometheus_sd_http_failures_total",
+				Help: "Number of HTTP service discovery refresh failures.",
+			}),
 	}
 
 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"http",
-		time.Duration(conf.RefreshInterval),
-		d.Refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "http",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: d.Refresh,
+			Registry: reg,
+			Metrics:  []prometheus.Collector{d.failuresCount},
+		},
 	)
 	return d, nil
 }
@@ -152,7 +155,7 @@ func (d *Discovery) Refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 
 	resp, err := d.client.Do(req.WithContext(ctx))
 	if err != nil {
-		failuresCount.Inc()
+		d.failuresCount.Inc()
 		return nil, err
 	}
 	defer func() {
@@ -161,31 +164,31 @@ func (d *Discovery) Refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	}()
 
 	if resp.StatusCode != http.StatusOK {
-		failuresCount.Inc()
+		d.failuresCount.Inc()
 		return nil, fmt.Errorf("server returned HTTP status %s", resp.Status)
 	}
 
 	if !matchContentType.MatchString(strings.TrimSpace(resp.Header.Get("Content-Type"))) {
-		failuresCount.Inc()
+		d.failuresCount.Inc()
 		return nil, fmt.Errorf("unsupported content type %q", resp.Header.Get("Content-Type"))
 	}
 
 	b, err := io.ReadAll(resp.Body)
 	if err != nil {
-		failuresCount.Inc()
+		d.failuresCount.Inc()
 		return nil, err
 	}
 
 	var targetGroups []*targetgroup.Group
 
 	if err := json.Unmarshal(b, &targetGroups); err != nil {
-		failuresCount.Inc()
+		d.failuresCount.Inc()
 		return nil, err
 	}
 
 	for i, tg := range targetGroups {
 		if tg == nil {
-			failuresCount.Inc()
+			d.failuresCount.Inc()
 			err = errors.New("nil target group item found")
 			return nil, err
 		}

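Aside: where file-SD registers its own collectors, the HTTP discoverer hands its failure counter to the refresh layer through Options.Metrics, so it is registered and unregistered together with the built-in refresh metrics. A short sketch of that wiring under the same assumptions as the previous example (refreshFn and the interval are placeholders):

package main

import (
	"context"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

func refreshFn(ctx context.Context) ([]*targetgroup.Group, error) {
	return nil, nil
}

func main() {
	failures := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_sd_http_failures_total",
		Help: "Number of HTTP service discovery refresh failures.",
	})

	// Metrics hands the extra collector to the refresh layer, which
	// registers it against Registry alongside its own refresh metrics.
	d := refresh.NewDiscovery(refresh.Options{
		Logger:   log.NewNopLogger(),
		Mech:     "http",
		Interval: 30 * time.Second,
		RefreshF: refreshFn,
		Registry: prometheus.NewRegistry(),
		Metrics:  []prometheus.Collector{failures},
	})
	_ = d
}
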
@@ -41,7 +41,7 @@ func TestHTTPValidRefresh(t *testing.T) {
 		RefreshInterval: model.Duration(30 * time.Second),
 	}
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil)
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry())
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -62,8 +62,8 @@ func TestHTTPValidRefresh(t *testing.T) {
 			Source: urlSource(ts.URL+"/http_sd.good.json", 0),
 		},
 	}
-	require.Equal(t, tgs, expectedTargets)
-	require.Equal(t, 0.0, getFailureCount())
+	require.Equal(t, expectedTargets, tgs)
+	require.Equal(t, 0.0, getFailureCount(d.failuresCount))
 }
 
 func TestHTTPInvalidCode(t *testing.T) {
@@ -79,13 +79,13 @@ func TestHTTPInvalidCode(t *testing.T) {
 		RefreshInterval: model.Duration(30 * time.Second),
 	}
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil)
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry())
 	require.NoError(t, err)
 
 	ctx := context.Background()
 	_, err = d.Refresh(ctx)
 	require.EqualError(t, err, "server returned HTTP status 400 Bad Request")
-	require.Equal(t, 1.0, getFailureCount())
+	require.Equal(t, 1.0, getFailureCount(d.failuresCount))
 }
 
 func TestHTTPInvalidFormat(t *testing.T) {
@@ -101,18 +101,16 @@ func TestHTTPInvalidFormat(t *testing.T) {
 		RefreshInterval: model.Duration(30 * time.Second),
 	}
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil)
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry())
 	require.NoError(t, err)
 
 	ctx := context.Background()
 	_, err = d.Refresh(ctx)
 	require.EqualError(t, err, `unsupported content type "text/plain; charset=utf-8"`)
-	require.Equal(t, 1.0, getFailureCount())
+	require.Equal(t, 1.0, getFailureCount(d.failuresCount))
 }
 
-var lastFailureCount float64
-
-func getFailureCount() float64 {
+func getFailureCount(failuresCount prometheus.Counter) float64 {
 	failureChan := make(chan prometheus.Metric)
 
 	go func() {
@@ -129,10 +127,7 @@ func getFailureCount() float64 {
 		metric.Write(&counter)
 	}
 
-	// account for failures in prior tests
-	count := *counter.Counter.Value - lastFailureCount
-	lastFailureCount = *counter.Counter.Value
-	return count
+	return *counter.Counter.Value
 }
 
 func TestContentTypeRegex(t *testing.T) {
@@ -417,7 +412,7 @@ func TestSourceDisappeared(t *testing.T) {
 		URL:             ts.URL,
 		RefreshInterval: model.Duration(1 * time.Second),
 	}
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil)
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry())
 	require.NoError(t, err)
 	for _, test := range cases {
 		ctx := context.Background()

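Aside: the reworked getFailureCount drains the collector's Metric channel into a dto.Metric by hand. Because each test now owns its counter, client_golang's testutil.ToFloat64 would read the same value in one call — a hedged alternative sketch, not the approach the diff itself takes:

package example

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/stretchr/testify/require"
)

// TestCounterRead shows why passing the counter explicitly removes the
// lastFailureCount bookkeeping: each test reads its own instance.
func TestCounterRead(t *testing.T) {
	failures := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_sd_http_failures_total",
		Help: "Number of HTTP service discovery refresh failures.",
	})
	failures.Inc()

	// One-call alternative to draining the Metric channel by hand.
	require.Equal(t, 1.0, testutil.ToFloat64(failures))
}
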
@@ -23,6 +23,8 @@ import (
 
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/refresh"
+
+	"github.com/prometheus/client_golang/prometheus"
 )
 
 const (
@@ -41,7 +43,7 @@ func init() {
 type Discovery struct{}
 
 // NewDiscovery returns a new refresh.Discovery for IONOS Cloud.
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) {
 	if conf.ionosEndpoint == "" {
 		conf.ionosEndpoint = "https://api.ionos.com"
 	}
@@ -52,10 +54,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error)
 	}
 
 	return refresh.NewDiscovery(
-		logger,
-		"ionos",
-		time.Duration(conf.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "ionos",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: reg,
+		},
 	), nil
 }
 
@@ -86,7 +91,7 @@ func (c SDConfig) Name() string {
 
 // NewDiscoverer returns a new discovery.Discoverer for IONOS Cloud.
 func (c SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(&c, options.Logger)
+	return NewDiscovery(&c, options.Logger, options.Registerer)
 }
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.

@@ -48,12 +48,12 @@ func TestIONOSServerRefresh(t *testing.T) {
 	tgs, err := d.refresh(ctx)
 	require.NoError(t, err)
 
-	require.Equal(t, 1, len(tgs))
+	require.Len(t, tgs, 1)
 
 	tg := tgs[0]
 	require.NotNil(t, tg)
 	require.NotNil(t, tg.Targets)
-	require.Equal(t, 2, len(tg.Targets))
+	require.Len(t, tg.Targets, 2)
 
 	for i, lbls := range []model.LabelSet{
 		{

@@ -22,6 +22,7 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/cache"
@@ -30,12 +31,6 @@ import (
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
-var (
-	epAddCount    = eventCount.WithLabelValues("endpoints", "add")
-	epUpdateCount = eventCount.WithLabelValues("endpoints", "update")
-	epDeleteCount = eventCount.WithLabelValues("endpoints", "delete")
-)
-
 // Endpoints discovers new endpoint targets.
 type Endpoints struct {
 	logger log.Logger
@@ -54,10 +49,19 @@ type Endpoints struct {
 }
 
 // NewEndpoints returns a new endpoints discovery.
-func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer) *Endpoints {
+func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints {
 	if l == nil {
 		l = log.NewNopLogger()
 	}
 
+	epAddCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleAdd)
+	epUpdateCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleUpdate)
+	epDeleteCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleDelete)
+
+	svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd)
+	svcUpdateCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleUpdate)
+	svcDeleteCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleDelete)
+
 	e := &Endpoints{
 		logger:       l,
 		endpointsInf: eps,
@@ -68,7 +72,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
 		podStore:         pod.GetStore(),
 		nodeInf:          node,
 		withNodeMetadata: node != nil,
-		queue:            workqueue.NewNamed("endpoints"),
+		queue:            workqueue.NewNamed(RoleEndpoint.String()),
 	}
 
 	_, err := e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{

@@ -22,6 +22,7 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	apiv1 "k8s.io/api/core/v1"
 	v1 "k8s.io/api/discovery/v1"
@@ -33,12 +34,6 @@ import (
 	"github.com/prometheus/prometheus/util/strutil"
 )
 
-var (
-	epslAddCount    = eventCount.WithLabelValues("endpointslice", "add")
-	epslUpdateCount = eventCount.WithLabelValues("endpointslice", "update")
-	epslDeleteCount = eventCount.WithLabelValues("endpointslice", "delete")
-)
-
 // EndpointSlice discovers new endpoint targets.
 type EndpointSlice struct {
 	logger log.Logger
@@ -57,10 +52,19 @@ type EndpointSlice struct {
 }
 
 // NewEndpointSlice returns a new endpointslice discovery.
-func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer) *EndpointSlice {
+func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice {
 	if l == nil {
 		l = log.NewNopLogger()
 	}
 
+	epslAddCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleAdd)
+	epslUpdateCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleUpdate)
+	epslDeleteCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleDelete)
+
+	svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd)
+	svcUpdateCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleUpdate)
+	svcDeleteCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleDelete)
+
 	e := &EndpointSlice{
 		logger:           l,
 		endpointSliceInf: eps,
@@ -71,7 +75,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 		podStore:         pod.GetStore(),
 		nodeInf:          node,
 		withNodeMetadata: node != nil,
-		queue:            workqueue.NewNamed("endpointSlice"),
+		queue:            workqueue.NewNamed(RoleEndpointSlice.String()),
 	}
 
 	_, err := e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{

@@ -29,7 +29,7 @@ func Test_EndpointSliceAdaptor_v1(t *testing.T) {
 	require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace())
 	require.Equal(t, endpointSlice.AddressType, v1.AddressType(adaptor.addressType()))
 	require.Equal(t, endpointSlice.Labels, adaptor.labels())
-	require.Equal(t, endpointSlice.Labels[v1.LabelServiceName], "testendpoints")
+	require.Equal(t, "testendpoints", endpointSlice.Labels[v1.LabelServiceName])
 
 	for i, endpointAdaptor := range adaptor.endpoints() {
 		require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses())
@@ -57,7 +57,7 @@ func Test_EndpointSliceAdaptor_v1beta1(t *testing.T) {
 	require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace())
 	require.Equal(t, endpointSlice.AddressType, v1beta1.AddressType(adaptor.addressType()))
 	require.Equal(t, endpointSlice.Labels, adaptor.labels())
-	require.Equal(t, endpointSlice.Labels[v1beta1.LabelServiceName], "testendpoints")
+	require.Equal(t, "testendpoints", endpointSlice.Labels[v1beta1.LabelServiceName])
 
 	for i, endpointAdaptor := range adaptor.endpoints() {
 		require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses())

@@ -21,6 +21,7 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	v1 "k8s.io/api/networking/v1"
 	"k8s.io/api/networking/v1beta1"
@@ -30,12 +31,6 @@ import (
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
-var (
-	ingressAddCount    = eventCount.WithLabelValues("ingress", "add")
-	ingressUpdateCount = eventCount.WithLabelValues("ingress", "update")
-	ingressDeleteCount = eventCount.WithLabelValues("ingress", "delete")
-)
-
 // Ingress implements discovery of Kubernetes ingress.
 type Ingress struct {
 	logger log.Logger
@@ -45,8 +40,18 @@ type Ingress struct {
 }
 
 // NewIngress returns a new ingress discovery.
-func NewIngress(l log.Logger, inf cache.SharedInformer) *Ingress {
-	s := &Ingress{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("ingress")}
+func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress {
+	ingressAddCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleAdd)
+	ingressUpdateCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleUpdate)
+	ingressDeleteCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleDelete)
+
+	s := &Ingress{
+		logger:   l,
+		informer: inf,
+		store:    inf.GetStore(),
+		queue:    workqueue.NewNamed(RoleIngress.String()),
+	}
+
 	_, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(o interface{}) {
 			ingressAddCount.Inc()

@@ -58,24 +58,14 @@ import (
 const (
 	// metaLabelPrefix is the meta prefix used for all meta labels.
 	// in this discovery.
 	metaLabelPrefix = model.MetaLabelPrefix + "kubernetes_"
 	namespaceLabel  = metaLabelPrefix + "namespace"
-	metricsNamespace = "prometheus_sd_kubernetes"
-	presentValue     = model.LabelValue("true")
+	presentValue = model.LabelValue("true")
 )
 
 var (
 	// Http header.
 	userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
-	// Custom events metric.
-	eventCount = prometheus.NewCounterVec(
-		prometheus.CounterOpts{
-			Namespace: metricsNamespace,
-			Name:      "events_total",
-			Help:      "The number of Kubernetes events handled.",
-		},
-		[]string{"role", "event"},
-	)
 	// DefaultSDConfig is the default Kubernetes SD configuration.
 	DefaultSDConfig = SDConfig{
 		HTTPClientConfig: config.DefaultHTTPClientConfig,
@@ -84,15 +74,6 @@ var (
 
 func init() {
 	discovery.RegisterConfig(&SDConfig{})
-	prometheus.MustRegister(eventCount)
-	// Initialize metric vectors.
-	for _, role := range []string{"endpointslice", "endpoints", "node", "pod", "service", "ingress"} {
-		for _, evt := range []string{"add", "delete", "update"} {
-			eventCount.WithLabelValues(role, evt)
-		}
-	}
-	(&clientGoRequestMetricAdapter{}).Register(prometheus.DefaultRegisterer)
-	(&clientGoWorkqueueMetricsProvider{}).Register(prometheus.DefaultRegisterer)
 }
 
 // Role is role of the service in Kubernetes.
@@ -121,6 +102,16 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	}
 }
 
+func (c Role) String() string {
+	return string(c)
+}
+
+const (
+	MetricLabelRoleAdd    = "add"
+	MetricLabelRoleDelete = "delete"
+	MetricLabelRoleUpdate = "update"
+)
+
 // SDConfig is the configuration for Kubernetes service discovery.
 type SDConfig struct {
 	APIServer config.URL `yaml:"api_server,omitempty"`
@@ -137,7 +128,7 @@ func (*SDConfig) Name() string { return "kubernetes" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return New(opts.Logger, c)
+	return New(opts.Logger, opts.Registerer, c)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -274,6 +265,8 @@ type Discovery struct {
 	selectors        roleSelector
 	ownNamespace     string
 	attachMetadata   AttachMetadataConfig
+	eventCount       *prometheus.CounterVec
+	metricRegisterer discovery.MetricRegisterer
 }
 
 func (d *Discovery) getNamespaces() []string {
@@ -292,7 +285,7 @@ func (d *Discovery) getNamespaces() []string {
 }
 
 // New creates a new Kubernetes discovery for the given role.
-func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
+func New(l log.Logger, reg prometheus.Registerer, conf *SDConfig) (*Discovery, error) {
 	if l == nil {
 		l = log.NewNopLogger()
 	}
@@ -346,7 +339,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
 		return nil, err
 	}
 
-	return &Discovery{
+	d := &Discovery{
 		client: c,
 		logger: l,
 		role:   conf.Role,
@@ -355,7 +348,37 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
 		selectors:      mapSelector(conf.Selectors),
 		ownNamespace:   ownNamespace,
 		attachMetadata: conf.AttachMetadata,
-	}, nil
+		eventCount: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: discovery.KubernetesMetricsNamespace,
+				Name:      "events_total",
+				Help:      "The number of Kubernetes events handled.",
+			},
+			[]string{"role", "event"},
+		),
+	}
+
+	d.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{d.eventCount})
+
+	// Initialize metric vectors.
+	for _, role := range []string{
+		RoleEndpointSlice.String(),
+		RoleEndpoint.String(),
+		RoleNode.String(),
+		RolePod.String(),
+		RoleService.String(),
+		RoleIngress.String(),
+	} {
+		for _, evt := range []string{
+			MetricLabelRoleAdd,
+			MetricLabelRoleDelete,
+			MetricLabelRoleUpdate,
+		} {
+			d.eventCount.WithLabelValues(role, evt)
+		}
+	}
+
+	return d, nil
 }
 
 func mapSelector(rawSelector []SelectorConfig) roleSelector {
@@ -391,6 +414,14 @@ const resyncDisabled = 0
 // Run implements the discoverer interface.
 func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	d.Lock()
 
+	err := d.metricRegisterer.RegisterMetrics()
+	if err != nil {
+		level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error())
+		return
+	}
+	defer d.metricRegisterer.UnregisterMetrics()
+
 	namespaces := d.getNamespaces()
 
 	switch d.role {
@@ -482,6 +513,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
 				cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
 				nodeInf,
+				d.eventCount,
 			)
 			d.discoverers = append(d.discoverers, eps)
 			go eps.endpointSliceInf.Run(ctx.Done())
@@ -541,6 +573,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
 				cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
 				nodeInf,
+				d.eventCount,
 			)
 			d.discoverers = append(d.discoverers, eps)
 			go eps.endpointsInf.Run(ctx.Done())
@@ -572,6 +605,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 				log.With(d.logger, "role", "pod"),
 				d.newPodsByNodeInformer(plw),
 				nodeInformer,
+				d.eventCount,
 			)
 			d.discoverers = append(d.discoverers, pod)
 			go pod.podInf.Run(ctx.Done())
@@ -594,6 +628,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			svc := NewService(
 				log.With(d.logger, "role", "service"),
 				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
+				d.eventCount,
 			)
 			d.discoverers = append(d.discoverers, svc)
 			go svc.informer.Run(ctx.Done())
@@ -651,13 +686,14 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			ingress := NewIngress(
 				log.With(d.logger, "role", "ingress"),
 				informer,
+				d.eventCount,
 			)
 			d.discoverers = append(d.discoverers, ingress)
 			go ingress.informer.Run(ctx.Done())
 		}
 	case RoleNode:
 		nodeInformer := d.newNodeInformer(ctx)
-		node := NewNode(log.With(d.logger, "role", "node"), nodeInformer)
+		node := NewNode(log.With(d.logger, "role", "node"), nodeInformer, d.eventCount)
 		d.discoverers = append(d.discoverers, node)
 		go node.informer.Run(ctx.Done())
 	default:

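Aside: the Kubernetes hunks replace one shared events_total vector with a per-Discovery CounterVec that is pre-populated for every role/event pair and passed down to the role discoverers. A standalone sketch of that initialization; the literal namespace string is assumed to match discovery.KubernetesMetricsNamespace, which this diff references but does not define:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Per-instance replacement for the deleted package-level eventCount.
	// "prometheus_sd_kubernetes" is the value the deleted metricsNamespace
	// const held; the new code reads it from discovery.KubernetesMetricsNamespace.
	eventCount := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "prometheus_sd_kubernetes",
			Name:      "events_total",
			Help:      "The number of Kubernetes events handled.",
		},
		[]string{"role", "event"},
	)

	// Pre-populate every role/event pair so all series exist at 0
	// before the first event is observed.
	for _, role := range []string{"endpointslice", "endpoints", "node", "pod", "service", "ingress"} {
		for _, evt := range []string{"add", "delete", "update"} {
			eventCount.WithLabelValues(role, evt)
		}
	}

	eventCount.WithLabelValues("pod", "add").Inc()
	fmt.Println("event counters initialized")
}
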
@@ -29,6 +29,8 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/tools/cache"
 
+	"github.com/prometheus/client_golang/prometheus"
+
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/util/testutil"
@@ -49,13 +51,25 @@ func makeDiscoveryWithVersion(role Role, nsDiscovery NamespaceDiscovery, k8sVer
 	fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery)
 	fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: k8sVer}
 
-	return &Discovery{
+	d := &Discovery{
 		client:             clientset,
 		logger:             log.NewNopLogger(),
 		role:               role,
 		namespaceDiscovery: &nsDiscovery,
 		ownNamespace:       "own-ns",
-	}, clientset
+		eventCount: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: discovery.KubernetesMetricsNamespace,
+				Name:      "events_total",
+				Help:      "The number of Kubernetes events handled.",
+			},
+			[]string{"role", "event"},
+		),
+	}
+
+	d.metricRegisterer = discovery.NewMetricRegisterer(prometheus.NewRegistry(), []prometheus.Collector{d.eventCount})
+
+	return d, clientset
 }
 
 // makeDiscoveryWithMetadata creates a kubernetes.Discovery instance with the specified metadata config.

@@ -22,6 +22,7 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/cache"
@@ -35,12 +36,6 @@ const (
 	NodeLegacyHostIP = "LegacyHostIP"
 )
 
-var (
-	nodeAddCount    = eventCount.WithLabelValues("node", "add")
-	nodeUpdateCount = eventCount.WithLabelValues("node", "update")
-	nodeDeleteCount = eventCount.WithLabelValues("node", "delete")
-)
-
 // Node discovers Kubernetes nodes.
 type Node struct {
 	logger log.Logger
@@ -50,11 +45,22 @@ type Node struct {
 }
 
 // NewNode returns a new node discovery.
-func NewNode(l log.Logger, inf cache.SharedInformer) *Node {
+func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node {
 	if l == nil {
 		l = log.NewNopLogger()
 	}
-	n := &Node{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("node")}
+
+	nodeAddCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleAdd)
+	nodeUpdateCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleUpdate)
+	nodeDeleteCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleDelete)
+
+	n := &Node{
+		logger:   l,
+		informer: inf,
+		store:    inf.GetStore(),
+		queue:    workqueue.NewNamed(RoleNode.String()),
+	}
+
 	_, err := n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(o interface{}) {
 			nodeAddCount.Inc()

@@ -23,6 +23,7 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -34,12 +35,6 @@ import (
 
 const nodeIndex = "node"
 
-var (
-	podAddCount    = eventCount.WithLabelValues("pod", "add")
-	podUpdateCount = eventCount.WithLabelValues("pod", "update")
-	podDeleteCount = eventCount.WithLabelValues("pod", "delete")
-)
-
 // Pod discovers new pod targets.
 type Pod struct {
 	podInf cache.SharedIndexInformer
@@ -51,18 +46,22 @@ type Pod struct {
 }
 
 // NewPod creates a new pod discovery.
-func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer) *Pod {
+func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod {
 	if l == nil {
 		l = log.NewNopLogger()
 	}
 
+	podAddCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleAdd)
+	podDeleteCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleDelete)
+	podUpdateCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleUpdate)
+
 	p := &Pod{
 		podInf:           pods,
 		nodeInf:          nodes,
 		withNodeMetadata: nodes != nil,
 		store:            pods.GetStore(),
 		logger:           l,
-		queue:            workqueue.NewNamed("pod"),
+		queue:            workqueue.NewNamed(RolePod.String()),
 	}
 	_, err := p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(o interface{}) {

@@ -22,6 +22,7 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/cache"
@@ -30,12 +31,6 @@ import (
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
-var (
-	svcAddCount    = eventCount.WithLabelValues("service", "add")
-	svcUpdateCount = eventCount.WithLabelValues("service", "update")
-	svcDeleteCount = eventCount.WithLabelValues("service", "delete")
-)
-
 // Service implements discovery of Kubernetes services.
 type Service struct {
 	logger log.Logger
@@ -45,11 +40,22 @@ type Service struct {
 }
 
 // NewService returns a new service discovery.
-func NewService(l log.Logger, inf cache.SharedInformer) *Service {
+func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service {
 	if l == nil {
 		l = log.NewNopLogger()
 	}
-	s := &Service{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("service")}
+
+	svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd)
+	svcUpdateCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleUpdate)
+	svcDeleteCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleDelete)
+
+	s := &Service{
+		logger:   l,
+		informer: inf,
+		store:    inf.GetStore(),
+		queue:    workqueue.NewNamed(RoleService.String()),
+	}
+
 	_, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(o interface{}) {
 			svcAddCount.Inc()

discovery/legacymanager/manager.go

@@ -28,48 +28,6 @@ import (
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
-var (
-	failedConfigs = prometheus.NewGaugeVec(
-		prometheus.GaugeOpts{
-			Name: "prometheus_sd_failed_configs",
-			Help: "Current number of service discovery configurations that failed to load.",
-		},
-		[]string{"name"},
-	)
-	discoveredTargets = prometheus.NewGaugeVec(
-		prometheus.GaugeOpts{
-			Name: "prometheus_sd_discovered_targets",
-			Help: "Current number of discovered targets.",
-		},
-		[]string{"name", "config"},
-	)
-	receivedUpdates = prometheus.NewCounterVec(
-		prometheus.CounterOpts{
-			Name: "prometheus_sd_received_updates_total",
-			Help: "Total number of update events received from the SD providers.",
-		},
-		[]string{"name"},
-	)
-	delayedUpdates = prometheus.NewCounterVec(
-		prometheus.CounterOpts{
-			Name: "prometheus_sd_updates_delayed_total",
-			Help: "Total number of update events that couldn't be sent immediately.",
-		},
-		[]string{"name"},
-	)
-	sentUpdates = prometheus.NewCounterVec(
-		prometheus.CounterOpts{
-			Name: "prometheus_sd_updates_total",
-			Help: "Total number of update events sent to the SD consumers.",
-		},
-		[]string{"name"},
-	)
-)
-
-func RegisterMetrics() {
-	prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates)
-}
-
 type poolKey struct {
 	setName  string
 	provider string
@@ -84,7 +42,7 @@ type provider struct {
 }
 
 // NewManager is the Discovery Manager constructor.
-func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager)) *Manager {
+func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, options ...func(*Manager)) *Manager {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -96,10 +54,21 @@ func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager
 		ctx:         ctx,
 		updatert:    5 * time.Second,
 		triggerSend: make(chan struct{}, 1),
+		registerer:  registerer,
 	}
 	for _, option := range options {
 		option(mgr)
 	}
 
+	// Register the metrics.
+	// We have to do this after setting all options, so that the name of the Manager is set.
+	if metrics, err := discovery.NewMetrics(registerer, mgr.name); err == nil {
+		mgr.metrics = metrics
+	} else {
+		level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
+		return nil
+	}
+
 	return mgr
 }
 
@@ -135,6 +104,11 @@ type Manager struct {
 
 	// The triggerSend channel signals to the manager that new updates have been received from providers.
 	triggerSend chan struct{}
+
+	// A registerer for all service discovery metrics.
+	registerer prometheus.Registerer
+
+	metrics *discovery.Metrics
 }
 
 // Run starts the background processing.
@@ -157,7 +131,7 @@ func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error {
 
 	for pk := range m.targets {
 		if _, ok := cfg[pk.setName]; !ok {
-			discoveredTargets.DeleteLabelValues(m.name, pk.setName)
+			m.metrics.DiscoveredTargets.DeleteLabelValues(m.name, pk.setName)
 		}
 	}
 	m.cancelDiscoverers()
@@ -168,9 +142,9 @@ func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error {
 	failedCount := 0
 	for name, scfg := range cfg {
 		failedCount += m.registerProviders(scfg, name)
-		discoveredTargets.WithLabelValues(m.name, name).Set(0)
+		m.metrics.DiscoveredTargets.WithLabelValues(name).Set(0)
 	}
-	failedConfigs.WithLabelValues(m.name).Set(float64(failedCount))
+	m.metrics.FailedConfigs.Set(float64(failedCount))
 
 	for _, prov := range m.providers {
 		m.startProvider(m.ctx, prov)
@@ -207,7 +181,7 @@ func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targ
 		case <-ctx.Done():
 			return
 		case tgs, ok := <-updates:
-			receivedUpdates.WithLabelValues(m.name).Inc()
+			m.metrics.ReceivedUpdates.Inc()
 			if !ok {
 				level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name)
 				return
@@ -236,11 +210,11 @@ func (m *Manager) sender() {
 		case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker.
 			select {
 			case <-m.triggerSend:
-				sentUpdates.WithLabelValues(m.name).Inc()
+				m.metrics.SentUpdates.Inc()
 				select {
 				case m.syncCh <- m.allGroups():
 				default:
-					delayedUpdates.WithLabelValues(m.name).Inc()
+					m.metrics.DelayedUpdates.Inc()
 					level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle")
 					select {
 					case m.triggerSend <- struct{}{}:
@@ -288,7 +262,7 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group {
 		}
 	}
 	for setName, v := range n {
-		discoveredTargets.WithLabelValues(m.name, setName).Set(float64(v))
+		m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v))
 	}
 	return tSets
 }
@@ -309,7 +283,8 @@ func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int
 		}
 		typ := cfg.Name()
 		d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{
 			Logger: log.With(m.logger, "discovery", typ, "config", setName),
+			Registerer: m.registerer,
 		})
 		if err != nil {
 			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName)

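The call sites above reference a `discovery.Metrics` bundle created per manager. A rough sketch of what such a bundle and its constructor could look like, inferred purely from the fields used in this diff (`FailedConfigs`, `DiscoveredTargets`, `ReceivedUpdates`, `DelayedUpdates`, `SentUpdates`); the real upstream definition may differ in names and label handling:

```go
package discovery

import "github.com/prometheus/client_golang/prometheus"

// Metrics bundles the per-manager service discovery metrics.
// This is a sketch inferred from the call sites, not the exact
// upstream definition.
type Metrics struct {
	FailedConfigs     prometheus.Gauge
	DiscoveredTargets *prometheus.GaugeVec
	ReceivedUpdates   prometheus.Counter
	DelayedUpdates    prometheus.Counter
	SentUpdates       prometheus.Counter
}

// NewMetrics bakes the manager name in as a constant label and
// registers every collector on reg, so two managers with different
// names can coexist on one registry.
func NewMetrics(reg prometheus.Registerer, name string) (*Metrics, error) {
	cl := prometheus.Labels{"name": name}
	m := &Metrics{
		FailedConfigs: prometheus.NewGauge(prometheus.GaugeOpts{
			Name:        "prometheus_sd_failed_configs",
			Help:        "Current number of service discovery configurations that failed to load.",
			ConstLabels: cl,
		}),
		DiscoveredTargets: prometheus.NewGaugeVec(prometheus.GaugeOpts{
			Name:        "prometheus_sd_discovered_targets",
			Help:        "Current number of discovered targets.",
			ConstLabels: cl,
		}, []string{"config"}),
		ReceivedUpdates: prometheus.NewCounter(prometheus.CounterOpts{
			Name:        "prometheus_sd_received_updates_total",
			Help:        "Total number of update events received from the SD providers.",
			ConstLabels: cl,
		}),
		DelayedUpdates: prometheus.NewCounter(prometheus.CounterOpts{
			Name:        "prometheus_sd_updates_delayed_total",
			Help:        "Total number of update events that couldn't be sent immediately.",
			ConstLabels: cl,
		}),
		SentUpdates: prometheus.NewCounter(prometheus.CounterOpts{
			Name:        "prometheus_sd_updates_total",
			Help:        "Total number of update events sent to the SD consumers.",
			ConstLabels: cl,
		}),
	}
	for _, c := range []prometheus.Collector{
		m.FailedConfigs, m.DiscoveredTargets,
		m.ReceivedUpdates, m.DelayedUpdates, m.SentUpdates,
	} {
		if err := reg.Register(c); err != nil {
			return nil, err
		}
	}
	return m, nil
}
```

Moving the `"name"` dimension into a constant label is one way to reconcile the old `WithLabelValues(m.name, ...)` call sites with the new single-argument ones seen above.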
discovery/legacymanager/manager_test.go

@@ -22,6 +22,7 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
@@ -664,7 +665,8 @@ func TestTargetUpdatesOrder(t *testing.T) {
 			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 			defer cancel()
 
-			discoveryManager := NewManager(ctx, log.NewNopLogger())
+			discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+			require.NotNil(t, discoveryManager)
 			discoveryManager.updatert = 100 * time.Millisecond
 
 			var totalUpdatesCount int
@@ -746,7 +748,8 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
 func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
 
@@ -774,7 +777,8 @@ func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
 func TestDiscovererConfigs(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
 
@@ -798,7 +802,8 @@ func TestDiscovererConfigs(t *testing.T) {
 func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
 
@@ -837,7 +842,8 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, nil)
+	discoveryManager := NewManager(ctx, nil, prometheus.NewRegistry())
+	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
 
@@ -868,7 +874,8 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
 	}
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
 
@@ -893,7 +900,8 @@ func (e errorConfig) NewDiscoverer(discovery.DiscovererOptions) (discovery.Disco
 func TestGaugeFailedConfigs(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
 
@@ -907,7 +915,7 @@ func TestGaugeFailedConfigs(t *testing.T) {
 	discoveryManager.ApplyConfig(c)
 	<-discoveryManager.SyncCh()
 
-	failedCount := client_testutil.ToFloat64(failedConfigs)
+	failedCount := client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs)
 	if failedCount != 3 {
 		t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount)
 	}
@@ -918,7 +926,7 @@ func TestGaugeFailedConfigs(t *testing.T) {
 	discoveryManager.ApplyConfig(c)
 	<-discoveryManager.SyncCh()
 
-	failedCount = client_testutil.ToFloat64(failedConfigs)
+	failedCount = client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs)
 	if failedCount != 0 {
 		t.Fatalf("Expected to get no failed config, got: %v", failedCount)
 	}
@@ -1049,7 +1057,8 @@ func TestCoordinationWithReceiver(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()
 
-	mgr := NewManager(ctx, nil)
+	mgr := NewManager(ctx, nil, prometheus.NewRegistry())
+	require.NotNil(t, mgr)
 	mgr.updatert = updateDelay
 	go mgr.Run()

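Each test now builds its manager around a fresh `prometheus.NewRegistry()`. That keeps metric registration isolated per test: registering collectors with identical descriptors twice on one registry fails, which is exactly what would happen if every manager in the test binary shared the default registry. A minimal illustration of the failure mode the fresh registries avoid (the gauge name here is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func newGauge() prometheus.Gauge {
	return prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "example_sd_failed_configs", // hypothetical name
		Help: "Example gauge.",
	})
}

func main() {
	// Registering two collectors with the same descriptor on one
	// registry returns an AlreadyRegisteredError...
	shared := prometheus.NewRegistry()
	fmt.Println(shared.Register(newGauge())) // <nil>
	fmt.Println(shared.Register(newGauge())) // duplicate registration error

	// ...while one registry per manager (or per test) accepts each
	// collector cleanly.
	a, b := prometheus.NewRegistry(), prometheus.NewRegistry()
	fmt.Println(a.Register(newGauge()), b.Register(newGauge())) // <nil> <nil>
}
```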
discovery/linode/linode.go

@@ -67,24 +67,15 @@ const (
 )
 
 // DefaultSDConfig is the default Linode SD configuration.
-var (
-	DefaultSDConfig = SDConfig{
-		TagSeparator:     ",",
-		Port:             80,
-		RefreshInterval:  model.Duration(60 * time.Second),
-		HTTPClientConfig: config.DefaultHTTPClientConfig,
-	}
-
-	failuresCount = prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Name: "prometheus_sd_linode_failures_total",
-			Help: "Number of Linode service discovery refresh failures.",
-		})
-)
+var DefaultSDConfig = SDConfig{
+	TagSeparator:     ",",
+	Port:             80,
+	RefreshInterval:  model.Duration(60 * time.Second),
+	HTTPClientConfig: config.DefaultHTTPClientConfig,
+}
 
 func init() {
 	discovery.RegisterConfig(&SDConfig{})
-	prometheus.MustRegister(failuresCount)
 }
 
 // SDConfig is the configuration for Linode based service discovery.
@@ -101,7 +92,7 @@ func (*SDConfig) Name() string { return "linode" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger)
+	return NewDiscovery(c, opts.Logger, opts.Registerer)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -131,16 +122,22 @@ type Discovery struct {
 	pollCount           int
 	lastResults         []*targetgroup.Group
 	eventPollingEnabled bool
+	failuresCount       prometheus.Counter
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
 	d := &Discovery{
 		port:                 conf.Port,
 		tagSeparator:         conf.TagSeparator,
 		pollCount:            0,
 		lastRefreshTimestamp: time.Now().UTC(),
 		eventPollingEnabled:  true,
+		failuresCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Name: "prometheus_sd_linode_failures_total",
+				Help: "Number of Linode service discovery refresh failures.",
+			}),
 	}
 
 	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "linode_sd")
@@ -158,10 +155,14 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 	d.client = &client
 
 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"linode",
-		time.Duration(conf.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "linode",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: reg,
+			Metrics:  []prometheus.Collector{d.failuresCount},
+		},
 	)
 	return d, nil
 }
@@ -222,14 +223,14 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
 	// Gather all linode instances.
 	instances, err := d.client.ListInstances(ctx, &linodego.ListOptions{PageSize: 500})
 	if err != nil {
-		failuresCount.Inc()
+		d.failuresCount.Inc()
 		return nil, err
 	}
 
 	// Gather detailed IP address info for all IPs on all linode instances.
 	detailedIPs, err := d.client.ListIPAddresses(ctx, &linodego.ListOptions{PageSize: 500})
 	if err != nil {
-		failuresCount.Inc()
+		d.failuresCount.Inc()
 		return nil, err
 	}
 

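The failure counter moves from a package-level variable registered in `init()` to a field on each `Discovery`, handed to the refresher through `refresh.Options`. A sketch of how an options-based constructor can take ownership of caller-supplied collectors; the `Options` field names mirror the call site above, but the struct layout and the registration body are inferred, not the upstream implementation:

```go
package refresh

import (
	"context"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// Options mirrors the fields used by the linode change above; the
// exact struct is inferred from that call site.
type Options struct {
	Logger   log.Logger
	Mech     string
	Interval time.Duration
	RefreshF func(ctx context.Context) ([]*targetgroup.Group, error)
	Registry prometheus.Registerer
	Metrics  []prometheus.Collector
}

// registerMetrics sketches the registration step: the refresher puts
// the caller's collectors on the caller's registry, so nothing touches
// prometheus.DefaultRegisterer at package init time and two Discovery
// instances no longer collide on one global counter.
func registerMetrics(opts Options) error {
	for _, c := range opts.Metrics {
		if err := opts.Registry.Register(c); err != nil {
			return err
		}
	}
	return nil
}
```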
discovery/linode/linode_test.go

@@ -20,6 +20,7 @@ import (
 	"testing"
 
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
@@ -52,7 +53,7 @@ func TestLinodeSDRefresh(t *testing.T) {
 		Credentials: tokenID,
 		Type:        "Bearer",
 	}
-	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
 	require.NoError(t, err)
 	endpoint, err := url.Parse(sdmock.Mock.Endpoint())
 	require.NoError(t, err)
@@ -61,12 +62,12 @@ func TestLinodeSDRefresh(t *testing.T) {
 	tgs, err := d.refresh(context.Background())
 	require.NoError(t, err)
 
-	require.Equal(t, 1, len(tgs))
+	require.Len(t, tgs, 1)
 
 	tg := tgs[0]
 	require.NotNil(t, tg)
 	require.NotNil(t, tg.Targets)
-	require.Equal(t, 4, len(tg.Targets))
+	require.Len(t, tg.Targets, 4)
 
 	for i, lbls := range []model.LabelSet{
 		{

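The assertion rewrites here (and throughout the test diffs below) swap `require.Equal` on `len(...)` for `require.Len`, the form the testifylint linter suggests: on failure it prints the collection itself alongside its length instead of two bare integers, and the intent of the check is explicit. For instance:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestLenAssertion(t *testing.T) {
	tgs := []string{"a", "b", "c", "d"}

	// Before: a failure reports only the two mismatched integers.
	require.Equal(t, 4, len(tgs))

	// After: a failure dumps the slice contents with the length,
	// which makes the broken case much easier to diagnose.
	require.Len(t, tgs, 4)
}
```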
discovery/manager.go

@@ -28,48 +28,6 @@ import (
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
-var (
-	failedConfigs = prometheus.NewGaugeVec(
-		prometheus.GaugeOpts{
-			Name: "prometheus_sd_failed_configs",
-			Help: "Current number of service discovery configurations that failed to load.",
-		},
-		[]string{"name"},
-	)
-	discoveredTargets = prometheus.NewGaugeVec(
-		prometheus.GaugeOpts{
-			Name: "prometheus_sd_discovered_targets",
-			Help: "Current number of discovered targets.",
-		},
-		[]string{"name", "config"},
-	)
-	receivedUpdates = prometheus.NewCounterVec(
-		prometheus.CounterOpts{
-			Name: "prometheus_sd_received_updates_total",
-			Help: "Total number of update events received from the SD providers.",
-		},
-		[]string{"name"},
-	)
-	delayedUpdates = prometheus.NewCounterVec(
-		prometheus.CounterOpts{
-			Name: "prometheus_sd_updates_delayed_total",
-			Help: "Total number of update events that couldn't be sent immediately.",
-		},
-		[]string{"name"},
-	)
-	sentUpdates = prometheus.NewCounterVec(
-		prometheus.CounterOpts{
-			Name: "prometheus_sd_updates_total",
-			Help: "Total number of update events sent to the SD consumers.",
-		},
-		[]string{"name"},
-	)
-)
-
-func RegisterMetrics() {
-	prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates)
-}
-
 type poolKey struct {
 	setName  string
 	provider string
@@ -107,7 +65,7 @@ func (p *Provider) Config() interface{} {
 }
 
 // NewManager is the Discovery Manager constructor.
-func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager)) *Manager {
+func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, options ...func(*Manager)) *Manager {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -118,10 +76,21 @@ func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager
 		ctx:         ctx,
 		updatert:    5 * time.Second,
 		triggerSend: make(chan struct{}, 1),
+		registerer:  registerer,
 	}
 	for _, option := range options {
 		option(mgr)
 	}
 
+	// Register the metrics.
+	// We have to do this after setting all options, so that the name of the Manager is set.
+	if metrics, err := NewMetrics(registerer, mgr.name); err == nil {
+		mgr.metrics = metrics
+	} else {
+		level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
+		return nil
+	}
+
 	return mgr
}
 
@@ -170,6 +139,11 @@ type Manager struct {
 
 	// lastProvider counts providers registered during Manager's lifetime.
 	lastProvider uint
+
+	// A registerer for all service discovery metrics.
+	registerer prometheus.Registerer
+
+	metrics *Metrics
 }
 
 // Providers returns the currently configured SD providers.
@@ -200,7 +174,7 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
 	for name, scfg := range cfg {
 		failedCount += m.registerProviders(scfg, name)
 	}
-	failedConfigs.WithLabelValues(m.name).Set(float64(failedCount))
+	m.metrics.FailedConfigs.Set(float64(failedCount))
 
 	var (
 		wg sync.WaitGroup
@@ -230,13 +204,13 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
 			// Remove obsolete subs' targets.
 			if _, ok := prov.newSubs[s]; !ok {
 				delete(m.targets, poolKey{s, prov.name})
-				discoveredTargets.DeleteLabelValues(m.name, s)
+				m.metrics.DiscoveredTargets.DeleteLabelValues(m.name, s)
 			}
 		}
 		// Set metrics and targets for new subs.
 		for s := range prov.newSubs {
 			if _, ok := prov.subs[s]; !ok {
-				discoveredTargets.WithLabelValues(m.name, s).Set(0)
+				m.metrics.DiscoveredTargets.WithLabelValues(s).Set(0)
 			}
 			if l := len(refTargets); l > 0 {
 				m.targets[poolKey{s, prov.name}] = make(map[string]*targetgroup.Group, l)
@@ -316,7 +290,7 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ
 		case <-ctx.Done():
 			return
 		case tgs, ok := <-updates:
-			receivedUpdates.WithLabelValues(m.name).Inc()
+			m.metrics.ReceivedUpdates.Inc()
 			if !ok {
 				level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name)
 				// Wait for provider cancellation to ensure targets are cleaned up when expected.
@@ -349,11 +323,11 @@ func (m *Manager) sender() {
 		case <-ticker.C: // Some discoverers send updates too often, so we throttle these with the ticker.
 			select {
 			case <-m.triggerSend:
-				sentUpdates.WithLabelValues(m.name).Inc()
+				m.metrics.SentUpdates.Inc()
 				select {
 				case m.syncCh <- m.allGroups():
 				default:
-					delayedUpdates.WithLabelValues(m.name).Inc()
+					m.metrics.DelayedUpdates.Inc()
 					level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle")
 					select {
 					case m.triggerSend <- struct{}{}:
@@ -405,7 +379,7 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group {
 		}
 	}
 	for setName, v := range n {
-		discoveredTargets.WithLabelValues(m.name, setName).Set(float64(v))
+		m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v))
 	}
 	return tSets
 }
@@ -428,6 +402,7 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int {
 		d, err := cfg.NewDiscoverer(DiscovererOptions{
 			Logger:            log.With(m.logger, "discovery", typ, "config", setName),
 			HTTPClientOptions: m.httpOpts,
+			Registerer:        m.registerer,
 		})
 		if err != nil {
 			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName)

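With the collectors hanging off the manager, tests read them through `client_golang`'s testutil helpers rather than through package globals, which is exactly what the test diff below does with `discoveryManager.metrics.FailedConfigs`. A self-contained sketch of that pattern; the `Metrics` shape is the same assumption as in the earlier sketch, reduced to the one field exercised here:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

// Metrics is the assumed per-manager bundle, trimmed to one field.
type Metrics struct {
	FailedConfigs prometheus.Gauge
}

func main() {
	reg := prometheus.NewRegistry()
	m := &Metrics{FailedConfigs: prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "prometheus_sd_failed_configs",
		Help: "Current number of service discovery configurations that failed to load.",
	})}
	reg.MustRegister(m.FailedConfigs)

	m.FailedConfigs.Set(3)
	// testutil.ToFloat64 collects a single metric from one collector;
	// this is how the tests assert on the manager-scoped gauge.
	fmt.Println(testutil.ToFloat64(m.FailedConfigs)) // 3
}
```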
@ -23,6 +23,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
"github.com/go-kit/log"
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
|
client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
@ -664,7 +665,8 @@ func TestTargetUpdatesOrder(t *testing.T) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
discoveryManager := NewManager(ctx, log.NewNopLogger())
|
discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
|
||||||
|
require.NotNil(t, discoveryManager)
|
||||||
discoveryManager.updatert = 100 * time.Millisecond
|
discoveryManager.updatert = 100 * time.Millisecond
|
||||||
|
|
||||||
var totalUpdatesCount int
|
var totalUpdatesCount int
|
||||||
|
@ -778,7 +780,8 @@ func pk(provider, setName string, n int) poolKey {
|
||||||
func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) {
|
func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
discoveryManager := NewManager(ctx, log.NewNopLogger())
|
discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
|
||||||
|
require.NotNil(t, discoveryManager)
|
||||||
discoveryManager.updatert = 100 * time.Millisecond
|
discoveryManager.updatert = 100 * time.Millisecond
|
||||||
go discoveryManager.Run()
|
go discoveryManager.Run()
|
||||||
|
|
||||||
|
@ -790,27 +793,28 @@ func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) {
|
||||||
discoveryManager.ApplyConfig(c)
|
discoveryManager.ApplyConfig(c)
|
||||||
|
|
||||||
syncedTargets := <-discoveryManager.SyncCh()
|
syncedTargets := <-discoveryManager.SyncCh()
|
||||||
require.Equal(t, 1, len(syncedTargets))
|
require.Len(t, syncedTargets, 1)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets["prometheus"]))
|
require.Len(t, syncedTargets["prometheus"], 1)
|
||||||
p := pk("static", "prometheus", 0)
|
p := pk("static", "prometheus", 0)
|
||||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(discoveryManager.targets))
|
require.Len(t, discoveryManager.targets, 1)
|
||||||
|
|
||||||
discoveryManager.ApplyConfig(c)
|
discoveryManager.ApplyConfig(c)
|
||||||
|
|
||||||
syncedTargets = <-discoveryManager.SyncCh()
|
syncedTargets = <-discoveryManager.SyncCh()
|
||||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(discoveryManager.targets))
|
require.Len(t, discoveryManager.targets, 1)
|
||||||
require.Equal(t, 1, len(syncedTargets))
|
require.Len(t, syncedTargets, 1)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets["prometheus"]))
|
require.Len(t, syncedTargets["prometheus"], 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
|
func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
discoveryManager := NewManager(ctx, log.NewNopLogger())
|
discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
|
||||||
|
require.NotNil(t, discoveryManager)
|
||||||
discoveryManager.updatert = 100 * time.Millisecond
|
discoveryManager.updatert = 100 * time.Millisecond
|
||||||
go discoveryManager.Run()
|
go discoveryManager.Run()
|
||||||
|
|
||||||
|
@ -822,12 +826,12 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
|
||||||
discoveryManager.ApplyConfig(c)
|
discoveryManager.ApplyConfig(c)
|
||||||
|
|
||||||
syncedTargets := <-discoveryManager.SyncCh()
|
syncedTargets := <-discoveryManager.SyncCh()
|
||||||
require.Equal(t, 1, len(syncedTargets))
|
require.Len(t, syncedTargets, 1)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets["prometheus"]))
|
require.Len(t, syncedTargets["prometheus"], 1)
|
||||||
p := pk("static", "prometheus", 0)
|
p := pk("static", "prometheus", 0)
|
||||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(discoveryManager.targets))
|
require.Len(t, discoveryManager.targets, 1)
|
||||||
|
|
||||||
c["prometheus2"] = c["prometheus"]
|
c["prometheus2"] = c["prometheus"]
|
||||||
delete(c, "prometheus")
|
delete(c, "prometheus")
|
||||||
|
@ -836,16 +840,17 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
|
||||||
syncedTargets = <-discoveryManager.SyncCh()
|
syncedTargets = <-discoveryManager.SyncCh()
|
||||||
p = pk("static", "prometheus2", 0)
|
p = pk("static", "prometheus2", 0)
|
||||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(discoveryManager.targets))
|
require.Len(t, discoveryManager.targets, 1)
|
||||||
require.Equal(t, 1, len(syncedTargets))
|
require.Len(t, syncedTargets, 1)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets["prometheus2"]))
|
require.Len(t, syncedTargets["prometheus2"], 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testing.T) {
|
func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testing.T) {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
discoveryManager := NewManager(ctx, log.NewNopLogger())
|
discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
|
||||||
|
require.NotNil(t, discoveryManager)
|
||||||
discoveryManager.updatert = 100 * time.Millisecond
|
discoveryManager.updatert = 100 * time.Millisecond
|
||||||
go discoveryManager.Run()
|
go discoveryManager.Run()
|
||||||
|
|
||||||
|
@ -860,30 +865,31 @@ func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testi
|
||||||
c["prometheus2"] = c["prometheus"]
|
c["prometheus2"] = c["prometheus"]
|
||||||
discoveryManager.ApplyConfig(c)
|
discoveryManager.ApplyConfig(c)
|
||||||
syncedTargets := <-discoveryManager.SyncCh()
|
syncedTargets := <-discoveryManager.SyncCh()
|
||||||
require.Equal(t, 2, len(syncedTargets))
|
require.Len(t, syncedTargets, 2)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets["prometheus"]))
|
require.Len(t, syncedTargets["prometheus"], 1)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets["prometheus2"]))
|
require.Len(t, syncedTargets["prometheus2"], 1)
|
||||||
p := pk("static", "prometheus", 0)
|
p := pk("static", "prometheus", 0)
|
||||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 2, len(discoveryManager.targets))
|
require.Len(t, discoveryManager.targets, 2)
|
||||||
|
|
||||||
delete(c, "prometheus")
|
delete(c, "prometheus")
|
||||||
discoveryManager.ApplyConfig(c)
|
discoveryManager.ApplyConfig(c)
|
||||||
syncedTargets = <-discoveryManager.SyncCh()
|
syncedTargets = <-discoveryManager.SyncCh()
|
||||||
p = pk("static", "prometheus2", 0)
|
p = pk("static", "prometheus2", 0)
|
||||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(discoveryManager.targets))
|
require.Len(t, discoveryManager.targets, 1)
|
||||||
require.Equal(t, 1, len(syncedTargets))
|
require.Len(t, syncedTargets, 1)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets["prometheus2"]))
|
require.Len(t, syncedTargets["prometheus2"], 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
|
func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
discoveryManager := NewManager(ctx, log.NewNopLogger())
|
discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
|
||||||
|
require.NotNil(t, discoveryManager)
|
||||||
discoveryManager.updatert = 100 * time.Millisecond
|
discoveryManager.updatert = 100 * time.Millisecond
|
||||||
go discoveryManager.Run()
|
go discoveryManager.Run()
|
||||||
|
|
||||||
|
@ -895,9 +901,9 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
|
||||||
discoveryManager.ApplyConfig(c)
|
discoveryManager.ApplyConfig(c)
|
||||||
|
|
||||||
syncedTargets := <-discoveryManager.SyncCh()
|
syncedTargets := <-discoveryManager.SyncCh()
|
||||||
require.Equal(t, 1, len(syncedTargets))
|
require.Len(t, syncedTargets, 1)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets["prometheus"]))
|
require.Len(t, syncedTargets["prometheus"], 1)
|
||||||
|
|
||||||
var mu sync.Mutex
|
var mu sync.Mutex
|
||||||
c["prometheus2"] = Configs{
|
c["prometheus2"] = Configs{
|
||||||
|
@ -912,39 +918,40 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
|
||||||
// Original targets should be present as soon as possible.
|
// Original targets should be present as soon as possible.
|
||||||
syncedTargets = <-discoveryManager.SyncCh()
|
syncedTargets = <-discoveryManager.SyncCh()
|
||||||
mu.Unlock()
|
mu.Unlock()
|
||||||
require.Equal(t, 1, len(syncedTargets))
|
require.Len(t, syncedTargets, 1)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets["prometheus"]))
|
require.Len(t, syncedTargets["prometheus"], 1)
|
||||||
|
|
||||||
// prometheus2 configs should be ready on second sync.
|
// prometheus2 configs should be ready on second sync.
|
||||||
syncedTargets = <-discoveryManager.SyncCh()
|
syncedTargets = <-discoveryManager.SyncCh()
|
||||||
require.Equal(t, 2, len(syncedTargets))
|
require.Len(t, syncedTargets, 2)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets["prometheus"]))
|
require.Len(t, syncedTargets["prometheus"], 1)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"bar:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"bar:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets["prometheus2"]))
|
require.Len(t, syncedTargets["prometheus2"], 1)
|
||||||
|
|
||||||
p := pk("static", "prometheus", 0)
|
p := pk("static", "prometheus", 0)
|
||||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
||||||
p = pk("lockstatic", "prometheus2", 1)
|
p = pk("lockstatic", "prometheus2", 1)
|
||||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true)
|
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true)
|
||||||
require.Equal(t, 2, len(discoveryManager.targets))
|
require.Len(t, discoveryManager.targets, 2)
|
||||||
|
|
||||||
// Delete part of config and ensure only original targets exist.
|
// Delete part of config and ensure only original targets exist.
|
||||||
delete(c, "prometheus2")
|
delete(c, "prometheus2")
|
||||||
discoveryManager.ApplyConfig(c)
|
discoveryManager.ApplyConfig(c)
|
||||||
syncedTargets = <-discoveryManager.SyncCh()
|
syncedTargets = <-discoveryManager.SyncCh()
|
||||||
require.Equal(t, 1, len(discoveryManager.targets))
|
require.Len(t, discoveryManager.targets, 1)
|
||||||
verifyPresence(t, discoveryManager.targets, pk("static", "prometheus", 0), "{__address__=\"foo:9090\"}", true)
|
verifyPresence(t, discoveryManager.targets, pk("static", "prometheus", 0), "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets))
|
require.Len(t, syncedTargets, 1)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets["prometheus"]))
|
require.Len(t, syncedTargets["prometheus"], 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
|
func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
discoveryManager := NewManager(ctx, log.NewNopLogger())
|
discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
|
||||||
|
require.NotNil(t, discoveryManager)
|
||||||
discoveryManager.updatert = 100 * time.Millisecond
|
discoveryManager.updatert = 100 * time.Millisecond
|
||||||
go discoveryManager.Run()
|
go discoveryManager.Run()
|
||||||
|
|
||||||
|
@ -959,31 +966,32 @@ func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
|
||||||
p := pk("static", "prometheus", 0)
|
p := pk("static", "prometheus", 0)
|
||||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
||||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true)
|
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true)
|
||||||
require.Equal(t, 1, len(discoveryManager.targets))
|
require.Len(t, discoveryManager.targets, 1)
|
||||||
require.Equal(t, 1, len(syncedTargets))
|
require.Len(t, syncedTargets, 1)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true)
|
||||||
require.Equal(t, 2, len(syncedTargets["prometheus"]))
|
require.Len(t, syncedTargets["prometheus"], 2)
|
||||||
|
|
||||||
c["prometheus"] = Configs{
|
c["prometheus"] = Configs{
|
||||||
staticConfig("foo:9090"),
|
staticConfig("foo:9090"),
|
||||||
}
|
}
|
||||||
discoveryManager.ApplyConfig(c)
|
discoveryManager.ApplyConfig(c)
|
||||||
syncedTargets = <-discoveryManager.SyncCh()
|
syncedTargets = <-discoveryManager.SyncCh()
|
||||||
require.Equal(t, 1, len(discoveryManager.targets))
|
require.Len(t, discoveryManager.targets, 1)
|
||||||
p = pk("static", "prometheus", 1)
|
p = pk("static", "prometheus", 1)
|
||||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
||||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", false)
|
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", false)
|
||||||
require.Equal(t, 1, len(discoveryManager.targets))
|
require.Len(t, discoveryManager.targets, 1)
|
||||||
require.Equal(t, 1, len(syncedTargets))
|
require.Len(t, syncedTargets, 1)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets["prometheus"]))
|
require.Len(t, syncedTargets["prometheus"], 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDiscovererConfigs(t *testing.T) {
|
func TestDiscovererConfigs(t *testing.T) {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
discoveryManager := NewManager(ctx, log.NewNopLogger())
|
discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
|
||||||
|
require.NotNil(t, discoveryManager)
|
||||||
discoveryManager.updatert = 100 * time.Millisecond
|
discoveryManager.updatert = 100 * time.Millisecond
|
||||||
go discoveryManager.Run()
|
go discoveryManager.Run()
|
||||||
|
|
||||||
|
@ -1001,12 +1009,12 @@ func TestDiscovererConfigs(t *testing.T) {
|
||||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true)
|
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true)
|
||||||
p = pk("static", "prometheus", 1)
|
p = pk("static", "prometheus", 1)
|
||||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"baz:9090\"}", true)
|
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"baz:9090\"}", true)
|
||||||
require.Equal(t, 2, len(discoveryManager.targets))
|
require.Len(t, discoveryManager.targets, 2)
|
||||||
require.Equal(t, 1, len(syncedTargets))
|
require.Len(t, syncedTargets, 1)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"baz:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"baz:9090\"}", true)
|
||||||
require.Equal(t, 3, len(syncedTargets["prometheus"]))
|
require.Len(t, syncedTargets["prometheus"], 3)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after
|
// TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after
|
||||||
|
@ -1015,7 +1023,8 @@ func TestDiscovererConfigs(t *testing.T) {
|
||||||
func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
|
func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
discoveryManager := NewManager(ctx, log.NewNopLogger())
|
discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
|
||||||
|
require.NotNil(t, discoveryManager)
|
||||||
discoveryManager.updatert = 100 * time.Millisecond
|
discoveryManager.updatert = 100 * time.Millisecond
|
||||||
go discoveryManager.Run()
|
go discoveryManager.Run()
|
||||||
|
|
||||||
|
@ -1029,9 +1038,9 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
|
||||||
syncedTargets := <-discoveryManager.SyncCh()
|
syncedTargets := <-discoveryManager.SyncCh()
|
||||||
p := pk("static", "prometheus", 0)
|
p := pk("static", "prometheus", 0)
|
||||||
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets))
|
require.Len(t, syncedTargets, 1)
|
||||||
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
|
||||||
require.Equal(t, 1, len(syncedTargets["prometheus"]))
|
require.Len(t, syncedTargets["prometheus"], 1)
|
||||||
|
|
||||||
c["prometheus"] = Configs{
|
c["prometheus"] = Configs{
|
||||||
StaticConfig{{}},
|
StaticConfig{{}},
|
||||||
|
@ -1052,8 +1061,8 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
|
||||||
if len(group.Targets) != 0 {
|
if len(group.Targets) != 0 {
|
||||||
t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets))
|
t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets))
|
||||||
}
|
}
|
||||||
require.Equal(t, 1, len(syncedTargets))
|
require.Len(t, syncedTargets, 1)
|
||||||
require.Equal(t, 1, len(syncedTargets["prometheus"]))
|
require.Len(t, syncedTargets["prometheus"], 1)
|
||||||
if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil {
|
if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil {
|
||||||
t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls)
|
t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls)
|
||||||
}
|
}
|
||||||
|
@ -1062,7 +1071,8 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
|
||||||
func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
|
func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
discoveryManager := NewManager(ctx, nil)
|
discoveryManager := NewManager(ctx, nil, prometheus.NewRegistry())
|
||||||
|
require.NotNil(t, discoveryManager)
|
||||||
discoveryManager.updatert = 100 * time.Millisecond
|
discoveryManager.updatert = 100 * time.Millisecond
|
||||||
go discoveryManager.Run()
|
go discoveryManager.Run()
|
||||||
|
|
||||||
|
@@ -1082,11 +1092,11 @@ func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
 	if len(discoveryManager.providers) != 1 {
 		t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers))
 	}
-	require.Equal(t, 2, len(syncedTargets))
+	require.Len(t, syncedTargets, 2)
 	verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
-	require.Equal(t, 1, len(syncedTargets["prometheus"]))
+	require.Len(t, syncedTargets["prometheus"], 1)
 	verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
-	require.Equal(t, 1, len(syncedTargets["prometheus2"]))
+	require.Len(t, syncedTargets["prometheus2"], 1)
 }
 
 func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
@@ -1098,7 +1108,8 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
 	}
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
 
@@ -1144,7 +1155,8 @@ func (s lockStaticDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.
 func TestGaugeFailedConfigs(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
 
@@ -1158,7 +1170,7 @@ func TestGaugeFailedConfigs(t *testing.T) {
 	discoveryManager.ApplyConfig(c)
 	<-discoveryManager.SyncCh()
 
-	failedCount := client_testutil.ToFloat64(failedConfigs)
+	failedCount := client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs)
 	if failedCount != 3 {
 		t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount)
 	}
@@ -1169,7 +1181,7 @@ func TestGaugeFailedConfigs(t *testing.T) {
 	discoveryManager.ApplyConfig(c)
 	<-discoveryManager.SyncCh()
 
-	failedCount = client_testutil.ToFloat64(failedConfigs)
+	failedCount = client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs)
 	if failedCount != 0 {
 		t.Fatalf("Expected to get no failed config, got: %v", failedCount)
 	}
@@ -1300,7 +1312,8 @@ func TestCoordinationWithReceiver(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()
 
-	mgr := NewManager(ctx, nil)
+	mgr := NewManager(ctx, nil, prometheus.NewRegistry())
+	require.NotNil(t, mgr)
 	mgr.updatert = updateDelay
 	go mgr.Run()
 
@@ -1392,10 +1405,11 @@ func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) {
 
 // TestTargetSetTargetGroupsUpdateDuringApplyConfig is used to detect races when
 // ApplyConfig happens at the same time as targets update.
-func TestTargetSetTargetGroupsUpdateDuringApplyConfig(*testing.T) {
+func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger())
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
 
@@ -28,6 +28,7 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 
@@ -83,7 +84,7 @@ func (*SDConfig) Name() string { return "marathon" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(*c, opts.Logger)
+	return NewDiscovery(*c, opts.Logger, opts.Registerer)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -132,7 +133,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Marathon Discovery.
-func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
+func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
 	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd")
 	if err != nil {
 		return nil, err
@@ -154,10 +155,13 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
 		appsClient: fetchApps,
 	}
 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"marathon",
-		time.Duration(conf.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "marathon",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: reg,
+		},
 	)
 	return d, nil
 }
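Every SD constructor in this change gains a trailing prometheus.Registerer. A minimal sketch of how a caller might thread a dedicated registry through the new marathon signature (the configuration value is a placeholder, not taken from this diff):

package main

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery/marathon"
)

func main() {
	// A per-instance registry keeps this discoverer's metrics separate.
	reg := prometheus.NewRegistry()

	conf := marathon.DefaultSDConfig // placeholder configuration
	d, err := marathon.NewDiscovery(conf, log.NewNopLogger(), reg)
	if err != nil {
		fmt.Println("failed to build discovery:", err)
		return
	}
	_ = d // d.Run(ctx, ch) would start the periodic refresh loop.
}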
@@ -21,6 +21,7 @@ import (
 	"net/http/httptest"
 	"testing"
 
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 
 	"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -36,7 +37,7 @@ func testConfig() SDConfig {
 }
 
 func testUpdateServices(client appListClient) ([]*targetgroup.Group, error) {
-	md, err := NewDiscovery(testConfig(), nil)
+	md, err := NewDiscovery(testConfig(), nil, prometheus.NewRegistry())
 	if err != nil {
 		return nil, err
 	}
@@ -129,7 +130,7 @@ func TestMarathonSDSendGroup(t *testing.T) {
 }
 
 func TestMarathonSDRemoveApp(t *testing.T) {
-	md, err := NewDiscovery(testConfig(), nil)
+	md, err := NewDiscovery(testConfig(), nil, prometheus.NewRegistry())
 	if err != nil {
 		t.Fatalf("%s", err)
 	}
discovery/metrics.go (new file, 101 lines)
@@ -0,0 +1,101 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package discovery
+
+import (
+	"fmt"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	clientGoRequestMetrics  = &clientGoRequestMetricAdapter{}
+	clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{}
+)
+
+func init() {
+	clientGoRequestMetrics.RegisterWithK8sGoClient()
+	clientGoWorkloadMetrics.RegisterWithK8sGoClient()
+}
+
+// Metrics to be used with a discovery manager.
+type Metrics struct {
+	FailedConfigs     prometheus.Gauge
+	DiscoveredTargets *prometheus.GaugeVec
+	ReceivedUpdates   prometheus.Counter
+	DelayedUpdates    prometheus.Counter
+	SentUpdates       prometheus.Counter
+}
+
+func NewMetrics(registerer prometheus.Registerer, sdManagerName string) (*Metrics, error) {
+	m := &Metrics{}
+
+	m.FailedConfigs = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name:        "prometheus_sd_failed_configs",
+			Help:        "Current number of service discovery configurations that failed to load.",
+			ConstLabels: prometheus.Labels{"name": sdManagerName},
+		},
+	)
+
+	m.DiscoveredTargets = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name:        "prometheus_sd_discovered_targets",
+			Help:        "Current number of discovered targets.",
+			ConstLabels: prometheus.Labels{"name": sdManagerName},
+		},
+		[]string{"config"},
+	)
+
+	m.ReceivedUpdates = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name:        "prometheus_sd_received_updates_total",
+			Help:        "Total number of update events received from the SD providers.",
+			ConstLabels: prometheus.Labels{"name": sdManagerName},
+		},
+	)
+
+	m.DelayedUpdates = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name:        "prometheus_sd_updates_delayed_total",
+			Help:        "Total number of update events that couldn't be sent immediately.",
+			ConstLabels: prometheus.Labels{"name": sdManagerName},
+		},
+	)
+
+	m.SentUpdates = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name:        "prometheus_sd_updates_total",
+			Help:        "Total number of update events sent to the SD consumers.",
+			ConstLabels: prometheus.Labels{"name": sdManagerName},
+		},
+	)
+
+	metrics := []prometheus.Collector{
+		m.FailedConfigs,
+		m.DiscoveredTargets,
+		m.ReceivedUpdates,
+		m.DelayedUpdates,
+		m.SentUpdates,
+	}
+
+	for _, collector := range metrics {
+		err := registerer.Register(collector)
+		if err != nil {
+			return nil, fmt.Errorf("failed to register discovery manager metrics: %w", err)
+		}
+	}
+
+	return m, nil
+}
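A sketch of how the new NewMetrics constructor might be exercised, reading a gauge back with client_golang's test utility; the manager name "scrape" here is a placeholder:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"

	"github.com/prometheus/prometheus/discovery"
)

func main() {
	reg := prometheus.NewRegistry()

	// The second argument becomes the "name" const label on every metric.
	m, err := discovery.NewMetrics(reg, "scrape")
	if err != nil {
		// Register errors surface here, e.g. a duplicate registration.
		fmt.Println("failed to create discovery metrics:", err)
		return
	}

	m.FailedConfigs.Set(3)
	fmt.Println(testutil.ToFloat64(m.FailedConfigs)) // prints 3
}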
@@ -11,10 +11,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package kubernetes
+package discovery
 
 import (
 	"context"
+	"fmt"
 	"net/url"
 	"time"
 
@@ -23,13 +24,22 @@ import (
 	"k8s.io/client-go/util/workqueue"
 )
 
-const workqueueMetricsNamespace = metricsNamespace + "_workqueue"
+// This file registers metrics used by the Kubernetes Go client (k8s.io/client-go).
+// Unfortunately, k8s.io/client-go metrics are global.
+// If we instantiate multiple k8s SD instances, their k8s/client-go metrics will overlap.
+// To prevent us from displaying misleading metrics, we register k8s.io/client-go metrics
+// outside of the Kubernetes SD.
+
+const (
+	KubernetesMetricsNamespace = "prometheus_sd_kubernetes"
+	workqueueMetricsNamespace  = KubernetesMetricsNamespace + "_workqueue"
+)
 
 var (
 	// Metrics for client-go's HTTP requests.
 	clientGoRequestResultMetricVec = prometheus.NewCounterVec(
 		prometheus.CounterOpts{
-			Namespace: metricsNamespace,
+			Namespace: KubernetesMetricsNamespace,
 			Name:      "http_request_total",
 			Help:      "Total number of HTTP requests to the Kubernetes API by status code.",
 		},
@@ -37,7 +47,7 @@ var (
 	)
 	clientGoRequestLatencyMetricVec = prometheus.NewSummaryVec(
 		prometheus.SummaryOpts{
-			Namespace: metricsNamespace,
+			Namespace:  KubernetesMetricsNamespace,
 			Name:       "http_request_duration_seconds",
 			Help:       "Summary of latencies for HTTP requests to the Kubernetes API by endpoint.",
 			Objectives: map[float64]float64{},
@@ -109,17 +119,38 @@ func (noopMetric) Set(float64) {}
 // Definition of client-go metrics adapters for HTTP requests observation.
 type clientGoRequestMetricAdapter struct{}
 
-func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer) {
+// Returns all of the Prometheus metrics derived from k8s.io/client-go.
+// This may be used to register and unregister the metrics.
+func clientGoMetrics() []prometheus.Collector {
+	return []prometheus.Collector{
+		clientGoRequestResultMetricVec,
+		clientGoRequestLatencyMetricVec,
+		clientGoWorkqueueDepthMetricVec,
+		clientGoWorkqueueAddsMetricVec,
+		clientGoWorkqueueLatencyMetricVec,
+		clientGoWorkqueueUnfinishedWorkSecondsMetricVec,
+		clientGoWorkqueueLongestRunningProcessorMetricVec,
+		clientGoWorkqueueWorkDurationMetricVec,
+	}
+}
+
+func RegisterK8sClientMetricsWithPrometheus(registerer prometheus.Registerer) error {
+	for _, collector := range clientGoMetrics() {
+		err := registerer.Register(collector)
+		if err != nil {
+			return fmt.Errorf("failed to register Kubernetes Go Client metrics: %w", err)
		}
+	}
+	return nil
+}
+
+func (f *clientGoRequestMetricAdapter) RegisterWithK8sGoClient() {
 	metrics.Register(
 		metrics.RegisterOpts{
 			RequestLatency: f,
 			RequestResult:  f,
 		},
 	)
-	registerer.MustRegister(
-		clientGoRequestResultMetricVec,
-		clientGoRequestLatencyMetricVec,
-	)
 }
 
 func (clientGoRequestMetricAdapter) Increment(_ context.Context, code, _, _ string) {
@@ -133,16 +164,8 @@ func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.U
 // Definition of client-go workqueue metrics provider definition.
 type clientGoWorkqueueMetricsProvider struct{}
 
-func (f *clientGoWorkqueueMetricsProvider) Register(registerer prometheus.Registerer) {
+func (f *clientGoWorkqueueMetricsProvider) RegisterWithK8sGoClient() {
 	workqueue.SetProvider(f)
-	registerer.MustRegister(
-		clientGoWorkqueueDepthMetricVec,
-		clientGoWorkqueueAddsMetricVec,
-		clientGoWorkqueueLatencyMetricVec,
-		clientGoWorkqueueWorkDurationMetricVec,
-		clientGoWorkqueueUnfinishedWorkSecondsMetricVec,
-		clientGoWorkqueueLongestRunningProcessorMetricVec,
-	)
 }
 
 func (f *clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
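Since the client-go adapters hook into k8s.io/client-go exactly once (in the package init shown earlier), a registry only needs the collector side. A sketch, assuming nothing else has registered these collectors on the registry yet:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
)

func main() {
	reg := prometheus.NewRegistry()

	// Attach the process-global client-go collectors (HTTP request and
	// workqueue metrics) to this registry. The hooks into client-go itself
	// were installed once by the discovery package's init().
	if err := discovery.RegisterK8sClientMetricsWithPrometheus(reg); err != nil {
		fmt.Println("failed to register client-go metrics:", err)
	}
}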
@@ -26,6 +26,7 @@ import (
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/client"
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 
@@ -80,7 +81,7 @@ func (*DockerSDConfig) Name() string { return "docker" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *DockerSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDockerDiscovery(c, opts.Logger)
+	return NewDockerDiscovery(c, opts.Logger, opts.Registerer)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -114,7 +115,7 @@ type DockerDiscovery struct {
 }
 
 // NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets.
-func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger) (*DockerDiscovery, error) {
+func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, reg prometheus.Registerer) (*DockerDiscovery, error) {
 	var err error
 
 	d := &DockerDiscovery{
@@ -165,10 +166,13 @@ func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger) (*DockerDiscove
 	}
 
 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"docker",
-		time.Duration(conf.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "docker",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: reg,
+		},
 	)
 	return d, nil
 }
@@ -19,6 +19,7 @@ import (
 	"testing"
 
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
@@ -37,19 +38,19 @@ host: %s
 	var cfg DockerSDConfig
 	require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
 
-	d, err := NewDockerDiscovery(&cfg, log.NewNopLogger())
+	d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
 	require.NoError(t, err)
 
 	ctx := context.Background()
 	tgs, err := d.refresh(ctx)
 	require.NoError(t, err)
 
-	require.Equal(t, 1, len(tgs))
+	require.Len(t, tgs, 1)
 
 	tg := tgs[0]
 	require.NotNil(t, tg)
 	require.NotNil(t, tg.Targets)
-	require.Equal(t, 3, len(tg.Targets))
+	require.Len(t, tg.Targets, 3)
 
 	for i, lbls := range []model.LabelSet{
 		{
@@ -23,6 +23,7 @@ import (
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/client"
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/version"
@@ -74,7 +75,7 @@ func (*DockerSwarmSDConfig) Name() string { return "dockerswarm" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *DockerSwarmSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger)
+	return NewDiscovery(c, opts.Logger, opts.Registerer)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -117,7 +118,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger) (*Discovery, error) {
+func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
 	var err error
 
 	d := &Discovery{
@@ -168,10 +169,13 @@ func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger) (*Discovery, err
 	}
 
 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"dockerswarm",
-		time.Duration(conf.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "dockerswarm",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: reg,
+		},
 	)
 	return d, nil
 }
@@ -19,6 +19,7 @@ import (
 	"testing"
 
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
@@ -38,19 +39,19 @@ host: %s
 	var cfg DockerSwarmSDConfig
 	require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
 	require.NoError(t, err)
 
 	ctx := context.Background()
 	tgs, err := d.refresh(ctx)
 	require.NoError(t, err)
 
-	require.Equal(t, 1, len(tgs))
+	require.Len(t, tgs, 1)
 
 	tg := tgs[0]
 	require.NotNil(t, tg)
 	require.NotNil(t, tg.Targets)
-	require.Equal(t, 5, len(tg.Targets))
+	require.Len(t, tg.Targets, 5)
 
 	for i, lbls := range []model.LabelSet{
 		{
@@ -19,6 +19,7 @@ import (
 	"testing"
 
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
@@ -38,19 +39,19 @@ host: %s
 	var cfg DockerSwarmSDConfig
 	require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
 	require.NoError(t, err)
 
 	ctx := context.Background()
 	tgs, err := d.refresh(ctx)
 	require.NoError(t, err)
 
-	require.Equal(t, 1, len(tgs))
+	require.Len(t, tgs, 1)
 
 	tg := tgs[0]
 	require.NotNil(t, tg)
 	require.NotNil(t, tg.Targets)
-	require.Equal(t, 15, len(tg.Targets))
+	require.Len(t, tg.Targets, 15)
 
 	for i, lbls := range []model.LabelSet{
 		{
@@ -332,19 +333,19 @@ filters:
 	var cfg DockerSwarmSDConfig
 	require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
 	require.NoError(t, err)
 
 	ctx := context.Background()
 	tgs, err := d.refresh(ctx)
 	require.NoError(t, err)
 
-	require.Equal(t, 1, len(tgs))
+	require.Len(t, tgs, 1)
 
 	tg := tgs[0]
 	require.NotNil(t, tg, "tg should not be nil")
 	require.NotNil(t, tg.Targets, "tg.targets should not be nil")
-	require.Equal(t, 4, len(tg.Targets))
+	require.Len(t, tg.Targets, 4)
 
 	for i, lbls := range []model.LabelSet{
 		{
@@ -19,6 +19,7 @@ import (
 	"testing"
 
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
@@ -38,19 +39,19 @@ host: %s
 	var cfg DockerSwarmSDConfig
 	require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
 	require.NoError(t, err)
 
 	ctx := context.Background()
 	tgs, err := d.refresh(ctx)
 	require.NoError(t, err)
 
-	require.Equal(t, 1, len(tgs))
+	require.Len(t, tgs, 1)
 
 	tg := tgs[0]
 	require.NotNil(t, tg)
 	require.NotNil(t, tg.Targets)
-	require.Equal(t, 27, len(tg.Targets))
+	require.Len(t, tg.Targets, 27)
 
 	for i, lbls := range []model.LabelSet{
 		{
@@ -49,27 +49,18 @@ const (
 )
 
 // DefaultSDConfig is the default nomad SD configuration.
-var (
-	DefaultSDConfig = SDConfig{
-		AllowStale:       true,
-		HTTPClientConfig: config.DefaultHTTPClientConfig,
-		Namespace:        "default",
-		RefreshInterval:  model.Duration(60 * time.Second),
-		Region:           "global",
-		Server:           "http://localhost:4646",
-		TagSeparator:     ",",
-	}
-
-	failuresCount = prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Name: "prometheus_sd_nomad_failures_total",
-			Help: "Number of nomad service discovery refresh failures.",
-		})
-)
+var DefaultSDConfig = SDConfig{
+	AllowStale:       true,
+	HTTPClientConfig: config.DefaultHTTPClientConfig,
+	Namespace:        "default",
+	RefreshInterval:  model.Duration(60 * time.Second),
+	Region:           "global",
+	Server:           "http://localhost:4646",
+	TagSeparator:     ",",
+}
 
 func init() {
 	discovery.RegisterConfig(&SDConfig{})
-	prometheus.MustRegister(failuresCount)
 }
 
 // SDConfig is the configuration for nomad based service discovery.
@@ -88,7 +79,7 @@ func (*SDConfig) Name() string { return "nomad" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger)
+	return NewDiscovery(c, opts.Logger, opts.Registerer)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -121,10 +112,11 @@ type Discovery struct {
 	region       string
 	server       string
 	tagSeparator string
+	failuresCount prometheus.Counter
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
 	d := &Discovery{
 		allowStale: conf.AllowStale,
 		namespace:  conf.Namespace,
@@ -132,6 +124,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 		region:       conf.Region,
 		server:       conf.Server,
 		tagSeparator: conf.TagSeparator,
+		failuresCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Name: "prometheus_sd_nomad_failures_total",
+				Help: "Number of nomad service discovery refresh failures.",
+			}),
 	}
 
 	HTTPClient, err := config.NewClientFromConfig(conf.HTTPClientConfig, "nomad_sd")
@@ -153,10 +150,14 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 	d.client = client
 
 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"nomad",
-		time.Duration(conf.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "nomad",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: reg,
+			Metrics:  []prometheus.Collector{d.failuresCount},
+		},
 	)
 	return d, nil
 }
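The nomad failure counter moves from a package-level global (registered via MustRegister in init) onto the Discovery value, and rides into the refresh loop through Options.Metrics. A self-contained sketch of that ownership change, with a simulated API call:

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

// exampleDiscovery mirrors the pattern above: the counter is a per-instance
// field, so two SD instances no longer share or double-register one global.
type exampleDiscovery struct {
	failuresCount prometheus.Counter
}

func (d *exampleDiscovery) refresh() error {
	if err := errors.New("simulated fetch failure"); err != nil { // placeholder API call
		d.failuresCount.Inc() // was: a package-level failuresCount.Inc()
		return err
	}
	return nil
}

func main() {
	d := &exampleDiscovery{
		failuresCount: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_sd_nomad_failures_total",
			Help: "Number of nomad service discovery refresh failures.",
		}),
	}
	_ = d.refresh()
	fmt.Println(testutil.ToFloat64(d.failuresCount)) // prints 1
}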
|
@ -167,7 +168,7 @@ func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) {
|
||||||
}
|
}
|
||||||
stubs, _, err := d.client.Services().List(opts)
|
stubs, _, err := d.client.Services().List(opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
failuresCount.Inc()
|
d.failuresCount.Inc()
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -179,7 +180,7 @@ func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) {
 	for _, service := range stub.Services {
 		instances, _, err := d.client.Services().Get(service.ServiceName, opts)
 		if err != nil {
-			failuresCount.Inc()
+			d.failuresCount.Inc()
 			return nil, fmt.Errorf("failed to fetch services: %w", err)
 		}
 
@@ -22,6 +22,7 @@ import (
 	"testing"
 
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 )
@@ -127,7 +128,7 @@ func TestConfiguredService(t *testing.T) {
 	conf := &SDConfig{
 		Server: "http://localhost:4646",
 	}
-	_, err := NewDiscovery(conf, nil)
+	_, err := NewDiscovery(conf, nil, prometheus.NewRegistry())
 	require.NoError(t, err)
 }
 
@@ -141,18 +142,18 @@ func TestNomadSDRefresh(t *testing.T) {
 
 	cfg := DefaultSDConfig
 	cfg.Server = endpoint.String()
-	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
 	require.NoError(t, err)
 
 	tgs, err := d.refresh(context.Background())
 	require.NoError(t, err)
 
-	require.Equal(t, 1, len(tgs))
+	require.Len(t, tgs, 1)
 
 	tg := tgs[0]
 	require.NotNil(t, tg)
 	require.NotNil(t, tg.Targets)
-	require.Equal(t, 1, len(tg.Targets))
+	require.Len(t, tg.Targets, 1)
 
 	lbls := model.LabelSet{
 		"__address__": model.LabelValue("127.0.0.1:30456"),
@@ -53,12 +53,12 @@ func TestOpenstackSDHypervisorRefresh(t *testing.T) {
 	hypervisor, _ := mock.openstackAuthSuccess()
 	ctx := context.Background()
 	tgs, err := hypervisor.refresh(ctx)
-	require.Equal(t, 1, len(tgs))
+	require.Len(t, tgs, 1)
 	tg := tgs[0]
 	require.NoError(t, err)
 	require.NotNil(t, tg)
 	require.NotNil(t, tg.Targets)
-	require.Equal(t, 2, len(tg.Targets))
+	require.Len(t, tg.Targets, 2)
 
 	for l, v := range map[string]string{
 		"__address__": "172.16.70.14:0",
@@ -145,16 +145,16 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 			openstackLabelUserID: model.LabelValue(s.UserID),
 		}
 
-		flavorId, ok := s.Flavor["id"].(string)
+		flavorID, ok := s.Flavor["id"].(string)
 		if !ok {
 			level.Warn(i.logger).Log("msg", "Invalid type for flavor id, expected string")
 			continue
 		}
-		labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorId)
+		labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID)
 
-		imageId, ok := s.Image["id"].(string)
+		imageID, ok := s.Image["id"].(string)
 		if ok {
-			labels[openstackLabelInstanceImage] = model.LabelValue(imageId)
+			labels[openstackLabelInstanceImage] = model.LabelValue(imageID)
 		}
 
 		for k, v := range s.Metadata {
@@ -61,12 +61,12 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
 	tgs, err := instance.refresh(ctx)
 
 	require.NoError(t, err)
-	require.Equal(t, 1, len(tgs))
+	require.Len(t, tgs, 1)
 
 	tg := tgs[0]
 	require.NotNil(t, tg)
 	require.NotNil(t, tg.Targets)
-	require.Equal(t, 4, len(tg.Targets))
+	require.Len(t, tg.Targets, 4)
 
 	for i, lbls := range []model.LabelSet{
 		{
@@ -24,6 +24,7 @@ import (
 	"github.com/gophercloud/gophercloud"
 	"github.com/gophercloud/gophercloud/openstack"
 	"github.com/mwitkow/go-conntrack"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 
@@ -70,7 +71,7 @@ func (*SDConfig) Name() string { return "openstack" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger)
+	return NewDiscovery(c, opts.Logger, opts.Registerer)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -134,16 +135,19 @@ type refresher interface {
 }
 
 // NewDiscovery returns a new OpenStack Discoverer which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, l log.Logger) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, l log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) {
 	r, err := newRefresher(conf, l)
 	if err != nil {
 		return nil, err
 	}
 	return refresh.NewDiscovery(
-		l,
-		"openstack",
-		time.Duration(conf.RefreshInterval),
-		r.refresh,
+		refresh.Options{
+			Logger:   l,
+			Mech:     "openstack",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: r.refresh,
+			Registry: reg,
+		},
 	), nil
 }
 
@@ -47,11 +47,11 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr
 	targetGroups, err := d.refresh(ctx)
 	require.NoError(t, err)
 
-	require.Equal(t, 1, len(targetGroups))
+	require.Len(t, targetGroups, 1)
 	targetGroup := targetGroups[0]
 	require.NotNil(t, targetGroup)
 	require.NotNil(t, targetGroup.Targets)
-	require.Equal(t, 1, len(targetGroup.Targets))
+	require.Len(t, targetGroup.Targets, 1)
 
 	for i, lbls := range []model.LabelSet{
 		{
@@ -22,6 +22,7 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/ovh/go-ovh/ovh"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 
@@ -93,7 +94,7 @@ func createClient(config *SDConfig) (*ovh.Client, error) {
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, options.Logger)
+	return NewDiscovery(c, options.Logger, options.Registerer)
 }
 
 func init() {
@@ -140,16 +141,19 @@ func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) {
 }
 
 // NewDiscovery returns a new OVHcloud Discoverer which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) {
 	r, err := newRefresher(conf, logger)
 	if err != nil {
 		return nil, err
 	}
 
 	return refresh.NewDiscovery(
-		logger,
-		"ovhcloud",
-		time.Duration(conf.RefreshInterval),
-		r.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "ovhcloud",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: r.refresh,
+			Registry: reg,
+		},
 	), nil
 }
@@ -18,6 +18,7 @@ import (
 	"fmt"
 	"testing"
 
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
@@ -122,7 +123,8 @@ func TestDiscoverer(t *testing.T) {
 	conf, _ := getMockConf("vps")
 	logger := testutil.NewLogger(t)
 	_, err := conf.NewDiscoverer(discovery.DiscovererOptions{
 		Logger: logger,
+		Registerer: prometheus.NewRegistry(),
 	})
 
 	require.NoError(t, err)
@@ -49,11 +49,11 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr
 	targetGroups, err := d.refresh(ctx)
 	require.NoError(t, err)
 
-	require.Equal(t, 1, len(targetGroups))
+	require.Len(t, targetGroups, 1)
 	targetGroup := targetGroups[0]
 	require.NotNil(t, targetGroup)
 	require.NotNil(t, targetGroup.Targets)
-	require.Equal(t, 1, len(targetGroup.Targets))
+	require.Len(t, targetGroup.Targets, 1)
 	for i, lbls := range []model.LabelSet{
 		{
 			"__address__": "192.0.2.1",
@@ -29,6 +29,7 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/grafana/regexp"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/version"
@@ -83,7 +84,7 @@ func (*SDConfig) Name() string { return "puppetdb" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger)
+	return NewDiscovery(c, opts.Logger, opts.Registerer)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -130,7 +131,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new PuppetDB discovery for the given config.
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -156,10 +157,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 	}
 
 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"http",
-		time.Duration(conf.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "http",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: reg,
+		},
 	)
 	return d, nil
 }
@@ -23,6 +23,7 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
@@ -62,7 +63,7 @@ func TestPuppetSlashInURL(t *testing.T) {
 		Port:            80,
 		RefreshInterval: model.Duration(30 * time.Second),
 	}
-	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
 	require.NoError(t, err)
 	require.Equal(t, apiURL, d.url)
 }
@@ -79,7 +80,7 @@ func TestPuppetDBRefresh(t *testing.T) {
 		RefreshInterval: model.Duration(30 * time.Second),
 	}
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -105,7 +106,7 @@ func TestPuppetDBRefresh(t *testing.T) {
 			Source: ts.URL + "/pdb/query/v4?query=vhosts",
 		},
 	}
-	require.Equal(t, tgs, expectedTargets)
+	require.Equal(t, expectedTargets, tgs)
 }
 
 func TestPuppetDBRefreshWithParameters(t *testing.T) {
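The argument swap in require.Equal above is behavior-preserving; testify's signature is Equal(t, expected, actual), so the old order only mislabeled the two sides in failure messages. A tiny illustration with hypothetical values:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestExpectedActualOrder(t *testing.T) {
	expected := []string{"vhost-a"} // hypothetical expectation
	actual := []string{"vhost-a"}   // hypothetical result under test

	// Correct order: a mismatch is reported as "expected ..., actual ...".
	require.Equal(t, expected, actual)
}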
@@ -120,7 +121,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) {
 		RefreshInterval: model.Duration(30 * time.Second),
 	}
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -156,7 +157,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) {
 			Source: ts.URL + "/pdb/query/v4?query=vhosts",
 		},
 	}
-	require.Equal(t, tgs, expectedTargets)
+	require.Equal(t, expectedTargets, tgs)
 }
 
 func TestPuppetDBInvalidCode(t *testing.T) {
@@ -172,7 +173,7 @@ func TestPuppetDBInvalidCode(t *testing.T) {
 		RefreshInterval: model.Duration(30 * time.Second),
 	}
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -193,7 +194,7 @@ func TestPuppetDBInvalidFormat(t *testing.T) {
 		RefreshInterval: model.Duration(30 * time.Second),
 	}
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -22,29 +22,17 @@ import (
 	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 
+	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
-var (
-	failuresCount = prometheus.NewCounterVec(
-		prometheus.CounterOpts{
-			Name: "prometheus_sd_refresh_failures_total",
-			Help: "Number of refresh failures for the given SD mechanism.",
-		},
-		[]string{"mechanism"},
-	)
-	duration = prometheus.NewSummaryVec(
-		prometheus.SummaryOpts{
-			Name:       "prometheus_sd_refresh_duration_seconds",
-			Help:       "The duration of a refresh in seconds for the given SD mechanism.",
-			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
-		},
-		[]string{"mechanism"},
-	)
-)
-
-func init() {
-	prometheus.MustRegister(duration, failuresCount)
+type Options struct {
+	Logger   log.Logger
+	Mech     string
+	Interval time.Duration
+	RefreshF func(ctx context.Context) ([]*targetgroup.Group, error)
+	Registry prometheus.Registerer
+	Metrics  []prometheus.Collector
 }
 
 // Discovery implements the Discoverer interface.
@ -54,25 +42,62 @@ type Discovery struct {
|
||||||
refreshf func(ctx context.Context) ([]*targetgroup.Group, error)
|
refreshf func(ctx context.Context) ([]*targetgroup.Group, error)
|
||||||
|
|
||||||
failures prometheus.Counter
|
failures prometheus.Counter
|
||||||
duration prometheus.Observer
|
duration prometheus.Summary
|
||||||
|
|
||||||
|
metricRegisterer discovery.MetricRegisterer
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewDiscovery returns a Discoverer function that calls a refresh() function at every interval.
|
// NewDiscovery returns a Discoverer function that calls a refresh() function at every interval.
|
||||||
func NewDiscovery(l log.Logger, mech string, interval time.Duration, refreshf func(ctx context.Context) ([]*targetgroup.Group, error)) *Discovery {
|
func NewDiscovery(opts Options) *Discovery {
|
||||||
if l == nil {
|
var logger log.Logger
|
||||||
l = log.NewNopLogger()
|
if opts.Logger == nil {
|
||||||
|
logger = log.NewNopLogger()
|
||||||
|
} else {
|
||||||
|
logger = opts.Logger
|
||||||
}
|
}
|
||||||
return &Discovery{
|
|
||||||
logger: l,
|
d := Discovery{
|
||||||
interval: interval,
|
logger: logger,
|
||||||
refreshf: refreshf,
|
interval: opts.Interval,
|
||||||
failures: failuresCount.WithLabelValues(mech),
|
refreshf: opts.RefreshF,
|
||||||
duration: duration.WithLabelValues(mech),
|
failures: prometheus.NewCounter(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "prometheus_sd_refresh_failures_total",
|
||||||
|
Help: "Number of refresh failures for the given SD mechanism.",
|
||||||
|
ConstLabels: prometheus.Labels{
|
||||||
|
"mechanism": opts.Mech,
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
duration: prometheus.NewSummary(
|
||||||
|
prometheus.SummaryOpts{
|
||||||
|
Name: "prometheus_sd_refresh_duration_seconds",
|
||||||
|
Help: "The duration of a refresh in seconds for the given SD mechanism.",
|
||||||
|
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
||||||
|
ConstLabels: prometheus.Labels{
|
||||||
|
"mechanism": opts.Mech,
|
||||||
|
},
|
||||||
|
}),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
metrics := []prometheus.Collector{d.failures, d.duration}
|
||||||
|
if opts.Metrics != nil {
|
||||||
|
metrics = append(metrics, opts.Metrics...)
|
||||||
|
}
|
||||||
|
|
||||||
|
d.metricRegisterer = discovery.NewMetricRegisterer(opts.Registry, metrics)
|
||||||
|
|
||||||
|
return &d
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run implements the Discoverer interface.
|
// Run implements the Discoverer interface.
|
||||||
func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
|
func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
|
||||||
|
err := d.metricRegisterer.RegisterMetrics()
|
||||||
|
if err != nil {
|
||||||
|
level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer d.metricRegisterer.UnregisterMetrics()
|
||||||
|
|
||||||
// Get an initial set right away.
|
// Get an initial set right away.
|
||||||
tgs, err := d.refresh(ctx)
|
tgs, err := d.refresh(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
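The net effect of this hunk: each refresh.Discovery now owns its two collectors, created with the mechanism name as a const label, and registers them against the caller-supplied Registry only while Run() is active. A minimal sketch of calling the new constructor, assuming the Options type shown above; the no-op refresh function and the "example" mechanism name are illustrative only, not part of the diff:

package main

import (
	"context"
	"errors"
	"time"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

func main() {
	// Illustrative no-op refresh function.
	noop := func(ctx context.Context) ([]*targetgroup.Group, error) {
		return nil, errors.New("not implemented")
	}

	// Each Discovery instance creates its own collectors, so two
	// instances of the same mechanism need distinct registries;
	// otherwise the second RegisterMetrics() call inside Run()
	// would fail as a duplicate registration.
	d1 := refresh.NewDiscovery(refresh.Options{
		Mech:     "example",
		Interval: 10 * time.Second,
		RefreshF: noop,
		Registry: prometheus.NewRegistry(),
	})
	d2 := refresh.NewDiscovery(refresh.Options{
		Mech:     "example",
		Interval: 10 * time.Second,
		RefreshF: noop,
		Registry: prometheus.NewRegistry(),
	})
	_, _ = d1, d2
}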
@@ -19,6 +19,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
@@ -65,7 +66,15 @@ func TestRefresh(t *testing.T) {
 		return nil, fmt.Errorf("some error")
 	}
 	interval := time.Millisecond
-	d := NewDiscovery(nil, "test", interval, refresh)
+	d := NewDiscovery(
+		Options{
+			Logger:   nil,
+			Mech:     "test",
+			Interval: interval,
+			RefreshF: refresh,
+			Registry: prometheus.NewRegistry(),
+		},
+	)
 
 	ch := make(chan []*targetgroup.Group)
 	ctx, cancel := context.WithCancel(context.Background())
@@ -55,12 +55,12 @@ api_url: %s
 	tgs, err := d.refresh(ctx)
 	require.NoError(t, err)
 
-	require.Equal(t, 1, len(tgs))
+	require.Len(t, tgs, 1)
 
 	tg := tgs[0]
 	require.NotNil(t, tg)
 	require.NotNil(t, tg.Targets)
-	require.Equal(t, 2, len(tg.Targets))
+	require.Len(t, tg.Targets, 2)
 
 	for i, lbls := range []model.LabelSet{
 		{
@@ -161,5 +161,5 @@ api_url: %s
 	tgs, err := d.refresh(ctx)
 	require.NoError(t, err)
 
-	require.Equal(t, 1, len(tgs))
+	require.Len(t, tgs, 1)
 }
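The assertion rewrites in this and the following test diffs all follow one pattern, presumably driven by the newly enabled testify lint checks: length and emptiness checks move to testify's dedicated helpers, and require.Equal puts the expected value first. A small self-contained illustration; the test name and data are invented for this sketch:

package main

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestAssertionStyle(t *testing.T) {
	tgs := []string{"a"}

	// Before: require.Equal(t, 1, len(tgs))
	require.Len(t, tgs, 1)

	var empty []string
	// Before: require.Equal(t, len(empty), 0)
	require.Empty(t, empty)

	// For require.Equal, the expected value comes first.
	require.Equal(t, "a", tgs[0])
}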
@@ -23,6 +23,7 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/scaleway/scaleway-sdk-go/scw"
@@ -160,7 +161,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 }
 
 func (c SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(&c, options.Logger)
+	return NewDiscovery(&c, options.Logger, options.Registerer)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -177,17 +178,20 @@ func init() {
 // the Discoverer interface.
 type Discovery struct{}
 
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) {
 	r, err := newRefresher(conf)
 	if err != nil {
 		return nil, err
 	}
 
 	return refresh.NewDiscovery(
-		logger,
-		"scaleway",
-		time.Duration(conf.RefreshInterval),
-		r.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "scaleway",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: r.refresh,
+			Registry: reg,
+		},
 	), nil
 }
@@ -26,6 +26,7 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/mwitkow/go-conntrack"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 
@@ -74,7 +75,7 @@ func (*SDConfig) Name() string { return "triton" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return New(opts.Logger, c)
+	return New(opts.Logger, c, opts.Registerer)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -138,7 +139,7 @@ type Discovery struct {
 }
 
 // New returns a new Discovery which periodically refreshes its targets.
-func New(logger log.Logger, conf *SDConfig) (*Discovery, error) {
+func New(logger log.Logger, conf *SDConfig, reg prometheus.Registerer) (*Discovery, error) {
 	tls, err := config.NewTLSConfig(&conf.TLSConfig)
 	if err != nil {
 		return nil, err
@@ -159,10 +160,13 @@ func New(logger log.Logger, conf *SDConfig) (*Discovery, error) {
 		sdConfig: conf,
 	}
 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"triton",
-		time.Duration(conf.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "triton",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: reg,
+		},
 	)
 	return d, nil
 }
@@ -24,6 +24,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
@@ -79,7 +80,7 @@ var (
 )
 
 func newTritonDiscovery(c SDConfig) (*Discovery, error) {
-	return New(nil, &c)
+	return New(nil, &c, prometheus.NewRegistry())
 }
 
 func TestTritonSDNew(t *testing.T) {
@@ -155,7 +156,7 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) {
 
 	tgts := testTritonSDRefresh(t, conf, dstr)
 	require.NotNil(t, tgts)
-	require.Equal(t, 2, len(tgts))
+	require.Len(t, tgts, 2)
 }
 
 func TestTritonSDRefreshNoServer(t *testing.T) {
@@ -163,7 +164,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) {
 
 	_, err := td.refresh(context.Background())
 	require.Error(t, err)
-	require.Equal(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint"), true)
+	require.True(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint"))
 }
 
 func TestTritonSDRefreshCancelled(t *testing.T) {
@@ -173,7 +174,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) {
 	cancel()
 	_, err := td.refresh(ctx)
 	require.Error(t, err)
-	require.Equal(t, strings.Contains(err.Error(), context.Canceled.Error()), true)
+	require.True(t, strings.Contains(err.Error(), context.Canceled.Error()))
 }
 
 func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
@@ -188,7 +189,7 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
 
 	tgts := testTritonSDRefresh(t, cnconf, dstr)
 	require.NotNil(t, tgts)
-	require.Equal(t, 2, len(tgts))
+	require.Len(t, tgts, 2)
 }
 
 func TestTritonSDRefreshCNsWithHostname(t *testing.T) {
@@ -205,7 +206,7 @@ func TestTritonSDRefreshCNsWithHostname(t *testing.T) {
 
 	tgts := testTritonSDRefresh(t, cnconf, dstr)
 	require.NotNil(t, tgts)
-	require.Equal(t, 2, len(tgts))
+	require.Len(t, tgts, 2)
 }
 
 func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet {
@@ -235,7 +236,7 @@ func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet
 
 	tgs, err := td.refresh(context.Background())
 	require.NoError(t, err)
-	require.Equal(t, 1, len(tgs))
+	require.Len(t, tgs, 1)
 	tg := tgs[0]
 	require.NotNil(t, tg)
72  discovery/util.go  Normal file
@@ -0,0 +1,72 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package discovery
+
+import (
+	"fmt"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// A utility to be used by implementations of discovery.Discoverer
+// which need to manage the lifetime of their metrics.
+type MetricRegisterer interface {
+	RegisterMetrics() error
+	UnregisterMetrics()
+}
+
+// metricRegistererImpl is an implementation of MetricRegisterer.
+type metricRegistererImpl struct {
+	reg     prometheus.Registerer
+	metrics []prometheus.Collector
+}
+
+var _ MetricRegisterer = &metricRegistererImpl{}
+
+// Creates an instance of a MetricRegisterer.
+// Typically called inside the implementation of the NewDiscoverer() method.
+func NewMetricRegisterer(reg prometheus.Registerer, metrics []prometheus.Collector) MetricRegisterer {
+	return &metricRegistererImpl{
+		reg:     reg,
+		metrics: metrics,
+	}
+}
+
+// RegisterMetrics registers the metrics with a Prometheus registerer.
+// If any metric fails to register, it will unregister all metrics that
+// were registered so far, and return an error.
+// Typically called at the start of the SD's Run() method.
+func (rh *metricRegistererImpl) RegisterMetrics() error {
+	for _, collector := range rh.metrics {
+		err := rh.reg.Register(collector)
+		if err != nil {
+			// Unregister all metrics that were registered so far.
+			// This is so that if RegisterMetrics() gets called again,
+			// there will not be an error due to a duplicate registration.
+			rh.UnregisterMetrics()
+
+			return fmt.Errorf("failed to register metric: %w", err)
+		}
+	}
+	return nil
+}
+
+// UnregisterMetrics unregisters the metrics from the same Prometheus
+// registerer which was used to register them.
+// Typically called at the end of the SD's Run() method by a defer statement.
+func (rh *metricRegistererImpl) UnregisterMetrics() {
+	for _, collector := range rh.metrics {
+		rh.reg.Unregister(collector)
+	}
+}
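The new discovery/util.go gives every SD a uniform way to tie collector lifetime to the discoverer's run, which is what the Run() changes in refresh.go above and xds.go below rely on. A hedged sketch of the intended call pattern; the counter name here is hypothetical and used only for illustration:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
)

func main() {
	reg := prometheus.NewRegistry()

	// Hypothetical collector standing in for an SD's real metrics.
	refreshes := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_sd_refreshes_total",
		Help: "Illustrative counter for this sketch only.",
	})

	mr := discovery.NewMetricRegisterer(reg, []prometheus.Collector{refreshes})

	// Typical Run() shape: register on entry, unregister on exit, so a
	// stopped and restarted discoverer never hits a duplicate-registration
	// error.
	if err := mr.RegisterMetrics(); err != nil {
		fmt.Println("unable to register metrics:", err)
		return
	}
	defer mr.UnregisterMetrics()

	refreshes.Inc()
}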
@@ -25,6 +25,7 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/kolo/xmlrpc"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 
@@ -115,7 +116,7 @@ func (*SDConfig) Name() string { return "uyuni" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger)
+	return NewDiscovery(c, opts.Logger, opts.Registerer)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -203,7 +204,7 @@ func getEndpointInfoForSystems(
 }
 
 // NewDiscovery returns a uyuni discovery for the given configuration.
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
 	apiURL, err := url.Parse(conf.Server)
 	if err != nil {
 		return nil, err
@@ -227,10 +228,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 	}
 
 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"uyuni",
-		time.Duration(conf.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "uyuni",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: reg,
+		},
 	)
 	return d, nil
 }
@@ -23,6 +23,8 @@ import (
 
 	"github.com/stretchr/testify/require"
 
+	"github.com/prometheus/client_golang/prometheus"
+
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
@@ -35,7 +37,7 @@ func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, err
 		Server: ts.URL,
 	}
 
-	md, err := NewDiscovery(&conf, nil)
+	md, err := NewDiscovery(&conf, nil, prometheus.NewRegistry())
 	if err != nil {
 		return nil, err
 	}
@@ -55,7 +57,7 @@ func TestUyuniSDHandleError(t *testing.T) {
 	tgs, err := testUpdateServices(respHandler)
 
 	require.EqualError(t, err, errTesting)
-	require.Equal(t, len(tgs), 0)
+	require.Empty(t, tgs)
 }
 
 func TestUyuniSDLogin(t *testing.T) {
@@ -87,7 +89,7 @@ func TestUyuniSDLogin(t *testing.T) {
 	tgs, err := testUpdateServices(respHandler)
 
 	require.EqualError(t, err, errTesting)
-	require.Equal(t, len(tgs), 0)
+	require.Empty(t, tgs)
 }
 
 func TestUyuniSDSkipLogin(t *testing.T) {
@@ -108,7 +110,7 @@ func TestUyuniSDSkipLogin(t *testing.T) {
 		Server: ts.URL,
 	}
 
-	md, err := NewDiscovery(&conf, nil)
+	md, err := NewDiscovery(&conf, nil, prometheus.NewRegistry())
 	if err != nil {
 		t.Error(err)
 	}
@@ -119,5 +121,5 @@ func TestUyuniSDSkipLogin(t *testing.T) {
 	tgs, err := md.refresh(context.Background())
 
 	require.EqualError(t, err, errTesting)
-	require.Equal(t, len(tgs), 0)
+	require.Empty(t, tgs)
 }
@@ -23,6 +23,7 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/version"
@@ -78,7 +79,7 @@ func (*SDConfig) Name() string { return "vultr" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger)
+	return NewDiscovery(c, opts.Logger, opts.Registerer)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -106,7 +107,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
 	d := &Discovery{
 		port: conf.Port,
 	}
@@ -128,10 +129,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 	}
 
 	d.Discovery = refresh.NewDiscovery(
-		logger,
-		"vultr",
-		time.Duration(conf.RefreshInterval),
-		d.refresh,
+		refresh.Options{
+			Logger:   logger,
+			Mech:     "vultr",
+			Interval: time.Duration(conf.RefreshInterval),
+			RefreshF: d.refresh,
+			Registry: reg,
+		},
 	)
 	return d, nil
 }
@@ -20,6 +20,7 @@ import (
 	"testing"
 
 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 )
@@ -46,7 +47,7 @@ func TestVultrSDRefresh(t *testing.T) {
 
 	cfg := DefaultSDConfig
 	cfg.HTTPClientConfig.BearerToken = APIKey
-	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
 	require.NoError(t, err)
 	endpoint, err := url.Parse(sdMock.Mock.Endpoint())
 	require.NoError(t, err)
@@ -56,12 +57,12 @@ func TestVultrSDRefresh(t *testing.T) {
 	tgs, err := d.refresh(ctx)
 	require.NoError(t, err)
 
-	require.Equal(t, 1, len(tgs))
+	require.Len(t, tgs, 1)
 
 	tg := tgs[0]
 	require.NotNil(t, tg)
 	require.NotNil(t, tg.Targets)
-	require.Equal(t, 3, len(tg.Targets))
+	require.Len(t, tg.Targets, 3)
 
 	for i, k := range []model.LabelSet{
 		{
@@ -53,14 +53,14 @@ func TestMakeXDSResourceHttpEndpointEmptyServerURLScheme(t *testing.T) {
 
 	require.Empty(t, endpointURL)
 	require.Error(t, err)
-	require.Equal(t, err.Error(), "invalid xDS server URL")
+	require.Equal(t, "invalid xDS server URL", err.Error())
 }
 
 func TestMakeXDSResourceHttpEndpointEmptyServerURLHost(t *testing.T) {
 	endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("grpc://127.0.0.1"), "monitoring")
 
 	require.Empty(t, endpointURL)
-	require.NotNil(t, err)
+	require.Error(t, err)
 	require.Contains(t, err.Error(), "must be either 'http' or 'https'")
 }
 
@@ -68,7 +68,7 @@ func TestMakeXDSResourceHttpEndpoint(t *testing.T) {
 	endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("http://127.0.0.1:5000"), "monitoring")
 
 	require.NoError(t, err)
-	require.Equal(t, endpointURL.String(), "http://127.0.0.1:5000/v3/discovery:monitoring")
+	require.Equal(t, "http://127.0.0.1:5000/v3/discovery:monitoring", endpointURL.String())
 }
 
 func TestCreateNewHTTPResourceClient(t *testing.T) {
@@ -89,8 +89,8 @@ func TestCreateNewHTTPResourceClient(t *testing.T) {
 
 	require.NoError(t, err)
 
-	require.Equal(t, client.endpoint, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1")
-	require.Equal(t, client.client.Timeout, 1*time.Minute)
+	require.Equal(t, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1", client.endpoint)
+	require.Equal(t, 1*time.Minute, client.client.Timeout)
 }
 
 func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, protocolVersion ProtocolVersion, responder discoveryResponder) (*HTTPResourceClient, func()) {
@@ -138,7 +138,7 @@ func TestHTTPResourceClientFetchFullResponse(t *testing.T) {
 	require.NotNil(t, res)
 
 	require.Equal(t, client.ResourceTypeURL(), res.TypeUrl)
-	require.Len(t, res.Resources, 0)
+	require.Empty(t, res.Resources)
 	require.Equal(t, "abc", client.latestNonce, "Nonce not cached")
 	require.Equal(t, "1", client.latestVersion, "Version not cached")
@@ -30,35 +30,12 @@ import (
 	"github.com/prometheus/prometheus/util/strutil"
 )
 
-var (
-	// DefaultKumaSDConfig is the default Kuma MADS SD configuration.
-	DefaultKumaSDConfig = KumaSDConfig{
-		HTTPClientConfig: config.DefaultHTTPClientConfig,
-		RefreshInterval:  model.Duration(15 * time.Second),
-		FetchTimeout:     model.Duration(2 * time.Minute),
-	}
-
-	kumaFetchFailuresCount = prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Namespace: namespace,
-			Name:      "sd_kuma_fetch_failures_total",
-			Help:      "The number of Kuma MADS fetch call failures.",
-		})
-	kumaFetchSkipUpdateCount = prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Namespace: namespace,
-			Name:      "sd_kuma_fetch_skipped_updates_total",
-			Help:      "The number of Kuma MADS fetch calls that result in no updates to the targets.",
-		})
-	kumaFetchDuration = prometheus.NewSummary(
-		prometheus.SummaryOpts{
-			Namespace:  namespace,
-			Name:       "sd_kuma_fetch_duration_seconds",
-			Help:       "The duration of a Kuma MADS fetch call.",
-			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
-		},
-	)
-)
+// DefaultKumaSDConfig is the default Kuma MADS SD configuration.
+var DefaultKumaSDConfig = KumaSDConfig{
+	HTTPClientConfig: config.DefaultHTTPClientConfig,
+	RefreshInterval:  model.Duration(15 * time.Second),
+	FetchTimeout:     model.Duration(2 * time.Minute),
+}
 
 const (
 	// kumaMetaLabelPrefix is the meta prefix used for all kuma meta labels.
@@ -120,7 +97,7 @@ func (c *KumaSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discover
 		logger = log.NewNopLogger()
 	}
 
-	return NewKumaHTTPDiscovery(c, logger)
+	return NewKumaHTTPDiscovery(c, logger, opts.Registerer)
 }
 
 func convertKumaV1MonitoringAssignment(assignment *MonitoringAssignment) []model.LabelSet {
@@ -176,12 +153,16 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L
 	return targets, nil
 }
 
-func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger) (discovery.Discoverer, error) {
+func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger, reg prometheus.Registerer) (discovery.Discoverer, error) {
 	// Default to "prometheus" if hostname is unavailable.
-	clientID, err := osutil.GetFQDN()
-	if err != nil {
-		level.Debug(logger).Log("msg", "error getting FQDN", "err", err)
-		clientID = "prometheus"
+	clientID := conf.ClientID
+	if clientID == "" {
+		var err error
+		clientID, err = osutil.GetFQDN()
+		if err != nil {
+			level.Debug(logger).Log("msg", "error getting FQDN", "err", err)
+			clientID = "prometheus"
+		}
 	}
 
 	clientConfig := &HTTPResourceClientConfig{
@@ -203,15 +184,41 @@ func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger) (discovery.Disc
 	}
 
 	d := &fetchDiscovery{
 		client:          client,
 		logger:          logger,
 		refreshInterval: time.Duration(conf.RefreshInterval),
 		source:          "kuma",
 		parseResources:  kumaMadsV1ResourceParser,
-		fetchFailuresCount:   kumaFetchFailuresCount,
-		fetchSkipUpdateCount: kumaFetchSkipUpdateCount,
-		fetchDuration:        kumaFetchDuration,
+		fetchFailuresCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Name:      "sd_kuma_fetch_failures_total",
+				Help:      "The number of Kuma MADS fetch call failures.",
+			}),
+		fetchSkipUpdateCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Name:      "sd_kuma_fetch_skipped_updates_total",
+				Help:      "The number of Kuma MADS fetch calls that result in no updates to the targets.",
+			}),
+		fetchDuration: prometheus.NewSummary(
+			prometheus.SummaryOpts{
+				Namespace:  namespace,
+				Name:       "sd_kuma_fetch_duration_seconds",
+				Help:       "The duration of a Kuma MADS fetch call.",
+				Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+			},
+		),
 	}
 
+	d.metricRegisterer = discovery.NewMetricRegisterer(
+		reg,
+		[]prometheus.Collector{
+			d.fetchFailuresCount,
+			d.fetchSkipUpdateCount,
+			d.fetchDuration,
+		},
+	)
+
 	return d, nil
 }
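Together with the ClientID field added to SDConfig further down, this change makes the MADS client ID configurable: an explicit client_id wins, otherwise the host FQDN is used, with "prometheus" as the last resort. A hedged sketch of programmatic use, assuming the discovery/xds package exports shown in this diff; the server URL and ID below are placeholders:

package main

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery/xds"
)

func main() {
	cfg := xds.DefaultKumaSDConfig
	cfg.Server = "http://kuma-control-plane:5676" // placeholder endpoint
	// Corresponds to `client_id` in YAML; leave empty to fall back to
	// the FQDN and then "prometheus".
	cfg.ClientID = "my-prometheus"

	d, err := xds.NewKumaHTTPDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
	if err != nil {
		fmt.Println(err)
		return
	}
	_ = d
}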
@@ -21,6 +21,7 @@ import (
 	"time"
 
 	v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"google.golang.org/protobuf/proto"
@@ -107,7 +108,7 @@ func getKumaMadsV1DiscoveryResponse(resources ...*MonitoringAssignment) (*v3.Dis
 }
 
 func newKumaTestHTTPDiscovery(c KumaSDConfig) (*fetchDiscovery, error) {
-	kd, err := NewKumaHTTPDiscovery(&c, nopLogger)
+	kd, err := NewKumaHTTPDiscovery(&c, nopLogger, prometheus.NewRegistry())
 	if err != nil {
 		return nil, err
 	}
@@ -129,7 +130,7 @@ func TestKumaMadsV1ResourceParserInvalidTypeURL(t *testing.T) {
 func TestKumaMadsV1ResourceParserEmptySlice(t *testing.T) {
 	resources := make([]*anypb.Any, 0)
 	groups, err := kumaMadsV1ResourceParser(resources, KumaMadsV1ResourceTypeURL)
-	require.Len(t, groups, 0)
+	require.Empty(t, groups)
 	require.NoError(t, err)
 }
 
@@ -204,7 +205,7 @@ func TestNewKumaHTTPDiscovery(t *testing.T) {
 	require.True(t, ok)
 	require.Equal(t, kumaConf.Server, resClient.Server())
 	require.Equal(t, KumaMadsV1ResourceTypeURL, resClient.ResourceTypeURL())
-	require.NotEmpty(t, resClient.ID())
+	require.Equal(t, kumaConf.ClientID, resClient.ID())
 	require.Equal(t, KumaMadsV1ResourceType, resClient.config.ResourceType)
 }
@@ -55,6 +55,7 @@ type SDConfig struct {
 	RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
 	FetchTimeout    model.Duration `yaml:"fetch_timeout,omitempty"`
 	Server          string         `yaml:"server,omitempty"`
+	ClientID        string         `yaml:"client_id,omitempty"`
 }
 
 // mustRegisterMessage registers the provided message type in the typeRegistry, and panics
@@ -69,9 +70,6 @@ func init() {
 	// Register top-level SD Configs.
 	discovery.RegisterConfig(&KumaSDConfig{})
 
-	// Register metrics.
-	prometheus.MustRegister(kumaFetchDuration, kumaFetchSkipUpdateCount, kumaFetchFailuresCount)
-
 	// Register protobuf types that need to be marshalled/ unmarshalled.
 	mustRegisterMessage(protoTypes, (&v3.DiscoveryRequest{}).ProtoReflect().Type())
 	mustRegisterMessage(protoTypes, (&v3.DiscoveryResponse{}).ProtoReflect().Type())
@@ -109,12 +107,20 @@ type fetchDiscovery struct {
 	parseResources resourceParser
 	logger         log.Logger
 
-	fetchDuration        prometheus.Observer
+	fetchDuration        prometheus.Summary
 	fetchSkipUpdateCount prometheus.Counter
 	fetchFailuresCount   prometheus.Counter
+
+	metricRegisterer discovery.MetricRegisterer
 }
 
 func (d *fetchDiscovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
+	err := d.metricRegisterer.RegisterMetrics()
+	if err != nil {
+		level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error())
+		return
+	}
+	defer d.metricRegisterer.UnregisterMetrics()
+
 	defer d.client.Close()
 
 	ticker := time.NewTicker(d.refreshInterval)
Some files were not shown because too many files have changed in this diff.