Mirror of https://github.com/prometheus/prometheus.git (synced 2024-12-24 21:24:05 -08:00)

Use const labels for Discovery Manager metrics.

Signed-off-by: Paulin Todev <paulin.todev@gmail.com>

parent 6de80d7fb0
commit 6a5306a53c
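
The gist of the change: each Discovery Manager metric used to be a shared *Vec collector, with the manager passed in as a variable "name" label value, and after this commit each manager owns plain Gauge/Counter collectors whose "name" label is fixed at construction time via ConstLabels. A minimal, self-contained sketch of the two shapes (illustrative metric names, not the real prometheus_sd_* series):

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    reg := prometheus.NewRegistry()

    // Before: one shared GaugeVec; every manager sets its own "name" label value.
    shared := prometheus.NewGaugeVec(prometheus.GaugeOpts{
        Name: "before_sd_failed_configs",
        Help: "Shared vector, per-manager label value.",
    }, []string{"name"})
    reg.MustRegister(shared)
    shared.WithLabelValues("scrape").Set(0)

    // After: a per-manager Gauge whose "name" label is constant.
    scoped := prometheus.NewGauge(prometheus.GaugeOpts{
        Name:        "after_sd_failed_configs",
        Help:        "Per-manager gauge, const label.",
        ConstLabels: prometheus.Labels{"name": "scrape"},
    })
    reg.MustRegister(scoped)
    scoped.Set(0)

    // Both expose the same label set on the scraped series.
    mfs, _ := reg.Gather()
    for _, mf := range mfs {
        fmt.Println(mf.GetName(), mf.GetMetric()[0].GetLabel())
    }
}

Either way the scraped series carry a "name" label; what changes is ownership, since a per-manager collector no longer has to be created once in main() and threaded through every constructor.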
@@ -620,30 +620,56 @@ func main() {
         discoveryManagerNotify discoveryManager
     )
 
-    // Register the metrics used by both "scrape" and "notify" discovery managers.
-    // The same metrics are used for both discovery managers. Hence the registration
-    // needs to be done here, outside the NewManager() calls, to avoid duplicate
-    // metric registrations.
-    discoveryMetrics, err := discovery.NewMetrics(prometheus.DefaultRegisterer)
+    // Kubernetes client metrics are used by Kubernetes SD.
+    // They are registered here in the main function, because SD mechanisms
+    // can only register metrics specific to a SD instance.
+    // Kubernetes client metrics are the same for the whole process -
+    // they are not specific to an SD instance.
+    err = discovery.RegisterK8sClientMetricsWithPrometheus(prometheus.DefaultRegisterer)
     if err != nil {
-        level.Error(logger).Log("msg", "failed to create discovery metrics", "err", err)
+        level.Error(logger).Log("msg", "failed to register Kubernetes client metrics", "err", err)
         os.Exit(1)
     }
     if cfg.enableNewSDManager {
-        discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, discoveryMetrics, discovery.Name("scrape"))
-        discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, discoveryMetrics, discovery.Name("notify"))
-    } else {
-        discoveryManagerScrape = legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, discoveryMetrics, legacymanager.Name("scrape"))
-        discoveryManagerNotify = legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, discoveryMetrics, legacymanager.Name("notify"))
-    }
-
-    if discoveryManagerScrape == nil {
-        level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
-        os.Exit(1)
-    }
-    if discoveryManagerNotify == nil {
-        level.Error(logger).Log("msg", "failed to create a discovery manager notify")
-        os.Exit(1)
+        {
+            discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, discovery.Name("scrape"))
+            if discMgr == nil {
+                level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
+                os.Exit(1)
+            } else {
+                discoveryManagerScrape = discMgr
+            }
+        }
+
+        {
+            discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, discovery.Name("notify"))
+            if discMgr == nil {
+                level.Error(logger).Log("msg", "failed to create a discovery manager notify")
+                os.Exit(1)
+            } else {
+                discoveryManagerNotify = discMgr
+            }
+        }
+    } else {
+        {
+            discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, legacymanager.Name("scrape"))
+            if discMgr == nil {
+                level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
+                os.Exit(1)
+            } else {
+                discoveryManagerScrape = discMgr
+            }
+        }
+
+        {
+            discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, legacymanager.Name("notify"))
+            if discMgr == nil {
+                level.Error(logger).Log("msg", "failed to create a discovery manager notify")
+                os.Exit(1)
+            } else {
+                discoveryManagerNotify = discMgr
+            }
+        }
     }
 
     scrapeManager, err := scrape.NewManager(
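
The comment block removed above explained why the shared metrics had to be created once in main(): registering the same collector twice fails. Const labels lift that restriction, because two collectors may share a metric name as long as their const label values differ; only an exact duplicate is rejected. A small sketch of that registry behavior (the helper and metric name are illustrative, not part of the Prometheus codebase):

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

// newFailedConfigsGauge mirrors the pattern this commit adopts: one Gauge per
// manager, distinguished by a const "name" label.
func newFailedConfigsGauge(name string) prometheus.Gauge {
    return prometheus.NewGauge(prometheus.GaugeOpts{
        Name:        "example_sd_failed_configs",
        Help:        "Current number of service discovery configurations that failed to load.",
        ConstLabels: prometheus.Labels{"name": name},
    })
}

func main() {
    reg := prometheus.NewRegistry()

    // Same metric name, different const label values: both succeed.
    fmt.Println(reg.Register(newFailedConfigsGauge("scrape"))) // <nil>
    fmt.Println(reg.Register(newFailedConfigsGauge("notify"))) // <nil>

    // Identical name and label values: rejected as a duplicate.
    fmt.Println(reg.Register(newFailedConfigsGauge("scrape"))) // duplicate registration error
}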
@@ -42,7 +42,7 @@ type provider struct {
 }
 
 // NewManager is the Discovery Manager constructor.
-func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, metrics *discovery.Metrics, options ...func(*Manager)) *Manager {
+func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, options ...func(*Manager)) *Manager {
     if logger == nil {
         logger = log.NewNopLogger()
     }
@@ -55,11 +55,20 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer,
         updatert:    5 * time.Second,
         triggerSend: make(chan struct{}, 1),
         registerer:  registerer,
-        metrics:     metrics,
     }
     for _, option := range options {
         option(mgr)
     }
 
+    // Register the metrics.
+    // We have to do this after setting all options, so that the name of the Manager is set.
+    if metrics, err := discovery.NewMetrics(registerer, mgr.name); err == nil {
+        mgr.metrics = metrics
+    } else {
+        level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
+        return nil
+    }
+
     return mgr
 }
 
@@ -133,9 +142,9 @@ func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error {
     failedCount := 0
     for name, scfg := range cfg {
         failedCount += m.registerProviders(scfg, name)
-        m.metrics.DiscoveredTargets.WithLabelValues(m.name, name).Set(0)
+        m.metrics.DiscoveredTargets.WithLabelValues(name).Set(0)
     }
-    m.metrics.FailedConfigs.WithLabelValues(m.name).Set(float64(failedCount))
+    m.metrics.FailedConfigs.Set(float64(failedCount))
 
     for _, prov := range m.providers {
         m.startProvider(m.ctx, prov)
@@ -172,7 +181,7 @@ func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) {
         case <-ctx.Done():
             return
         case tgs, ok := <-updates:
-            m.metrics.ReceivedUpdates.WithLabelValues(m.name).Inc()
+            m.metrics.ReceivedUpdates.Inc()
             if !ok {
                 level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name)
                 return
@@ -201,11 +210,11 @@ func (m *Manager) sender() {
         case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker.
             select {
             case <-m.triggerSend:
-                m.metrics.SentUpdates.WithLabelValues(m.name).Inc()
+                m.metrics.SentUpdates.Inc()
                 select {
                 case m.syncCh <- m.allGroups():
                 default:
-                    m.metrics.DelayedUpdates.WithLabelValues(m.name).Inc()
+                    m.metrics.DelayedUpdates.Inc()
                     level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle")
                     select {
                     case m.triggerSend <- struct{}{}:
@@ -253,7 +262,7 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group {
         }
     }
     for setName, v := range n {
-        m.metrics.DiscoveredTargets.WithLabelValues(m.name, setName).Set(float64(v))
+        m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v))
     }
     return tSets
 }
@@ -36,12 +36,6 @@ func TestMain(m *testing.M) {
     testutil.TolerantVerifyLeak(m)
 }
 
-func newTestDiscoveryMetrics(t *testing.T) *discovery.Metrics {
-    metrics, err := discovery.NewMetrics(prometheus.NewRegistry())
-    require.NoError(t, err)
-    return metrics
-}
-
 // TestTargetUpdatesOrder checks that the target updates are received in the expected order.
 func TestTargetUpdatesOrder(t *testing.T) {
     // The order by which the updates are send is determined by the interval passed to the mock discovery adapter
@@ -671,7 +665,8 @@ func TestTargetUpdatesOrder(t *testing.T) {
             ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
             defer cancel()
 
-            discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+            discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+            require.NotNil(t, discoveryManager)
             discoveryManager.updatert = 100 * time.Millisecond
 
             var totalUpdatesCount int
@@ -753,7 +748,8 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Group,
 func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -781,7 +777,8 @@ func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
 func TestDiscovererConfigs(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -805,7 +802,8 @@ func TestDiscovererConfigs(t *testing.T) {
 func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -844,7 +842,8 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, nil, prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, nil, prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -875,7 +874,8 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
     }
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -900,7 +900,8 @@ func (e errorConfig) NewDiscoverer(discovery.DiscovererOptions) (discovery.Discoverer, error) {
 func TestGaugeFailedConfigs(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -1056,7 +1057,8 @@ func TestCoordinationWithReceiver(t *testing.T) {
             ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
             defer cancel()
 
-            mgr := NewManager(ctx, nil, prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+            mgr := NewManager(ctx, nil, prometheus.NewRegistry())
+            require.NotNil(t, mgr)
             mgr.updatert = updateDelay
             go mgr.Run()
 
@@ -65,7 +65,7 @@ func (p *Provider) Config() interface{} {
 }
 
 // NewManager is the Discovery Manager constructor.
-func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, metrics *Metrics, options ...func(*Manager)) *Manager {
+func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, options ...func(*Manager)) *Manager {
     if logger == nil {
         logger = log.NewNopLogger()
     }
@@ -77,11 +77,20 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer,
         updatert:    5 * time.Second,
         triggerSend: make(chan struct{}, 1),
         registerer:  registerer,
-        metrics:     metrics,
     }
     for _, option := range options {
         option(mgr)
     }
 
+    // Register the metrics.
+    // We have to do this after setting all options, so that the name of the Manager is set.
+    if metrics, err := NewMetrics(registerer, mgr.name); err == nil {
+        mgr.metrics = metrics
+    } else {
+        level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
+        return nil
+    }
+
     return mgr
 }
 
@@ -165,7 +174,7 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
     for name, scfg := range cfg {
         failedCount += m.registerProviders(scfg, name)
     }
-    m.metrics.FailedConfigs.WithLabelValues(m.name).Set(float64(failedCount))
+    m.metrics.FailedConfigs.Set(float64(failedCount))
 
     var (
         wg sync.WaitGroup
@@ -201,7 +210,7 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
     // Set metrics and targets for new subs.
     for s := range prov.newSubs {
         if _, ok := prov.subs[s]; !ok {
-            m.metrics.DiscoveredTargets.WithLabelValues(m.name, s).Set(0)
+            m.metrics.DiscoveredTargets.WithLabelValues(s).Set(0)
         }
         if l := len(refTargets); l > 0 {
             m.targets[poolKey{s, prov.name}] = make(map[string]*targetgroup.Group, l)
@@ -281,7 +290,7 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targetgroup.Group) {
         case <-ctx.Done():
             return
         case tgs, ok := <-updates:
-            m.metrics.ReceivedUpdates.WithLabelValues(m.name).Inc()
+            m.metrics.ReceivedUpdates.Inc()
             if !ok {
                 level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name)
                 // Wait for provider cancellation to ensure targets are cleaned up when expected.
@@ -314,11 +323,11 @@ func (m *Manager) sender() {
         case <-ticker.C: // Some discoverers send updates too often, so we throttle these with the ticker.
             select {
             case <-m.triggerSend:
-                m.metrics.SentUpdates.WithLabelValues(m.name).Inc()
+                m.metrics.SentUpdates.Inc()
                 select {
                 case m.syncCh <- m.allGroups():
                 default:
-                    m.metrics.DelayedUpdates.WithLabelValues(m.name).Inc()
+                    m.metrics.DelayedUpdates.Inc()
                     level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle")
                     select {
                     case m.triggerSend <- struct{}{}:
@@ -370,7 +379,7 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group {
         }
     }
     for setName, v := range n {
-        m.metrics.DiscoveredTargets.WithLabelValues(m.name, setName).Set(float64(v))
+        m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v))
     }
     return tSets
 }
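
Since metric construction now happens inside NewManager, after the options run so that discovery.Name() has already set the manager name, the constructor can fail and signals this by returning nil, which callers are expected to check. A minimal sketch of the new call pattern, following the diff above:

package main

import (
    "context"
    "os"

    "github.com/go-kit/log"
    "github.com/prometheus/client_golang/prometheus"

    "github.com/prometheus/prometheus/discovery"
)

func main() {
    logger := log.NewNopLogger()

    // NewManager now registers its own metrics (with the manager name as a
    // const label) and returns nil if that registration fails, for example
    // when two managers with the same name share one registry.
    mgr := discovery.NewManager(context.Background(), logger, prometheus.DefaultRegisterer, discovery.Name("scrape"))
    if mgr == nil {
        os.Exit(1)
    }

    go mgr.Run()
    // ... apply configuration and consume mgr.SyncCh() as usual.
}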
@@ -36,12 +36,6 @@ func TestMain(m *testing.M) {
     testutil.TolerantVerifyLeak(m)
 }
 
-func newTestDiscoveryMetrics(t *testing.T) *Metrics {
-    metrics, err := NewMetrics(prometheus.NewRegistry())
-    require.NoError(t, err)
-    return metrics
-}
-
 // TestTargetUpdatesOrder checks that the target updates are received in the expected order.
 func TestTargetUpdatesOrder(t *testing.T) {
     // The order by which the updates are send is determined by the interval passed to the mock discovery adapter
@@ -671,7 +665,8 @@ func TestTargetUpdatesOrder(t *testing.T) {
             ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
             defer cancel()
 
-            discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+            discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+            require.NotNil(t, discoveryManager)
             discoveryManager.updatert = 100 * time.Millisecond
 
             var totalUpdatesCount int
@@ -785,7 +780,8 @@ func pk(provider, setName string, n int) poolKey {
 func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -817,7 +813,8 @@ func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) {
 func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -852,7 +849,8 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
 func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -890,7 +888,8 @@ func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testing.T) {
 func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -951,7 +950,8 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
 func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -990,7 +990,8 @@ func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
 func TestDiscovererConfigs(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -1022,7 +1023,8 @@ func TestDiscovererConfigs(t *testing.T) {
 func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -1069,7 +1071,8 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, nil, prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, nil, prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -1105,7 +1108,8 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
     }
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -1151,7 +1155,8 @@ func (s lockStaticDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.Group) {
 func TestGaugeFailedConfigs(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
@@ -1307,7 +1312,8 @@ func TestCoordinationWithReceiver(t *testing.T) {
             ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
             defer cancel()
 
-            mgr := NewManager(ctx, nil, prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+            mgr := NewManager(ctx, nil, prometheus.NewRegistry())
+            require.NotNil(t, mgr)
            mgr.updatert = updateDelay
             go mgr.Run()
 
@@ -1402,7 +1408,8 @@ func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) {
 func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), newTestDiscoveryMetrics(t))
+    discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+    require.NotNil(t, discoveryManager)
     discoveryManager.updatert = 100 * time.Millisecond
     go discoveryManager.Run()
 
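
The test updates all follow one pattern: drop the shared newTestDiscoveryMetrics helper, give each manager its own prometheus.NewRegistry() so registrations cannot collide between tests, and assert that the constructor did not return nil. A hypothetical standalone test showing the same pattern through the exported API:

package discovery_test

import (
    "context"
    "testing"

    "github.com/go-kit/log"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/stretchr/testify/require"

    "github.com/prometheus/prometheus/discovery"
)

// TestNewManagerRegistersItsOwnMetrics is illustrative, not part of this commit.
func TestNewManagerRegistersItsOwnMetrics(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // A fresh registry per manager avoids duplicate-registration failures.
    mgr := discovery.NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry(), discovery.Name("test"))
    require.NotNil(t, mgr)
}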
@@ -31,66 +31,64 @@ func init() {
 
 // Metrics to be used with a discovery manager.
 type Metrics struct {
-    FailedConfigs     *prometheus.GaugeVec
+    FailedConfigs     prometheus.Gauge
     DiscoveredTargets *prometheus.GaugeVec
-    ReceivedUpdates   *prometheus.CounterVec
-    DelayedUpdates    *prometheus.CounterVec
-    SentUpdates       *prometheus.CounterVec
+    ReceivedUpdates   prometheus.Counter
+    DelayedUpdates    prometheus.Counter
+    SentUpdates       prometheus.Counter
 }
 
-func NewMetrics(registerer prometheus.Registerer) (*Metrics, error) {
+func NewMetrics(registerer prometheus.Registerer, sdManagerName string) (*Metrics, error) {
     m := &Metrics{}
 
-    m.FailedConfigs = prometheus.NewGaugeVec(
+    m.FailedConfigs = prometheus.NewGauge(
         prometheus.GaugeOpts{
             Name: "prometheus_sd_failed_configs",
             Help: "Current number of service discovery configurations that failed to load.",
+            ConstLabels: prometheus.Labels{"name": sdManagerName},
         },
-        []string{"name"},
     )
 
     m.DiscoveredTargets = prometheus.NewGaugeVec(
         prometheus.GaugeOpts{
             Name: "prometheus_sd_discovered_targets",
             Help: "Current number of discovered targets.",
+            ConstLabels: prometheus.Labels{"name": sdManagerName},
         },
-        []string{"name", "config"},
+        []string{"config"},
     )
 
-    m.ReceivedUpdates = prometheus.NewCounterVec(
+    m.ReceivedUpdates = prometheus.NewCounter(
         prometheus.CounterOpts{
             Name: "prometheus_sd_received_updates_total",
             Help: "Total number of update events received from the SD providers.",
+            ConstLabels: prometheus.Labels{"name": sdManagerName},
         },
-        []string{"name"},
     )
 
-    m.DelayedUpdates = prometheus.NewCounterVec(
+    m.DelayedUpdates = prometheus.NewCounter(
         prometheus.CounterOpts{
             Name: "prometheus_sd_updates_delayed_total",
             Help: "Total number of update events that couldn't be sent immediately.",
+            ConstLabels: prometheus.Labels{"name": sdManagerName},
         },
-        []string{"name"},
     )
 
-    m.SentUpdates = prometheus.NewCounterVec(
+    m.SentUpdates = prometheus.NewCounter(
         prometheus.CounterOpts{
             Name: "prometheus_sd_updates_total",
             Help: "Total number of update events sent to the SD consumers.",
+            ConstLabels: prometheus.Labels{"name": sdManagerName},
         },
-        []string{"name"},
     )
 
-    metrics := append(
-        []prometheus.Collector{
-            m.FailedConfigs,
-            m.DiscoveredTargets,
-            m.ReceivedUpdates,
-            m.DelayedUpdates,
-            m.SentUpdates,
-        },
-        clientGoMetrics()...,
-    )
+    metrics := []prometheus.Collector{
+        m.FailedConfigs,
+        m.DiscoveredTargets,
+        m.ReceivedUpdates,
+        m.DelayedUpdates,
+        m.SentUpdates,
+    }
 
     for _, collector := range metrics {
         err := registerer.Register(collector)
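
A quick way to see the effect of the new NewMetrics signature is to gather from a throwaway registry: every family should carry the manager name as a const label, e.g. prometheus_sd_failed_configs{name="scrape"}. A short sketch, assuming the exported API as of this commit:

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"

    "github.com/prometheus/prometheus/discovery"
)

func main() {
    reg := prometheus.NewRegistry()

    // The manager name is now an argument and becomes a const label.
    metrics, err := discovery.NewMetrics(reg, "scrape")
    if err != nil {
        panic(err)
    }
    metrics.FailedConfigs.Set(0)

    mfs, _ := reg.Gather()
    for _, mf := range mfs {
        for _, m := range mf.GetMetric() {
            // e.g. prometheus_sd_failed_configs [name:"name" value:"scrape"]
            fmt.Println(mf.GetName(), m.GetLabel())
        }
    }
}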
@@ -15,6 +15,7 @@ package discovery
 
 import (
     "context"
+    "fmt"
     "net/url"
     "time"
 
@@ -133,6 +134,16 @@ func clientGoMetrics() []prometheus.Collector {
     }
 }
 
+func RegisterK8sClientMetricsWithPrometheus(registerer prometheus.Registerer) error {
+    for _, collector := range clientGoMetrics() {
+        err := registerer.Register(collector)
+        if err != nil {
+            return fmt.Errorf("failed to register Kubernetes Go Client metrics: %w", err)
+        }
+    }
+    return nil
+}
+
 func (f *clientGoRequestMetricAdapter) RegisterWithK8sGoClient() {
     metrics.Register(
         metrics.RegisterOpts{
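
The client-go collectors returned by clientGoMetrics() are shared for the whole process, which is why the main function calls this helper exactly once. Assuming the collectors are package-level singletons, a second registration against the same registry should fail as a duplicate; a hedged sketch:

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"

    "github.com/prometheus/prometheus/discovery"
)

func main() {
    reg := prometheus.NewRegistry()

    // First call registers the process-wide Kubernetes client metrics.
    fmt.Println(discovery.RegisterK8sClientMetricsWithPrometheus(reg)) // <nil>

    // Calling it again on the same registry should return a
    // duplicate-registration error, hence "exactly once in main".
    fmt.Println(discovery.RegisterK8sClientMetricsWithPrometheus(reg)) // non-nil error
}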
@@ -28,10 +28,8 @@ import (
     "github.com/alecthomas/kingpin/v2"
     "github.com/go-kit/log"
     "github.com/go-kit/log/level"
-    "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/common/model"
 
-    prom_discovery "github.com/prometheus/prometheus/discovery"
     "github.com/prometheus/prometheus/discovery/targetgroup"
     "github.com/prometheus/prometheus/documentation/examples/custom-sd/adapter"
     "github.com/prometheus/prometheus/util/strutil"
@@ -271,12 +269,11 @@ func main() {
         fmt.Println("err: ", err)
     }
 
-    discoveryMetrics, err := prom_discovery.NewMetrics(prometheus.DefaultRegisterer)
     if err != nil {
         level.Error(logger).Log("msg", "failed to create discovery metrics", "err", err)
         os.Exit(1)
     }
-    sdAdapter := adapter.NewAdapter(ctx, *outputFile, "exampleSD", disc, logger, discoveryMetrics)
+    sdAdapter := adapter.NewAdapter(ctx, *outputFile, "exampleSD", disc, logger)
     sdAdapter.Run()
 
     <-ctx.Done()
@@ -163,12 +163,12 @@ func (a *Adapter) Run() {
 }
 
 // NewAdapter creates a new instance of Adapter.
-func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger, metrics *discovery.Metrics) *Adapter {
+func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger) *Adapter {
     return &Adapter{
         ctx:     ctx,
         disc:    d,
         groups:  make(map[string]*customSD),
-        manager: discovery.NewManager(ctx, logger, prometheus.NewRegistry(), metrics),
+        manager: discovery.NewManager(ctx, logger, prometheus.NewRegistry()),
         output:  file,
         name:    name,
         logger:  logger,
@@ -18,11 +18,9 @@ import (
     "os"
     "testing"
 
-    "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/common/model"
     "github.com/stretchr/testify/require"
 
-    "github.com/prometheus/prometheus/discovery"
     "github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
@@ -228,8 +226,7 @@ func TestWriteOutput(t *testing.T) {
     require.NoError(t, err)
     defer os.Remove(tmpfile.Name())
     tmpfile.Close()
-    metrics, err := discovery.NewMetrics(prometheus.NewRegistry())
     require.NoError(t, err)
-    adapter := NewAdapter(ctx, tmpfile.Name(), "test_sd", nil, nil, metrics)
+    adapter := NewAdapter(ctx, tmpfile.Name(), "test_sd", nil, nil)
     require.NoError(t, adapter.writeOutput())
 }