Merge pull request #13897 from dashpole/unregister_scrape_metrics

This commit is contained in:
Arthur Silva Sens 2024-04-05 14:44:32 -03:00 committed by GitHub
commit b4a973753c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 46 additions and 1 deletions

View file

@ -129,6 +129,11 @@ func (m *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) error {
}
}
// UnregisterMetrics unregisters manager metrics.
// It delegates to scrapeMetrics.Unregister, which removes every collector
// from the registry the metrics were registered with, so a subsequent
// Manager can register the same metric names on a shared registry.
func (m *Manager) UnregisterMetrics() {
	m.metrics.Unregister()
}
func (m *Manager) reloader() {
reloadIntervalDuration := m.opts.DiscoveryReloadInterval
if reloadIntervalDuration < model.Duration(5*time.Second) {

View file

@ -857,3 +857,16 @@ func getResultFloats(app *collectResultAppender, expectedMetricName string) (res
}
return result
}
// TestUnregisterMetrics verifies that a manager releases all of its metrics
// from a shared registry, so a second manager can register the same metric
// names afterwards without a duplicate-registration error.
func TestUnregisterMetrics(t *testing.T) {
	reg := prometheus.NewRegistry()
	// Constructing the manager a second time against the same registry only
	// succeeds if the first manager unregistered every collector it created.
	for attempt := 0; attempt < 2; attempt++ {
		m, err := NewManager(&Options{}, nil, nil, reg)
		require.NoError(t, err)
		require.NotNil(t, m)
		m.UnregisterMetrics()
	}
}

View file

@ -20,6 +20,7 @@ import (
)
type scrapeMetrics struct {
reg prometheus.Registerer
// Used by Manager.
targetMetadataCache *MetadataMetricsCollector
targetScrapePools prometheus.Counter
@ -54,7 +55,7 @@ type scrapeMetrics struct {
}
func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
sm := &scrapeMetrics{}
sm := &scrapeMetrics{reg: reg}
// Manager metrics.
sm.targetMetadataCache = &MetadataMetricsCollector{
@ -260,6 +261,32 @@ func (sm *scrapeMetrics) setTargetMetadataCacheGatherer(gatherer TargetsGatherer
sm.targetMetadataCache.TargetsGatherer = gatherer
}
// Unregister unregisters all metrics.
// Every collector created by newScrapeMetrics is removed from sm.reg; the
// boolean result of Registerer.Unregister is deliberately ignored, matching
// the original behavior.
func (sm *scrapeMetrics) Unregister() {
	for _, c := range []prometheus.Collector{
		sm.targetMetadataCache,
		sm.targetScrapePools,
		sm.targetScrapePoolsFailed,
		sm.targetReloadIntervalLength,
		sm.targetScrapePoolReloads,
		sm.targetScrapePoolReloadsFailed,
		sm.targetSyncIntervalLength,
		sm.targetScrapePoolSyncsCounter,
		sm.targetScrapePoolExceededTargetLimit,
		sm.targetScrapePoolTargetLimit,
		sm.targetScrapePoolTargetsAdded,
		sm.targetSyncFailed,
		sm.targetScrapeExceededBodySizeLimit,
		sm.targetScrapeCacheFlushForced,
		sm.targetIntervalLength,
		sm.targetScrapeSampleLimit,
		sm.targetScrapeSampleDuplicate,
		sm.targetScrapeSampleOutOfOrder,
		sm.targetScrapeSampleOutOfBounds,
		sm.targetScrapeExemplarOutOfOrder,
		sm.targetScrapePoolExceededLabelLimits,
		sm.targetScrapeNativeHistogramBucketLimit,
	} {
		sm.reg.Unregister(c)
	}
}
// TargetsGatherer reports the currently active scrape targets, grouped in a
// map (presumably keyed by scrape pool/job name — confirm against the
// Manager implementation). Used by MetadataMetricsCollector.
type TargetsGatherer interface {
	TargetsActive() map[string][]*Target
}