scrape: Add metrics to track bytes and entries in the metadata cache (#6675)

Signed-off-by: gotjosh <josue@grafana.com>
This commit is contained in:
gotjosh 2020-01-29 11:13:18 +00:00 committed by GitHub
parent 9adad8ad30
commit 8b49c9285d
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 138 additions and 14 deletions

View file

@ -27,12 +27,79 @@ import (
"github.com/go-kit/kit/log/level" "github.com/go-kit/kit/log/level"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
) )
// targetMetadataCache is the package-level collector instance. It is
// registered with prometheus in init() and bound to a Manager via
// registerManager when NewManager runs.
var targetMetadataCache = newMetadataMetricsCollector()

// MetadataMetricsCollector is a Custom Collector for the metadata cache metrics.
type MetadataMetricsCollector struct {
	CacheEntries  *prometheus.Desc // per-scrape-job count of metadata entries
	CacheBytes    *prometheus.Desc // per-scrape-job bytes used for metadata
	scrapeManager *Manager         // set via registerManager; nil until a Manager exists
}
// newMetadataMetricsCollector builds a MetadataMetricsCollector with the
// descriptors for both metadata cache metrics, each labelled by scrape_job.
// The scrapeManager field stays nil until registerManager is called.
func newMetadataMetricsCollector() *MetadataMetricsCollector {
	entries := prometheus.NewDesc(
		"prometheus_target_metadata_cache_entries",
		"Total number of metric metadata entries in the cache",
		[]string{"scrape_job"},
		nil,
	)
	bytes := prometheus.NewDesc(
		"prometheus_target_metadata_cache_bytes",
		"The number of bytes that are currently used for storing metric metadata in the cache",
		[]string{"scrape_job"},
		nil,
	)
	return &MetadataMetricsCollector{
		CacheEntries: entries,
		CacheBytes:   bytes,
	}
}
// registerManager wires the collector to the scrape manager whose active
// targets will be inspected on every Collect call.
func (mc *MetadataMetricsCollector) registerManager(m *Manager) {
	mc.scrapeManager = m
}
// Describe sends the metrics descriptions to the channel.
func (mc *MetadataMetricsCollector) Describe(ch chan<- *prometheus.Desc) {
	for _, d := range []*prometheus.Desc{mc.CacheEntries, mc.CacheBytes} {
		ch <- d
	}
}
// Collect creates and sends the metrics for the metadata cache.
// For every scrape job it sums metadata byte size and entry count across
// that job's active targets and emits one gauge pair per job.
func (mc *MetadataMetricsCollector) Collect(ch chan<- prometheus.Metric) {
	mgr := mc.scrapeManager
	if mgr == nil {
		// Nothing registered yet; report no metrics.
		return
	}

	for job, targets := range mgr.TargetsActive() {
		var bytes, entries int
		for _, target := range targets {
			bytes += target.MetadataSize()
			entries += target.MetadataLength()
		}

		ch <- prometheus.MustNewConstMetric(mc.CacheEntries, prometheus.GaugeValue, float64(entries), job)
		ch <- prometheus.MustNewConstMetric(mc.CacheBytes, prometheus.GaugeValue, float64(bytes), job)
	}
}
// Appendable returns an Appender. // Appendable returns an Appender.
type Appendable interface { type Appendable interface {
Appender() (storage.Appender, error) Appender() (storage.Appender, error)
@ -43,7 +110,7 @@ func NewManager(logger log.Logger, app Appendable) *Manager {
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = log.NewNopLogger()
} }
return &Manager{ m := &Manager{
append: app, append: app,
logger: logger, logger: logger,
scrapeConfigs: make(map[string]*config.ScrapeConfig), scrapeConfigs: make(map[string]*config.ScrapeConfig),
@ -51,6 +118,9 @@ func NewManager(logger log.Logger, app Appendable) *Manager {
graceShut: make(chan struct{}), graceShut: make(chan struct{}),
triggerReload: make(chan struct{}, 1), triggerReload: make(chan struct{}, 1),
} }
targetMetadataCache.registerManager(m)
return m
} }
// Manager maintains a set of scrape pools and manages start/stop cycles // Manager maintains a set of scrape pools and manages start/stop cycles

View file

@ -136,19 +136,22 @@ var (
) )
func init() { func init() {
prometheus.MustRegister(targetIntervalLength) prometheus.MustRegister(
prometheus.MustRegister(targetReloadIntervalLength) targetIntervalLength,
prometheus.MustRegister(targetScrapePools) targetReloadIntervalLength,
prometheus.MustRegister(targetScrapePoolsFailed) targetScrapePools,
prometheus.MustRegister(targetScrapePoolReloads) targetScrapePoolsFailed,
prometheus.MustRegister(targetScrapePoolReloadsFailed) targetScrapePoolReloads,
prometheus.MustRegister(targetSyncIntervalLength) targetScrapePoolReloadsFailed,
prometheus.MustRegister(targetScrapePoolSyncsCounter) targetSyncIntervalLength,
prometheus.MustRegister(targetScrapeSampleLimit) targetScrapePoolSyncsCounter,
prometheus.MustRegister(targetScrapeSampleDuplicate) targetScrapeSampleLimit,
prometheus.MustRegister(targetScrapeSampleOutOfOrder) targetScrapeSampleDuplicate,
prometheus.MustRegister(targetScrapeSampleOutOfBounds) targetScrapeSampleOutOfOrder,
prometheus.MustRegister(targetScrapeCacheFlushForced) targetScrapeSampleOutOfBounds,
targetScrapeCacheFlushForced,
targetMetadataCache,
)
} }
// scrapePool manages scrapes for sets of targets. // scrapePool manages scrapes for sets of targets.
@ -658,6 +661,11 @@ type metaEntry struct {
unit string unit string
} }
// size returns the number of bytes of metadata this entry holds.
func (m *metaEntry) size() int {
	// The lastIter field, although part of the struct, is not metadata.
	return len(m.help) + len(m.unit) + len(m.typ)
}
func newScrapeCache() *scrapeCache { func newScrapeCache() *scrapeCache {
return &scrapeCache{ return &scrapeCache{
series: map[string]*cacheEntry{}, series: map[string]*cacheEntry{},
@ -842,6 +850,25 @@ func (c *scrapeCache) ListMetadata() []MetricMetadata {
return res return res
} }
// SizeMetadata returns the combined byte size of all metadata entries
// currently held in the cache. (The godoc comment previously started with
// the wrong name, "MetadataSize"; Go doc comments must begin with the
// declared identifier.)
func (c *scrapeCache) SizeMetadata() (s int) {
	c.metaMtx.Lock()
	defer c.metaMtx.Unlock()
	for _, e := range c.metadata {
		s += e.size()
	}

	return s
}
// LengthMetadata returns the number of metadata entries in the cache.
// (The godoc comment previously started with the wrong name, "MetadataLen";
// Go doc comments must begin with the declared identifier.)
func (c *scrapeCache) LengthMetadata() int {
	c.metaMtx.Lock()
	defer c.metaMtx.Unlock()

	return len(c.metadata)
}
func newScrapeLoop(ctx context.Context, func newScrapeLoop(ctx context.Context,
sc scraper, sc scraper,
l log.Logger, l log.Logger,

View file

@ -78,6 +78,8 @@ func (t *Target) String() string {
type MetricMetadataStore interface { type MetricMetadataStore interface {
ListMetadata() []MetricMetadata ListMetadata() []MetricMetadata
GetMetadata(metric string) (MetricMetadata, bool) GetMetadata(metric string) (MetricMetadata, bool)
SizeMetadata() int
LengthMetadata() int
} }
// MetricMetadata is a piece of metadata for a metric. // MetricMetadata is a piece of metadata for a metric.
@ -98,6 +100,28 @@ func (t *Target) MetadataList() []MetricMetadata {
return t.metadata.ListMetadata() return t.metadata.ListMetadata()
} }
// MetadataSize returns the number of bytes the target's metadata cache
// currently occupies, or 0 when no metadata store is attached.
func (t *Target) MetadataSize() int {
	t.mtx.RLock()
	defer t.mtx.RUnlock()

	if store := t.metadata; store != nil {
		return store.SizeMetadata()
	}
	return 0
}
// MetadataLength returns the number of metadata entries in the target's
// metadata cache, or 0 when no metadata store is attached.
func (t *Target) MetadataLength() int {
	t.mtx.RLock()
	defer t.mtx.RUnlock()

	if store := t.metadata; store != nil {
		return store.LengthMetadata()
	}
	return 0
}
// Metadata returns type and help metadata for the given metric. // Metadata returns type and help metadata for the given metric.
func (t *Target) Metadata(metric string) (MetricMetadata, bool) { func (t *Target) Metadata(metric string) (MetricMetadata, bool) {
t.mtx.RLock() t.mtx.RLock()

View file

@ -77,6 +77,9 @@ func (s *testMetaStore) GetMetadata(metric string) (scrape.MetricMetadata, bool)
return scrape.MetricMetadata{}, false return scrape.MetricMetadata{}, false
} }
// SizeMetadata satisfies MetricMetadataStore; the test store tracks no byte sizes.
func (s *testMetaStore) SizeMetadata() int { return 0 }

// LengthMetadata satisfies MetricMetadataStore; the test store tracks no entry counts.
func (s *testMetaStore) LengthMetadata() int { return 0 }
// testTargetRetriever represents a list of targets to scrape. // testTargetRetriever represents a list of targets to scrape.
// It is used to represent targets as part of test cases. // It is used to represent targets as part of test cases.
type testTargetRetriever struct { type testTargetRetriever struct {