Merge pull request #383 from prometheus/beorn7/client_golang

Remove `prometheus_` prefix from metrics
Authored by Frederic Branczyk on 2018-09-17 15:16:04 +02:00, committed by GitHub
Commit 1b651ea7d4
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
5 changed files with 33 additions and 33 deletions
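Not part of the diff, but useful context: with the hard-coded prefix gone, an embedding application that still wants the old metric names can re-add the prefix at registration time. A minimal sketch, assuming client_golang's WrapRegistererWithPrefix; the registry setup and HTTP handler below are illustrative and not from this commit:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	// Everything registered through `wrapped` is exposed with the prefix,
	// e.g. tsdb_compactions_total becomes prometheus_tsdb_compactions_total.
	wrapped := prometheus.WrapRegistererWithPrefix("prometheus_", reg)

	compactions := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "tsdb_compactions_total",
		Help: "Total number of compactions that were executed for the partition.",
	})
	wrapped.MustRegister(compactions)

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":8080", nil)
}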


@@ -84,30 +84,30 @@ func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics {
 	m := &compactorMetrics{}
 	m.ran = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_compactions_total",
+		Name: "tsdb_compactions_total",
 		Help: "Total number of compactions that were executed for the partition.",
 	})
 	m.failed = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_compactions_failed_total",
+		Name: "tsdb_compactions_failed_total",
 		Help: "Total number of compactions that failed for the partition.",
 	})
 	m.duration = prometheus.NewHistogram(prometheus.HistogramOpts{
-		Name:    "prometheus_tsdb_compaction_duration_seconds",
+		Name:    "tsdb_compaction_duration_seconds",
 		Help:    "Duration of compaction runs",
 		Buckets: prometheus.ExponentialBuckets(1, 2, 10),
 	})
 	m.chunkSize = prometheus.NewHistogram(prometheus.HistogramOpts{
-		Name:    "prometheus_tsdb_compaction_chunk_size_bytes",
+		Name:    "tsdb_compaction_chunk_size_bytes",
 		Help:    "Final size of chunks on their first compaction",
 		Buckets: prometheus.ExponentialBuckets(32, 1.5, 12),
 	})
 	m.chunkSamples = prometheus.NewHistogram(prometheus.HistogramOpts{
-		Name:    "prometheus_tsdb_compaction_chunk_samples",
+		Name:    "tsdb_compaction_chunk_samples",
 		Help:    "Final number of samples on their first compaction",
 		Buckets: prometheus.ExponentialBuckets(4, 1.5, 12),
 	})
 	m.chunkRange = prometheus.NewHistogram(prometheus.HistogramOpts{
-		Name:    "prometheus_tsdb_compaction_chunk_range_seconds",
+		Name:    "tsdb_compaction_chunk_range_seconds",
 		Help:    "Final time range of chunks on their first compaction",
 		Buckets: prometheus.ExponentialBuckets(100, 4, 10),
 	})
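For reference, the bucket layout of the compaction-duration histogram above is unchanged by the rename: ExponentialBuckets(1, 2, 10) yields ten upper bounds doubling from 1 to 512 seconds (plus the implicit +Inf bucket). A quick, self-contained check:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Prints: [1 2 4 8 16 32 64 128 256 512]
	fmt.Println(prometheus.ExponentialBuckets(1, 2, 10))
}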

db.go (18 changed lines)

@@ -133,7 +133,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
 	m := &dbMetrics{}
 	m.loadedBlocks = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
-		Name: "prometheus_tsdb_blocks_loaded",
+		Name: "tsdb_blocks_loaded",
 		Help: "Number of currently loaded data blocks",
 	}, func() float64 {
 		db.mtx.RLock()
@@ -141,7 +141,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
 		return float64(len(db.blocks))
 	})
 	m.symbolTableSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
-		Name: "prometheus_tsdb_symbol_table_size_bytes",
+		Name: "tsdb_symbol_table_size_bytes",
 		Help: "Size of symbol table on disk (in bytes)",
 	}, func() float64 {
 		db.mtx.RLock()
@@ -154,27 +154,27 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
 		return float64(symTblSize)
 	})
 	m.reloads = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_reloads_total",
+		Name: "tsdb_reloads_total",
 		Help: "Number of times the database reloaded block data from disk.",
 	})
 	m.reloadsFailed = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_reloads_failures_total",
+		Name: "tsdb_reloads_failures_total",
 		Help: "Number of times the database failed to reload block data from disk.",
 	})
 	m.compactionsTriggered = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_compactions_triggered_total",
+		Name: "tsdb_compactions_triggered_total",
 		Help: "Total number of triggered compactions for the partition.",
 	})
 	m.cutoffs = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_retention_cutoffs_total",
+		Name: "tsdb_retention_cutoffs_total",
 		Help: "Number of times the database cut off block data from disk.",
 	})
 	m.cutoffsFailed = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_retention_cutoffs_failures_total",
+		Name: "tsdb_retention_cutoffs_failures_total",
 		Help: "Number of times the database failed to cut off block data from disk.",
 	})
 	m.startTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
-		Name: "prometheus_tsdb_lowest_timestamp",
+		Name: "tsdb_lowest_timestamp",
 		Help: "Lowest timestamp value stored in the database.",
 	}, func() float64 {
 		db.mtx.RLock()
@@ -185,7 +185,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
 		return float64(db.blocks[0].meta.MinTime)
 	})
 	m.tombCleanTimer = prometheus.NewHistogram(prometheus.HistogramOpts{
-		Name: "prometheus_tsdb_tombstone_cleanup_seconds",
+		Name: "tsdb_tombstone_cleanup_seconds",
 		Help: "The time taken to recompact blocks to remove tombstones.",
 	})
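The gauges in db.go use the GaugeFunc pattern: the value is computed by a callback at scrape time rather than being set explicitly. A minimal sketch of the same pattern; fakeDB and its fields are hypothetical stand-ins for *DB and its mutex-guarded block list:

package dbmetrics

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
)

// fakeDB mimics the locking and block list that the real callback reads.
type fakeDB struct {
	mtx    sync.RWMutex
	blocks []struct{}
}

// newBlocksLoadedGauge reports the current block count whenever it is scraped.
func newBlocksLoadedGauge(db *fakeDB) prometheus.GaugeFunc {
	return prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "tsdb_blocks_loaded",
		Help: "Number of currently loaded data blocks",
	}, func() float64 {
		db.mtx.RLock()
		defer db.mtx.RUnlock()
		return float64(len(db.blocks))
	})
}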

head.go (26 changed lines)

@@ -95,59 +95,59 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics {
 	m := &headMetrics{}
 	m.activeAppenders = prometheus.NewGauge(prometheus.GaugeOpts{
-		Name: "prometheus_tsdb_head_active_appenders",
+		Name: "tsdb_head_active_appenders",
 		Help: "Number of currently active appender transactions",
 	})
 	m.series = prometheus.NewGauge(prometheus.GaugeOpts{
-		Name: "prometheus_tsdb_head_series",
+		Name: "tsdb_head_series",
 		Help: "Total number of series in the head block.",
 	})
 	m.seriesCreated = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_head_series_created_total",
+		Name: "tsdb_head_series_created_total",
 		Help: "Total number of series created in the head",
 	})
 	m.seriesRemoved = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_head_series_removed_total",
+		Name: "tsdb_head_series_removed_total",
 		Help: "Total number of series removed in the head",
 	})
 	m.seriesNotFound = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_head_series_not_found_total",
+		Name: "tsdb_head_series_not_found_total",
 		Help: "Total number of requests for series that were not found.",
 	})
 	m.chunks = prometheus.NewGauge(prometheus.GaugeOpts{
-		Name: "prometheus_tsdb_head_chunks",
+		Name: "tsdb_head_chunks",
 		Help: "Total number of chunks in the head block.",
 	})
 	m.chunksCreated = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_head_chunks_created_total",
+		Name: "tsdb_head_chunks_created_total",
 		Help: "Total number of chunks created in the head",
 	})
 	m.chunksRemoved = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_head_chunks_removed_total",
+		Name: "tsdb_head_chunks_removed_total",
 		Help: "Total number of chunks removed in the head",
 	})
 	m.gcDuration = prometheus.NewSummary(prometheus.SummaryOpts{
-		Name: "prometheus_tsdb_head_gc_duration_seconds",
+		Name: "tsdb_head_gc_duration_seconds",
 		Help: "Runtime of garbage collection in the head block.",
 	})
 	m.maxTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
-		Name: "prometheus_tsdb_head_max_time",
+		Name: "tsdb_head_max_time",
 		Help: "Maximum timestamp of the head block.",
 	}, func() float64 {
 		return float64(h.MaxTime())
 	})
 	m.minTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
-		Name: "prometheus_tsdb_head_min_time",
+		Name: "tsdb_head_min_time",
 		Help: "Minimum time bound of the head block.",
 	}, func() float64 {
 		return float64(h.MinTime())
 	})
 	m.walTruncateDuration = prometheus.NewSummary(prometheus.SummaryOpts{
-		Name: "prometheus_tsdb_wal_truncate_duration_seconds",
+		Name: "tsdb_wal_truncate_duration_seconds",
 		Help: "Duration of WAL truncation.",
 	})
 	m.samplesAppended = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_head_samples_appended_total",
+		Name: "tsdb_head_samples_appended_total",
 		Help: "Total number of appended samples.",
 	})
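The Summaries in head.go (GC duration, WAL truncation duration) are typically fed by timing the operation and observing the elapsed seconds. A minimal sketch of that pattern; runHeadGC is a hypothetical stand-in for the head block's GC routine:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var gcDuration = prometheus.NewSummary(prometheus.SummaryOpts{
	Name: "tsdb_head_gc_duration_seconds",
	Help: "Runtime of garbage collection in the head block.",
})

// runHeadGC stands in for the real garbage-collection work.
func runHeadGC() {
	time.Sleep(10 * time.Millisecond)
}

func main() {
	prometheus.MustRegister(gcDuration)

	start := time.Now()
	runHeadGC()
	// Record the elapsed wall-clock time in seconds.
	gcDuration.Observe(time.Since(start).Seconds())
}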

wal.go (4 changed lines)

@@ -64,11 +64,11 @@ func newWalMetrics(wal *SegmentWAL, r prometheus.Registerer) *walMetrics {
 	m := &walMetrics{}
 	m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{
-		Name: "prometheus_tsdb_wal_fsync_duration_seconds",
+		Name: "tsdb_wal_fsync_duration_seconds",
 		Help: "Duration of WAL fsync.",
 	})
 	m.corruptions = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_wal_corruptions_total",
+		Name: "tsdb_wal_corruptions_total",
 		Help: "Total number of WAL corruptions.",
 	})


@@ -190,15 +190,15 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi
 		stopc: make(chan chan struct{}),
 	}
 	w.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{
-		Name: "prometheus_tsdb_wal_fsync_duration_seconds",
+		Name: "tsdb_wal_fsync_duration_seconds",
 		Help: "Duration of WAL fsync.",
 	})
 	w.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_wal_page_flushes_total",
+		Name: "tsdb_wal_page_flushes_total",
 		Help: "Total number of page flushes.",
 	})
 	w.pageCompletions = prometheus.NewCounter(prometheus.CounterOpts{
-		Name: "prometheus_tsdb_wal_completed_pages_total",
+		Name: "tsdb_wal_completed_pages_total",
 		Help: "Total number of completed pages.",
 	})
 	if reg != nil {
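All of these constructors follow the optional-Registerer pattern visible at the end of this hunk: metrics are always created, but only registered when the caller supplies a non-nil prometheus.Registerer. A minimal sketch of that pattern; newThingMetrics and its metric name are illustrative and not from the repository:

package main

import "github.com/prometheus/client_golang/prometheus"

type thingMetrics struct {
	ops prometheus.Counter
}

func newThingMetrics(r prometheus.Registerer) *thingMetrics {
	m := &thingMetrics{
		ops: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "tsdb_thing_operations_total",
			Help: "Total number of operations (illustrative metric).",
		}),
	}
	if r != nil {
		// MustRegister panics on duplicate registration, which surfaces
		// accidental metric-name collisions early.
		r.MustRegister(m.ops)
	}
	return m
}

func main() {
	_ = newThingMetrics(prometheus.NewRegistry())
}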