2013-02-07 02:38:01 -08:00
// Copyright 2013 Prometheus Team
2012-12-09 07:27:12 -08:00
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
2013-02-08 09:03:26 -08:00
package metric
2012-12-09 07:27:12 -08:00
import (
2013-02-08 09:03:26 -08:00
"flag"
2013-04-28 05:47:43 -07:00
"fmt"
2013-02-08 09:03:26 -08:00
"sort"
2013-03-04 11:43:07 -08:00
"sync"
2012-12-12 03:53:34 -08:00
"time"
2012-12-09 07:27:12 -08:00
2013-06-08 01:27:44 -07:00
"code.google.com/p/goprotobuf/proto"
2013-08-12 08:18:02 -07:00
"github.com/golang/glog"
2013-06-08 01:27:44 -07:00
2013-06-25 05:02:27 -07:00
clientmodel "github.com/prometheus/client_golang/model"
2013-06-08 01:27:44 -07:00
"github.com/prometheus/prometheus/storage"
2013-08-05 08:31:49 -07:00
"github.com/prometheus/prometheus/storage/raw"
2013-06-08 01:27:44 -07:00
"github.com/prometheus/prometheus/storage/raw/leveldb"
"github.com/prometheus/prometheus/utility"
2013-08-12 08:18:02 -07:00
dto "github.com/prometheus/prometheus/model/generated"
2013-02-08 09:03:26 -08:00
)
2013-06-08 01:27:44 -07:00
// sortConcurrency bounds how many per-fingerprint sample sorts may run at once.
const sortConcurrency = 2
2014-02-14 10:36:27 -08:00
// LevelDBMetricPersistence is a leveldb-backed persistence layer for metrics.
type LevelDBMetricPersistence struct {
	// CurationRemarks is a ledger of progress for the various curators.
	CurationRemarks CurationRemarker
	// FingerprintToMetrics maps a fingerprint to the metric it identifies.
	FingerprintToMetrics FingerprintMetricIndex
	// LabelPairToFingerprints maps a label pair to the fingerprints of the
	// metrics that carry it.
	LabelPairToFingerprints LabelPairFingerprintIndex
	// MetricHighWatermarks records the youngest sample in the database per
	// metric.
	MetricHighWatermarks HighWatermarker
	// MetricMembershipIndex answers whether a given metric is known.
	MetricMembershipIndex MetricMembershipIndex

	// Indexer performs the indexing of newly appended metrics.
	Indexer MetricIndexer

	// MetricSamples holds the sample chunks themselves, keyed by SampleKey.
	MetricSamples *leveldb.LevelDBPersistence

	// The remaining indices will be replaced with generalized interface resolvers:
	//
	// type FingerprintResolver interface {
	// 	GetFingerprintForMetric(clientmodel.Metric) (*clientmodel.Fingerprint, bool, error)
	// 	GetFingerprintsForLabelSet(LabelPair) (clientmodel.Fingerprints, bool, error)
	// }

	// type MetricResolver interface {
	// 	GetMetricsForFingerprint(clientmodel.Fingerprints) (FingerprintMetricMapping, bool, error)
	// }
}
var (
	// leveldbChunkSize bounds how many samples are packed under one key.
	leveldbChunkSize = flag.Int("leveldbChunkSize", 200, "Maximum number of samples stored under one key.")

	// These flag values are back of the envelope, though they seem
	// sensible.  Please re-evaluate based on your own needs.
	curationRemarksCacheSize         = flag.Int("curationRemarksCacheSize", 5*1024*1024, "The size for the curation remarks cache (bytes).")
	fingerprintsToLabelPairCacheSize = flag.Int("fingerprintsToLabelPairCacheSizeBytes", 25*1024*1024, "The size for the fingerprint to label pair index (bytes).")
	highWatermarkCacheSize           = flag.Int("highWatermarksByFingerprintSizeBytes", 5*1024*1024, "The size for the metric high watermarks (bytes).")
	labelPairToFingerprintsCacheSize = flag.Int("labelPairToFingerprintsCacheSizeBytes", 25*1024*1024, "The size for the label pair to metric fingerprint index (bytes).")
	metricMembershipIndexCacheSize   = flag.Int("metricMembershipCacheSizeBytes", 5*1024*1024, "The size for the metric membership index (bytes).")
	samplesByFingerprintCacheSize    = flag.Int("samplesByFingerprintCacheSizeBytes", 50*1024*1024, "The size for the samples database (bytes).")
)

// leveldbOpener opens one leveldb-backed subsystem during startup.
type leveldbOpener func()
2014-02-14 10:36:27 -08:00
// Close closes all the underlying persistence layers. It implements the
// MetricPersistence interface.
2013-04-01 04:22:38 -07:00
func ( l * LevelDBMetricPersistence ) Close ( ) {
2014-02-14 10:36:27 -08:00
var persistences = [ ] raw . Database {
2013-05-02 03:49:13 -07:00
l . CurationRemarks ,
2013-08-12 15:36:12 -07:00
l . FingerprintToMetrics ,
2014-02-14 10:36:27 -08:00
l . LabelPairToFingerprints ,
2013-05-02 03:49:13 -07:00
l . MetricHighWatermarks ,
2013-08-12 15:36:12 -07:00
l . MetricMembershipIndex ,
2013-05-02 03:49:13 -07:00
l . MetricSamples ,
2013-02-08 09:03:26 -08:00
}
2013-04-01 04:22:38 -07:00
closerGroup := sync . WaitGroup { }
2013-02-08 09:03:26 -08:00
2013-08-03 09:46:02 -07:00
for _ , c := range persistences {
2013-04-01 04:22:38 -07:00
closerGroup . Add ( 1 )
2014-02-14 10:36:27 -08:00
go func ( c raw . Database ) {
2013-08-03 09:46:02 -07:00
if c != nil {
2014-02-14 10:36:27 -08:00
if err := c . Close ( ) ; err != nil {
glog . Error ( "Error closing persistence: " , err )
2013-08-03 09:46:02 -07:00
}
2013-04-24 02:51:40 -07:00
}
2013-04-01 04:22:38 -07:00
closerGroup . Done ( )
2013-08-03 09:46:02 -07:00
} ( c )
2013-02-08 09:03:26 -08:00
}
2013-04-01 04:22:38 -07:00
closerGroup . Wait ( )
2013-02-08 09:03:26 -08:00
}
2014-02-14 10:36:27 -08:00
// NewLevelDBMetricPersistence returns a LevelDBMetricPersistence object ready
// to use.
2013-04-28 05:47:43 -07:00
func NewLevelDBMetricPersistence ( baseDirectory string ) ( * LevelDBMetricPersistence , error ) {
2014-03-05 13:55:26 -08:00
workers := utility . NewUncertaintyGroup ( 6 )
2013-02-08 09:03:26 -08:00
2014-02-14 10:36:27 -08:00
emission := & LevelDBMetricPersistence { }
2013-02-08 09:03:26 -08:00
var subsystemOpeners = [ ] struct {
name string
opener leveldbOpener
} {
{
"Label Names and Value Pairs by Fingerprint" ,
func ( ) {
var err error
2014-02-27 05:34:46 -08:00
emission . FingerprintToMetrics , err = NewLevelDBFingerprintMetricIndex (
leveldb . LevelDBOptions {
2013-08-05 00:25:47 -07:00
Name : "Metrics by Fingerprint" ,
Purpose : "Index" ,
2013-08-03 09:46:02 -07:00
Path : baseDirectory + "/label_name_and_value_pairs_by_fingerprint" ,
CacheSizeBytes : * fingerprintsToLabelPairCacheSize ,
} ,
2014-02-27 05:34:46 -08:00
)
2013-04-28 05:47:43 -07:00
workers . MayFail ( err )
2013-02-08 09:03:26 -08:00
} ,
} ,
{
"Samples by Fingerprint" ,
func ( ) {
var err error
2013-08-10 08:02:28 -07:00
emission . MetricSamples , err = leveldb . NewLevelDBPersistence ( leveldb . LevelDBOptions {
2013-08-05 00:25:47 -07:00
Name : "Samples" ,
Purpose : "Timeseries" ,
2013-08-03 08:25:03 -07:00
Path : baseDirectory + "/samples_by_fingerprint" ,
CacheSizeBytes : * fingerprintsToLabelPairCacheSize ,
2013-08-10 08:02:28 -07:00
} )
2013-04-28 05:47:43 -07:00
workers . MayFail ( err )
2013-02-08 09:03:26 -08:00
} ,
} ,
2013-03-14 19:24:28 -07:00
{
"High Watermarks by Fingerprint" ,
func ( ) {
var err error
2014-02-27 05:34:46 -08:00
emission . MetricHighWatermarks , err = NewLevelDBHighWatermarker (
leveldb . LevelDBOptions {
2013-08-05 00:25:47 -07:00
Name : "High Watermarks" ,
Purpose : "The youngest sample in the database per metric." ,
Path : baseDirectory + "/high_watermarks_by_fingerprint" ,
CacheSizeBytes : * highWatermarkCacheSize ,
2014-02-27 05:34:46 -08:00
} ,
)
2013-04-28 05:47:43 -07:00
workers . MayFail ( err )
2013-03-14 19:24:28 -07:00
} ,
} ,
2013-02-08 09:03:26 -08:00
{
"Fingerprints by Label Name and Value Pair" ,
func ( ) {
var err error
2014-02-27 05:34:46 -08:00
emission . LabelPairToFingerprints , err = NewLevelDBLabelSetFingerprintIndex (
leveldb . LevelDBOptions {
2013-08-05 00:25:47 -07:00
Name : "Fingerprints by Label Pair" ,
Purpose : "Index" ,
2013-08-03 09:46:02 -07:00
Path : baseDirectory + "/fingerprints_by_label_name_and_value_pair" ,
CacheSizeBytes : * labelPairToFingerprintsCacheSize ,
} ,
2014-02-27 05:34:46 -08:00
)
2013-04-28 05:47:43 -07:00
workers . MayFail ( err )
2013-02-08 09:03:26 -08:00
} ,
} ,
{
"Metric Membership Index" ,
func ( ) {
var err error
2013-08-12 15:36:12 -07:00
emission . MetricMembershipIndex , err = NewLevelDBMetricMembershipIndex (
2014-02-27 05:34:46 -08:00
leveldb . LevelDBOptions {
Name : "Metric Membership" ,
Purpose : "Index" ,
Path : baseDirectory + "/metric_membership_index" ,
CacheSizeBytes : * metricMembershipIndexCacheSize ,
} ,
)
2013-04-28 05:47:43 -07:00
workers . MayFail ( err )
} ,
} ,
{
"Sample Curation Remarks" ,
func ( ) {
var err error
2014-02-27 05:34:46 -08:00
emission . CurationRemarks , err = NewLevelDBCurationRemarker (
leveldb . LevelDBOptions {
2013-08-06 03:00:31 -07:00
Name : "Sample Curation Remarks" ,
Purpose : "Ledger of Progress for Various Curators" ,
Path : baseDirectory + "/curation_remarks" ,
CacheSizeBytes : * curationRemarksCacheSize ,
} ,
2014-02-27 05:34:46 -08:00
)
2013-04-28 05:47:43 -07:00
workers . MayFail ( err )
2013-02-08 09:03:26 -08:00
} ,
} ,
}
for _ , subsystem := range subsystemOpeners {
opener := subsystem . opener
go opener ( )
}
2013-04-28 05:47:43 -07:00
if ! workers . Wait ( ) {
for _ , err := range workers . Errors ( ) {
2013-08-12 09:22:48 -07:00
glog . Error ( "Could not open storage: " , err )
2013-02-08 09:03:26 -08:00
}
2013-04-28 05:47:43 -07:00
2014-02-14 10:36:27 -08:00
return nil , fmt . Errorf ( "unable to open metric persistence" )
2013-02-08 09:03:26 -08:00
}
2013-08-07 03:07:35 -07:00
emission . Indexer = & TotalIndexer {
2013-08-12 15:36:12 -07:00
FingerprintToMetric : emission . FingerprintToMetrics ,
2014-02-14 10:36:27 -08:00
LabelPairToFingerprint : emission . LabelPairToFingerprints ,
2013-08-12 15:36:12 -07:00
MetricMembership : emission . MetricMembershipIndex ,
2013-08-07 03:07:35 -07:00
}
2013-04-28 05:47:43 -07:00
return emission , nil
2013-02-08 09:03:26 -08:00
}
2014-02-14 10:36:27 -08:00
// AppendSample implements the MetricPersistence interface.
2013-06-25 05:02:27 -07:00
func ( l * LevelDBMetricPersistence ) AppendSample ( sample * clientmodel . Sample ) ( err error ) {
2013-04-05 09:03:45 -07:00
defer func ( begin time . Time ) {
2013-03-11 14:21:25 -07:00
duration := time . Since ( begin )
2013-02-08 09:03:26 -08:00
2013-03-01 09:51:36 -08:00
recordOutcome ( duration , err , map [ string ] string { operation : appendSample , result : success } , map [ string ] string { operation : appendSample , result : failure } )
2013-04-05 09:03:45 -07:00
} ( time . Now ( ) )
2013-02-08 09:03:26 -08:00
2013-06-25 05:02:27 -07:00
err = l . AppendSamples ( clientmodel . Samples { sample } )
2013-02-08 09:03:26 -08:00
return
}
2013-03-14 15:42:28 -07:00
// groupByFingerprint collects all of the provided samples, groups them
// together by their respective metric fingerprint, and finally sorts
// them chronologically.
2013-06-25 05:02:27 -07:00
func groupByFingerprint ( samples clientmodel . Samples ) map [ clientmodel . Fingerprint ] clientmodel . Samples {
fingerprintToSamples := map [ clientmodel . Fingerprint ] clientmodel . Samples { }
2013-02-08 09:03:26 -08:00
for _ , sample := range samples {
2013-06-25 05:02:27 -07:00
fingerprint := & clientmodel . Fingerprint { }
fingerprint . LoadFromMetric ( sample . Metric )
samples := fingerprintToSamples [ * fingerprint ]
2013-02-08 09:03:26 -08:00
samples = append ( samples , sample )
2013-06-25 05:02:27 -07:00
fingerprintToSamples [ * fingerprint ] = samples
2013-02-08 09:03:26 -08:00
}
2013-05-21 07:11:35 -07:00
sortingSemaphore := make ( chan bool , sortConcurrency )
doneSorting := sync . WaitGroup { }
2013-03-14 15:42:28 -07:00
2013-02-08 09:03:26 -08:00
for _ , samples := range fingerprintToSamples {
2013-03-04 11:43:07 -08:00
doneSorting . Add ( 1 )
2013-03-14 15:42:28 -07:00
2013-06-25 05:02:27 -07:00
sortingSemaphore <- true
go func ( samples clientmodel . Samples ) {
2013-02-08 09:03:26 -08:00
sort . Sort ( samples )
2013-06-25 05:02:27 -07:00
<- sortingSemaphore
2013-03-04 11:43:07 -08:00
doneSorting . Done ( )
2013-02-08 09:03:26 -08:00
} ( samples )
}
2013-03-04 11:43:07 -08:00
doneSorting . Wait ( )
2013-02-08 09:03:26 -08:00
2013-03-14 15:42:28 -07:00
return fingerprintToSamples
}
2013-03-07 11:01:32 -08:00
2013-06-25 05:02:27 -07:00
func ( l * LevelDBMetricPersistence ) refreshHighWatermarks ( groups map [ clientmodel . Fingerprint ] clientmodel . Samples ) ( err error ) {
2013-04-05 09:03:45 -07:00
defer func ( begin time . Time ) {
2013-03-14 19:24:28 -07:00
duration := time . Since ( begin )
recordOutcome ( duration , err , map [ string ] string { operation : refreshHighWatermarks , result : success } , map [ string ] string { operation : refreshHighWatermarks , result : failure } )
2013-04-05 09:03:45 -07:00
} ( time . Now ( ) )
2013-03-14 19:24:28 -07:00
2013-08-05 00:25:47 -07:00
b := FingerprintHighWatermarkMapping { }
for fp , ss := range groups {
if len ( ss ) == 0 {
2013-06-08 01:27:44 -07:00
continue
}
2013-03-14 19:24:28 -07:00
2013-08-05 00:25:47 -07:00
b [ fp ] = ss [ len ( ss ) - 1 ] . Timestamp
2013-03-14 19:24:28 -07:00
}
2013-08-05 00:25:47 -07:00
return l . MetricHighWatermarks . UpdateBatch ( b )
2013-03-14 19:24:28 -07:00
}
2014-02-14 10:36:27 -08:00
// AppendSamples appends the given Samples to the database and indexes them.
//
// The samples are grouped and sorted by fingerprint, then written in chunks of
// at most *leveldbChunkSize samples per key. Metric indexing and high
// watermark refreshing run concurrently with the chunked write.
func (l *LevelDBMetricPersistence) AppendSamples(samples clientmodel.Samples) (err error) {
	// Record duration and success/failure telemetry on exit.
	defer func(begin time.Time) {
		duration := time.Since(begin)
		recordOutcome(duration, err, map[string]string{operation: appendSamples, result: success}, map[string]string{operation: appendSamples, result: failure})
	}(time.Now())

	fingerprintToSamples := groupByFingerprint(samples)
	// Buffered so the goroutines below never block on send.
	indexErrChan := make(chan error, 1)
	watermarkErrChan := make(chan error, 1)

	// Index each fingerprint's metric (taken from its first sample)
	// concurrently with the sample writes.
	go func(groups map[clientmodel.Fingerprint]clientmodel.Samples) {
		metrics := FingerprintMetricMapping{}

		for fingerprint, samples := range groups {
			metrics[fingerprint] = samples[0].Metric
		}

		indexErrChan <- l.Indexer.IndexMetrics(metrics)
	}(fingerprintToSamples)

	// Refresh the per-fingerprint high watermarks concurrently as well.
	go func(groups map[clientmodel.Fingerprint]clientmodel.Samples) {
		watermarkErrChan <- l.refreshHighWatermarks(groups)
	}(fingerprintToSamples)

	samplesBatch := leveldb.NewBatch()
	defer samplesBatch.Close()

	// Scratch key and value buffers reused across all chunks.
	key := &SampleKey{}
	keyDto := &dto.SampleKey{}
	values := make(Values, 0, *leveldbChunkSize)

	for fingerprint, group := range fingerprintToSamples {
		for {
			// Deliberately shadows the outer 'values' to reuse its
			// backing array for this chunk.
			values := values[:0]

			lengthOfGroup := len(group)

			if lengthOfGroup == 0 {
				break
			}

			// Take at most *leveldbChunkSize samples for this chunk.
			take := *leveldbChunkSize
			if lengthOfGroup < take {
				take = lengthOfGroup
			}

			chunk := group[0:take]
			group = group[take:lengthOfGroup]

			// Key spans the chunk's first and last timestamps; samples
			// are chronologically sorted by groupByFingerprint.
			key.Fingerprint = &fingerprint
			key.FirstTimestamp = chunk[0].Timestamp
			key.LastTimestamp = chunk[take-1].Timestamp
			key.SampleCount = uint32(take)

			// Serialize the key before the next iteration mutates 'key'.
			key.Dump(keyDto)

			for _, sample := range chunk {
				values = append(values, &SamplePair{
					Timestamp: sample.Timestamp,
					Value:     sample.Value,
				})
			}
			val := values.marshal()
			samplesBatch.PutRaw(keyDto, val)
		}
	}

	err = l.MetricSamples.Commit(samplesBatch)
	if err != nil {
		return
	}

	// Join the two background tasks; first error wins.
	err = <-indexErrChan
	if err != nil {
		return
	}

	err = <-watermarkErrChan
	if err != nil {
		return
	}

	return
}
2013-06-25 05:02:27 -07:00
func extractSampleKey ( i leveldb . Iterator ) ( * SampleKey , error ) {
2013-04-21 10:16:15 -07:00
k := & dto . SampleKey { }
2013-08-29 06:15:22 -07:00
if err := i . Key ( k ) ; err != nil {
2013-06-25 05:02:27 -07:00
return nil , err
2013-03-06 18:16:20 -08:00
}
2013-06-25 05:02:27 -07:00
key := & SampleKey { }
key . Load ( k )
2012-12-25 04:50:36 -08:00
2013-06-25 05:02:27 -07:00
return key , nil
2012-12-25 04:50:36 -08:00
}
2013-08-03 09:46:02 -07:00
func ( l * LevelDBMetricPersistence ) hasIndexMetric ( m clientmodel . Metric ) ( value bool , err error ) {
2013-04-05 09:03:45 -07:00
defer func ( begin time . Time ) {
2013-03-11 14:21:25 -07:00
duration := time . Since ( begin )
2013-01-23 08:18:45 -08:00
2013-03-01 09:51:36 -08:00
recordOutcome ( duration , err , map [ string ] string { operation : hasIndexMetric , result : success } , map [ string ] string { operation : hasIndexMetric , result : failure } )
2013-04-05 09:03:45 -07:00
} ( time . Now ( ) )
2013-01-23 08:18:45 -08:00
2013-08-12 15:36:12 -07:00
return l . MetricMembershipIndex . Has ( m )
2012-12-09 07:27:12 -08:00
}
2014-02-14 10:36:27 -08:00
// HasLabelPair returns true if the given LabelPair is present in the underlying
// LabelPair index.
2013-08-03 09:46:02 -07:00
func ( l * LevelDBMetricPersistence ) HasLabelPair ( p * LabelPair ) ( value bool , err error ) {
2013-04-05 09:03:45 -07:00
defer func ( begin time . Time ) {
2013-03-11 14:21:25 -07:00
duration := time . Since ( begin )
2013-01-23 08:18:45 -08:00
2013-03-01 09:51:36 -08:00
recordOutcome ( duration , err , map [ string ] string { operation : hasLabelPair , result : success } , map [ string ] string { operation : hasLabelPair , result : failure } )
2013-04-05 09:03:45 -07:00
} ( time . Now ( ) )
2013-01-23 08:18:45 -08:00
2014-02-14 10:36:27 -08:00
return l . LabelPairToFingerprints . Has ( p )
2012-12-09 07:27:12 -08:00
}
2014-02-14 10:36:27 -08:00
// GetFingerprintsForLabelSet returns the Fingerprints for the given LabelSet by
// querying the underlying LabelPairFingerprintIndex for each LabelPair
// contained in LabelSet. It implements the MetricPersistence interface.
2013-06-25 05:02:27 -07:00
func ( l * LevelDBMetricPersistence ) GetFingerprintsForLabelSet ( labelSet clientmodel . LabelSet ) ( fps clientmodel . Fingerprints , err error ) {
2013-04-05 09:03:45 -07:00
defer func ( begin time . Time ) {
2013-03-11 14:21:25 -07:00
duration := time . Since ( begin )
2012-12-09 07:27:12 -08:00
2013-03-01 09:51:36 -08:00
recordOutcome ( duration , err , map [ string ] string { operation : getFingerprintsForLabelSet , result : success } , map [ string ] string { operation : getFingerprintsForLabelSet , result : failure } )
2013-04-05 09:03:45 -07:00
} ( time . Now ( ) )
2012-12-09 07:27:12 -08:00
2013-01-13 02:15:01 -08:00
sets := [ ] utility . Set { }
2013-06-25 05:02:27 -07:00
for name , value := range labelSet {
2014-02-14 10:36:27 -08:00
fps , _ , err := l . LabelPairToFingerprints . Lookup ( & LabelPair {
2013-08-03 09:46:02 -07:00
Name : name ,
Value : value ,
} )
2012-12-25 04:50:36 -08:00
if err != nil {
2013-08-03 09:46:02 -07:00
return nil , err
2013-06-08 01:27:44 -07:00
}
2012-12-25 04:50:36 -08:00
2013-01-13 02:15:01 -08:00
set := utility . Set { }
2013-08-03 09:46:02 -07:00
for _ , fp := range fps {
2013-05-17 03:58:15 -07:00
set . Add ( * fp )
2012-12-09 07:27:12 -08:00
}
2013-01-13 02:15:01 -08:00
sets = append ( sets , set )
}
numberOfSets := len ( sets )
if numberOfSets == 0 {
2013-06-25 05:02:27 -07:00
return nil , nil
2013-01-13 02:15:01 -08:00
}
base := sets [ 0 ]
for i := 1 ; i < numberOfSets ; i ++ {
base = base . Intersection ( sets [ i ] )
}
for _ , e := range base . Elements ( ) {
2013-06-25 05:02:27 -07:00
fingerprint := e . ( clientmodel . Fingerprint )
2013-05-17 03:58:15 -07:00
fps = append ( fps , & fingerprint )
2012-12-09 07:27:12 -08:00
}
2013-06-25 05:02:27 -07:00
return fps , nil
2012-12-09 07:27:12 -08:00
}
2014-02-14 10:36:27 -08:00
// GetMetricForFingerprint returns the Metric for the given Fingerprint from the
// underlying FingerprintMetricIndex. It implements the MetricPersistence
// interface.
2013-06-25 05:02:27 -07:00
func ( l * LevelDBMetricPersistence ) GetMetricForFingerprint ( f * clientmodel . Fingerprint ) ( m clientmodel . Metric , err error ) {
2013-04-05 09:03:45 -07:00
defer func ( begin time . Time ) {
2013-03-11 14:21:25 -07:00
duration := time . Since ( begin )
2012-12-25 04:50:36 -08:00
2013-03-01 09:51:36 -08:00
recordOutcome ( duration , err , map [ string ] string { operation : getMetricForFingerprint , result : success } , map [ string ] string { operation : getMetricForFingerprint , result : failure } )
2013-04-05 09:03:45 -07:00
} ( time . Now ( ) )
2012-12-25 04:50:36 -08:00
2013-08-03 09:46:02 -07:00
// TODO(matt): Update signature to work with ok.
2013-08-12 15:36:12 -07:00
m , _ , err = l . FingerprintToMetrics . Lookup ( f )
2012-12-12 03:53:34 -08:00
2013-06-08 01:27:44 -07:00
return m , nil
2012-12-12 03:53:34 -08:00
}
2014-02-14 10:36:27 -08:00
// GetAllValuesForLabel gets all label values that are associated with the
// provided label name.
2013-06-25 05:02:27 -07:00
func ( l * LevelDBMetricPersistence ) GetAllValuesForLabel ( labelName clientmodel . LabelName ) ( values clientmodel . LabelValues , err error ) {
2013-03-26 03:45:56 -07:00
filter := & LabelNameFilter {
labelName : labelName ,
}
labelValuesOp := & CollectLabelValuesOp { }
2013-02-06 08:06:39 -08:00
2014-02-14 10:36:27 -08:00
_ , err = l . LabelPairToFingerprints . ForEach ( & MetricKeyDecoder { } , filter , labelValuesOp )
2013-02-06 08:06:39 -08:00
if err != nil {
return
}
2013-03-26 03:45:56 -07:00
values = labelValuesOp . labelValues
2013-02-06 08:06:39 -08:00
return
}
2013-02-08 09:03:26 -08:00
2013-08-06 03:00:31 -07:00
// Prune compacts each database's keyspace serially.
//
// Beware that it would probably be imprudent to run this on a live user-facing
// server due to latency implications.
func (l *LevelDBMetricPersistence) Prune() {
	l.CurationRemarks.Prune()
	l.FingerprintToMetrics.Prune()
	l.LabelPairToFingerprints.Prune()
	l.MetricHighWatermarks.Prune()
	l.MetricMembershipIndex.Prune()
	l.MetricSamples.Prune()
}
2014-02-14 10:36:27 -08:00
// Sizes returns the sum of all sizes of the underlying databases.
2013-08-06 03:00:31 -07:00
func ( l * LevelDBMetricPersistence ) Sizes ( ) ( total uint64 , err error ) {
2013-05-10 16:02:57 -07:00
size := uint64 ( 0 )
2014-02-14 10:36:27 -08:00
if size , err = l . CurationRemarks . Size ( ) ; err != nil {
2013-05-10 16:02:57 -07:00
return 0 , err
2013-05-10 07:41:02 -07:00
}
2013-05-10 16:02:57 -07:00
total += size
2014-02-14 10:36:27 -08:00
if size , err = l . FingerprintToMetrics . Size ( ) ; err != nil {
2013-08-05 00:25:47 -07:00
return 0 , err
}
total += size
2013-05-10 16:02:57 -07:00
2014-02-14 10:36:27 -08:00
if size , err = l . LabelPairToFingerprints . Size ( ) ; err != nil {
2013-08-05 00:25:47 -07:00
return 0 , err
}
total += size
2013-05-10 16:02:57 -07:00
2014-02-14 10:36:27 -08:00
if size , err = l . MetricHighWatermarks . Size ( ) ; err != nil {
2013-05-10 16:02:57 -07:00
return 0 , err
2013-05-10 07:41:02 -07:00
}
2013-05-10 16:02:57 -07:00
total += size
2014-02-14 10:36:27 -08:00
if size , err = l . MetricMembershipIndex . Size ( ) ; err != nil {
2013-08-05 00:25:47 -07:00
return 0 , err
}
total += size
2013-05-10 16:02:57 -07:00
2013-08-06 03:00:31 -07:00
if size , err = l . MetricSamples . Size ( ) ; err != nil {
2013-05-10 16:02:57 -07:00
return 0 , err
2013-05-10 07:41:02 -07:00
}
2013-05-10 16:02:57 -07:00
total += size
2013-05-10 07:41:02 -07:00
2013-05-10 16:02:57 -07:00
return total , nil
2013-05-10 07:41:02 -07:00
}
2013-05-14 02:21:27 -07:00
2014-02-14 10:36:27 -08:00
// States returns the DatabaseStates of all underlying databases.
func (l *LevelDBMetricPersistence) States() raw.DatabaseStates {
	// One entry per underlying database, in struct field order.
	return raw.DatabaseStates{
		l.CurationRemarks.State(),
		l.FingerprintToMetrics.State(),
		l.LabelPairToFingerprints.State(),
		l.MetricHighWatermarks.State(),
		l.MetricMembershipIndex.State(),
		l.MetricSamples.State(),
	}
}
2013-10-22 16:06:49 -07:00
2014-02-14 10:36:27 -08:00
// CollectLabelValuesOp implements storage.RecordOperator. It collects the
// encountered LabelValues in a slice.
type CollectLabelValuesOp struct {
	labelValues []clientmodel.LabelValue
}

// Operate implements storage.RecordOperator. 'key' is required to be a
// LabelPair. Its Value is appended to a slice of collected LabelValues.
func (op *CollectLabelValuesOp) Operate(key, value interface{}) (err *storage.OperatorError) {
	pair := key.(LabelPair)
	op.labelValues = append(op.labelValues, pair.Value)
	return nil
}
// MetricKeyDecoder implements storage.RecordDecoder for LabelPairs.
type MetricKeyDecoder struct{}

// DecodeKey implements storage.RecordDecoder. It requires 'in' to be a
// LabelPair protobuf. 'out' is a metric.LabelPair.
func (d *MetricKeyDecoder) DecodeKey(in interface{}) (out interface{}, err error) {
	pair := dto.LabelPair{}
	if err = proto.Unmarshal(in.([]byte), &pair); err != nil {
		return nil, err
	}

	return LabelPair{
		Name:  clientmodel.LabelName(*pair.Name),
		Value: clientmodel.LabelValue(*pair.Value),
	}, nil
}

// DecodeValue implements storage.RecordDecoder. It is a no-op and always
// returns (nil, nil).
func (d *MetricKeyDecoder) DecodeValue(in interface{}) (out interface{}, err error) {
	return nil, nil
}
// MetricSamplesDecoder implements storage.RecordDecoder for SampleKeys.
Use custom timestamp type for sample timestamps and related code.
So far we've been using Go's native time.Time for anything related to sample
timestamps. Since the range of time.Time is much bigger than what we need, this
has created two problems:
- there could be time.Time values which were out of the range/precision of the
time type that we persist to disk, therefore causing incorrectly ordered keys.
One bug caused by this was:
https://github.com/prometheus/prometheus/issues/367
It would be good to use a timestamp type that's more closely aligned with
what the underlying storage supports.
- sizeof(time.Time) is 192, while Prometheus should be ok with a single 64-bit
Unix timestamp (possibly even a 32-bit one). Since we store samples in large
numbers, this seriously affects memory usage. Furthermore, copying/working
with the data will be faster if it's smaller.
*MEMORY USAGE RESULTS*
Initial memory usage comparisons for a running Prometheus with 1 timeseries and
100,000 samples show roughly a 13% decrease in total (VIRT) memory usage. In my
tests, this advantage for some reason decreased a bit the more samples the
timeseries had (to 5-7% for millions of samples). This I can't fully explain,
but perhaps garbage collection issues were involved.
*WHEN TO USE THE NEW TIMESTAMP TYPE*
The new clientmodel.Timestamp type should be used whenever time
calculations are either directly or indirectly related to sample
timestamps.
For example:
- the timestamp of a sample itself
- all kinds of watermarks
- anything that may become or is compared to a sample timestamp (like the timestamp
passed into Target.Scrape()).
When to still use time.Time:
- for measuring durations/times not related to sample timestamps, like duration
telemetry exporting, timers that indicate how frequently to execute some
action, etc.
*NOTE ON OPERATOR OPTIMIZATION TESTS*
We don't use operator optimization code anymore, but it still lives in
the code as dead code. It still has tests, but I couldn't get all of them to
pass with the new timestamp format. I commented out the failing cases for now,
but we should probably remove the dead code soon. I just didn't want to do that
in the same change as this.
Change-Id: I821787414b0debe85c9fffaeb57abd453727af0f
2013-10-28 06:35:02 -07:00
type MetricSamplesDecoder struct { }
2013-10-22 16:06:49 -07:00
2014-02-14 10:36:27 -08:00
// DecodeKey implements storage.RecordDecoder. It requires 'in' to be a
// SampleKey protobuf. 'out' is a metric.SampleKey.
2013-10-22 16:06:49 -07:00
func ( d * MetricSamplesDecoder ) DecodeKey ( in interface { } ) ( interface { } , error ) {
key := & dto . SampleKey { }
err := proto . Unmarshal ( in . ( [ ] byte ) , key )
if err != nil {
return nil , err
}
sampleKey := & SampleKey { }
sampleKey . Load ( key )
return sampleKey , nil
}
2014-02-14 10:36:27 -08:00
// DecodeValue implements storage.RecordDecoder. It requires 'in' to be a
// SampleValueSeries protobuf. 'out' is of type metric.Values.
2013-10-22 16:06:49 -07:00
func ( d * MetricSamplesDecoder ) DecodeValue ( in interface { } ) ( interface { } , error ) {
2014-02-26 14:47:25 -08:00
return unmarshalValues ( in . ( [ ] byte ) ) , nil
2013-10-22 16:06:49 -07:00
}
2014-02-14 10:36:27 -08:00
// AcceptAllFilter implements storage.RecordFilter and accepts all records.
Use custom timestamp type for sample timestamps and related code.
So far we've been using Go's native time.Time for anything related to sample
timestamps. Since the range of time.Time is much bigger than what we need, this
has created two problems:
- there could be time.Time values which were out of the range/precision of the
time type that we persist to disk, therefore causing incorrectly ordered keys.
One bug caused by this was:
https://github.com/prometheus/prometheus/issues/367
It would be good to use a timestamp type that's more closely aligned with
what the underlying storage supports.
- sizeof(time.Time) is 192, while Prometheus should be ok with a single 64-bit
Unix timestamp (possibly even a 32-bit one). Since we store samples in large
numbers, this seriously affects memory usage. Furthermore, copying/working
with the data will be faster if it's smaller.
*MEMORY USAGE RESULTS*
Initial memory usage comparisons for a running Prometheus with 1 timeseries and
100,000 samples show roughly a 13% decrease in total (VIRT) memory usage. In my
tests, this advantage for some reason decreased a bit the more samples the
timeseries had (to 5-7% for millions of samples). This I can't fully explain,
but perhaps garbage collection issues were involved.
*WHEN TO USE THE NEW TIMESTAMP TYPE*
The new clientmodel.Timestamp type should be used whenever time
calculations are either directly or indirectly related to sample
timestamps.
For example:
- the timestamp of a sample itself
- all kinds of watermarks
- anything that may become or is compared to a sample timestamp (like the timestamp
passed into Target.Scrape()).
When to still use time.Time:
- for measuring durations/times not related to sample timestamps, like duration
telemetry exporting, timers that indicate how frequently to execute some
action, etc.
*NOTE ON OPERATOR OPTIMIZATION TESTS*
We don't use operator optimization code anymore, but it still lives in
the code as dead code. It still has tests, but I couldn't get all of them to
pass with the new timestamp format. I commented out the failing cases for now,
but we should probably remove the dead code soon. I just didn't want to do that
in the same change as this.
Change-Id: I821787414b0debe85c9fffaeb57abd453727af0f
2013-10-28 06:35:02 -07:00
type AcceptAllFilter struct { }
2013-10-22 16:06:49 -07:00
2014-02-14 10:36:27 -08:00
// Filter implements storage.RecordFilter. It always returns ACCEPT.
2013-10-22 16:06:49 -07:00
func ( d * AcceptAllFilter ) Filter ( _ , _ interface { } ) storage . FilterResult {
2014-02-14 10:36:27 -08:00
return storage . Accept
}
// LabelNameFilter implements storage.RecordFilter and filters records matching
// a LabelName.
type LabelNameFilter struct {
labelName clientmodel . LabelName
}
// Filter implements storage.RecordFilter. 'key' is expected to be a
// LabelPair. The result is ACCEPT if the Name of the LabelPair matches the
// LabelName of this LabelNameFilter.
func ( f LabelNameFilter ) Filter ( key , value interface { } ) ( filterResult storage . FilterResult ) {
labelPair , ok := key . ( LabelPair )
if ok && labelPair . Name == f . labelName {
return storage . Accept
}
return storage . Skip
2013-10-22 16:06:49 -07:00
}