Merge pull request #115 from prometheus/fix/storage/nil-behaviors

Validate diskFrontier domain for series candidate.
Matt T. Proud 2013-04-09 03:08:29 -07:00
commit 6146116e2f
15 changed files with 118 additions and 153 deletions

@@ -35,11 +35,11 @@ type diskFrontier struct
     lastSupertime time.Time
 }
 
-func (f *diskFrontier) String() string {
+func (f diskFrontier) String() string {
     return fmt.Sprintf("diskFrontier from %s at %s to %s at %s", f.firstFingerprint.ToRowKey(), f.firstSupertime, f.lastFingerprint.ToRowKey(), f.lastSupertime)
 }
 
-func (f *diskFrontier) ContainsFingerprint(fingerprint model.Fingerprint) bool {
+func (f diskFrontier) ContainsFingerprint(fingerprint model.Fingerprint) bool {
     return !(fingerprint.Less(f.firstFingerprint) || f.lastFingerprint.Less(fingerprint))
 }
 
@@ -48,12 +48,15 @@ func newDiskFrontier(i leveldb.Iterator) (d *diskFrontier, err error) {
     if !i.SeekToLast() || i.Key() == nil {
         return
     }
 
     lastKey, err := extractSampleKey(i)
     if err != nil {
         panic(err)
     }
 
-    i.SeekToFirst()
+    if !i.SeekToFirst() || i.Key() == nil {
+        return
+    }
+
     firstKey, err := extractSampleKey(i)
     if i.Key() == nil {
         return
@@ -92,6 +95,13 @@ func newSeriesFrontier(f model.Fingerprint, d diskFrontier, i leveldb.Iterator)
         upperSeek = lastSupertime
     )
 
+    // If the diskFrontier for this iterator says that the candidate fingerprint
+    // is outside of its seeking domain, there is no way that a seriesFrontier
+    // could be materialized. Simply bail.
+    if !d.ContainsFingerprint(f) {
+        return
+    }
+
     // If we are either the first or the last key in the database, we need to use
     // pessimistic boundary frontiers.
     if f.Equal(d.firstFingerprint) {
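
The guard added above leans on ContainsFingerprint treating the disk frontier as the closed interval [firstFingerprint, lastFingerprint] under Fingerprint.Less. A minimal, self-contained sketch of that same check, using a hypothetical integer key in place of model.Fingerprint:

    package main

    import "fmt"

    type fp uint64

    func (a fp) Less(b fp) bool { return a < b }

    type frontier struct{ first, last fp }

    // contains mirrors !(f < first || last < f), i.e. first <= f <= last.
    func (d frontier) contains(f fp) bool {
        return !(f.Less(d.first) || d.last.Less(f))
    }

    func main() {
        d := frontier{first: 10, last: 20}
        fmt.Println(d.contains(5), d.contains(10), d.contains(15), d.contains(25))
        // Output: false true true false
    }

With that check in place, newSeriesFrontier can bail out early with a nil frontier instead of seeking the iterator through keys that cannot belong to the candidate series.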

@@ -24,7 +24,7 @@ import (
 type MetricPersistence interface {
     // A storage system may rely on external resources and thusly should be
     // closed when finished.
-    Close() error
+    Close()
 
     // Commit all pending operations, if any, since some of the storage components
     // queue work on channels and operate on it in bulk.

@@ -24,7 +24,6 @@ import (
     index "github.com/prometheus/prometheus/storage/raw/index/leveldb"
     leveldb "github.com/prometheus/prometheus/storage/raw/leveldb"
     "github.com/prometheus/prometheus/utility"
-    "io"
     "log"
     "sort"
     "sync"
@@ -58,68 +57,31 @@ var (
 )
 
 type leveldbOpener func()
+type leveldbCloser interface {
+    Close()
+}
 
-func (l *LevelDBMetricPersistence) Close() error {
-    var persistences = []struct {
-        name   string
-        closer io.Closer
-    }{
-        {
-            "Fingerprint to Label Name and Value Pairs",
-            l.fingerprintToMetrics,
-        },
-        {
-            "Fingerprint High Watermarks",
-            l.metricHighWatermarks,
-        },
-        {
-            "Fingerprint Samples",
-            l.metricSamples,
-        },
-        {
-            "Label Name to Fingerprints",
-            l.labelNameToFingerprints,
-        },
-        {
-            "Label Name and Value Pairs to Fingerprints",
-            l.labelSetToFingerprints,
-        },
-        {
-            "Metric Membership Index",
-            l.metricMembershipIndex,
-        },
+func (l *LevelDBMetricPersistence) Close() {
+    var persistences = []leveldbCloser{
+        l.fingerprintToMetrics,
+        l.metricHighWatermarks,
+        l.metricSamples,
+        l.labelNameToFingerprints,
+        l.labelSetToFingerprints,
+        l.metricMembershipIndex,
     }
 
-    errorChannel := make(chan error, len(persistences))
+    closerGroup := sync.WaitGroup{}
 
-    for _, persistence := range persistences {
-        name := persistence.name
-        closer := persistence.closer
-
-        go func(name string, closer io.Closer) {
-            if closer != nil {
-                closingError := closer.Close()
-
-                if closingError != nil {
-                    log.Printf("Could not close a LevelDBPersistence storage container; inconsistencies are possible: %q\n", closingError)
-                }
-
-                errorChannel <- closingError
-            } else {
-                errorChannel <- nil
-            }
-        }(name, closer)
+    for _, closer := range persistences {
+        closerGroup.Add(1)
+        go func(closer leveldbCloser) {
+            closer.Close()
+            closerGroup.Done()
+        }(closer)
     }
 
-    for i := 0; i < cap(errorChannel); i++ {
-        closingError := <-errorChannel
-
-        if closingError != nil {
-            return closingError
-        }
-    }
-
-    return nil
+    closerGroup.Wait()
 }
 
 func NewLevelDBMetricPersistence(baseDirectory string) (persistence *LevelDBMetricPersistence, err error) {
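
The rewritten Close above fans out one goroutine per sub-store and waits on a sync.WaitGroup rather than collecting errors over a channel, which is what allows the method to drop its error return. A self-contained sketch of that fan-out/wait pattern (the closer type and store names here are illustrative, not the ones in the file):

    package main

    import (
        "fmt"
        "sync"
    )

    // closer is the minimal contract the persistence layer now relies on.
    type closer interface {
        Close()
    }

    type store struct{ name string }

    func (s store) Close() { fmt.Printf("closed %s\n", s.name) }

    func closeAll(closers []closer) {
        group := sync.WaitGroup{}
        for _, c := range closers {
            group.Add(1)
            go func(c closer) {
                // Each sub-store closes concurrently; Done signals completion.
                c.Close()
                group.Done()
            }(c)
        }
        // Wait blocks until every goroutine has called Done.
        group.Wait()
    }

    func main() {
        closeAll([]closer{store{"samples"}, store{"index"}, store{"watermarks"}})
    }

The trade-off is that close failures are no longer reported to the caller; the interface change in the previous hunk makes that explicit.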

@@ -327,7 +327,7 @@ func (s memorySeriesStorage) GetRangeValues(fp model.Fingerprint, i model.Interv
     return
 }
 
-func (s memorySeriesStorage) Close() (err error) {
+func (s memorySeriesStorage) Close() {
     // This can probably be simplified:
     //
     // s.fingerPrintToSeries = map[model.Fingerprint]*stream{}
@@ -344,8 +344,6 @@ func (s memorySeriesStorage) Close() (err error) {
     for labelName := range s.labelNameToFingerprints {
         delete(s.labelNameToFingerprints, labelName)
     }
-
-    return
 }
 
 func (s memorySeriesStorage) GetAllValuesForLabel(labelName model.LabelName) (values model.LabelValues, err error) {

@@ -552,13 +552,8 @@ func GetValueAtTimeTests(persistenceMaker func() (MetricPersistence, test.Closer
         func() {
             p, closer := persistenceMaker()
-            defer func() {
-                defer closer.Close()
-                err := p.Close()
-                if err != nil {
-                    t.Fatalf("Encountered anomaly closing persistence: %q\n", err)
-                }
-            }()
+            defer closer.Close()
+            defer p.Close()
 
             m := model.Metric{
                 model.MetricNameLabel: "age_in_years",
@@ -994,13 +989,8 @@ func GetBoundaryValuesTests(persistenceMaker func() (MetricPersistence, test.Clo
         func() {
             p, closer := persistenceMaker()
-            defer func() {
-                defer closer.Close()
-                err := p.Close()
-                if err != nil {
-                    t.Fatalf("Encountered anomaly closing persistence: %q\n", err)
-                }
-            }()
+            defer closer.Close()
+            defer p.Close()
 
             m := model.Metric{
                 model.MetricNameLabel: "age_in_years",
@@ -1348,13 +1338,8 @@ func GetRangeValuesTests(persistenceMaker func() (MetricPersistence, test.Closer
         func() {
             p, closer := persistenceMaker()
-            defer func() {
-                defer closer.Close()
-                err := p.Close()
-                if err != nil {
-                    t.Fatalf("Encountered anomaly closing persistence: %q\n", err)
-                }
-            }()
+            defer closer.Close()
+            defer p.Close()
 
             m := model.Metric{
                 model.MetricNameLabel: "age_in_years",
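
With Close stripped of its error return, the tests can replace the wrapper closure with two stacked defers. Deferred calls run last-in, first-out, so p.Close() now runs before the fixture's closer.Close(). A small illustration of that ordering, with hypothetical names:

    package main

    import "fmt"

    func main() {
        defer fmt.Println("closer.Close()") // deferred first, runs last
        defer fmt.Println("p.Close()")      // deferred second, runs first

        fmt.Println("test body")
        // Output:
        // test body
        // p.Close()
        // closer.Close()
    }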

@@ -189,12 +189,7 @@ func StochasticTests(persistenceMaker func() (MetricPersistence, test.Closer), t
     stochastic := func(x int) (success bool) {
         p, closer := persistenceMaker()
         defer closer.Close()
-        defer func() {
-            err := p.Close()
-            if err != nil {
-                t.Error(err)
-            }
-        }()
+        defer p.Close()
 
         seed := rand.NewSource(int64(x))
         random := rand.New(seed)

@@ -56,12 +56,7 @@ func buildLevelDBTestPersistence(name string, f func(p MetricPersistence, t test
         t.Errorf("Could not create LevelDB Metric Persistence: %q\n", err)
     }
 
-    defer func() {
-        err := p.Close()
-        if err != nil {
-            t.Errorf("Anomaly while closing database: %q\n", err)
-        }
-    }()
+    defer p.Close()
 
     f(p, t)
 }
@@ -72,12 +67,7 @@ func buildMemoryTestPersistence(f func(p MetricPersistence, t test.Tester)) func
     p := NewMemorySeriesStorage()
 
-    defer func() {
-        err := p.Close()
-        if err != nil {
-            t.Errorf("Anomaly while closing database: %q\n", err)
-        }
-    }()
+    defer p.Close()
 
     f(p, t)
 }

@@ -145,7 +145,7 @@ func (t *tieredStorage) MakeView(builder ViewRequestBuilder, deadline time.Durat
     return
 }
 
-func (t *tieredStorage) rebuildDiskFrontier() (err error) {
+func (t *tieredStorage) rebuildDiskFrontier(i leveldb.Iterator) (err error) {
     begin := time.Now()
     defer func() {
         duration := time.Since(begin)
@@ -153,9 +153,6 @@ func (t *tieredStorage) rebuildDiskFrontier() (err error) {
         recordOutcome(duration, err, map[string]string{operation: appendSample, result: success}, map[string]string{operation: rebuildDiskFrontier, result: failure})
     }()
 
-    i := t.diskStorage.metricSamples.NewIterator(true)
-    defer i.Close()
-
     t.diskFrontier, err = newDiskFrontier(i)
     if err != nil {
         panic(err)
@@ -298,8 +295,6 @@ func (f *memoryToDiskFlusher) ForStream(stream stream) (decoder storage.RecordDe
         flusher: f,
     }
 
-    // fmt.Printf("fingerprint -> %s\n", model.NewFingerprintFromMetric(stream.metric).ToRowKey())
-
     return visitor, visitor, visitor
 }
 
@@ -309,11 +304,7 @@ func (f *memoryToDiskFlusher) Flush() {
     for i := 0; i < length; i++ {
         samples = append(samples, <-f.toDiskQueue)
     }
-    start := time.Now()
     f.disk.AppendSamples(samples)
-    if false {
-        fmt.Printf("Took %s to append...\n", time.Since(start))
-    }
 }
 
 func (f memoryToDiskFlusher) Close() {
@@ -360,11 +351,14 @@ func (t *tieredStorage) renderView(viewJob viewJob) {
     var (
         scans = viewJob.builder.ScanJobs()
         view  = newView()
+        // Get a single iterator that will be used for all data extraction below.
+        iterator = t.diskStorage.metricSamples.NewIterator(true)
     )
+    defer iterator.Close()
 
     // Rebuilding of the frontier should happen on a conditional basis if a
     // (fingerprint, timestamp) tuple is outside of the current frontier.
-    err = t.rebuildDiskFrontier()
+    err = t.rebuildDiskFrontier(iterator)
     if err != nil {
         panic(err)
     }
@@ -374,10 +368,6 @@ func (t *tieredStorage) renderView(viewJob viewJob) {
         return
     }
 
-    // Get a single iterator that will be used for all data extraction below.
-    iterator := t.diskStorage.metricSamples.NewIterator(true)
-    defer iterator.Close()
-
     for _, scanJob := range scans {
         seriesFrontier, err := newSeriesFrontier(scanJob.fingerprint, *t.diskFrontier, iterator)
         if err != nil {
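
renderView now constructs one snapshotted iterator up front and threads it through rebuildDiskFrontier and every scan, instead of rebuildDiskFrontier opening and closing a private iterator of its own. A rough sketch of that lifetime-injection shape, using stand-in types rather than the package's real ones:

    package main

    import "fmt"

    // iterator stands in for the leveldb.Iterator interface used above.
    type iterator interface {
        SeekToLast() bool
        Close()
    }

    type fakeIterator struct{ entries int }

    func (f *fakeIterator) SeekToLast() bool { return f.entries > 0 }
    func (f *fakeIterator) Close()           { fmt.Println("iterator closed exactly once") }

    // rebuildFrontier accepts the iterator rather than creating one, so the
    // caller decides which snapshot it sees and when it is closed.
    func rebuildFrontier(it iterator) error {
        if !it.SeekToLast() {
            return nil // empty store: nothing to bound
        }
        return nil
    }

    func renderView(it iterator) {
        defer it.Close()

        if err := rebuildFrontier(it); err != nil {
            panic(err)
        }
        fmt.Println("scans reuse the same iterator here")
    }

    func main() {
        renderView(&fakeIterator{entries: 3})
    }

One plausible motivation is consistency: the frontier and the scans that depend on it are computed against the same snapshot.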

@@ -21,5 +21,5 @@ type MembershipIndex interface {
     Has(key coding.Encoder) (bool, error)
     Put(key coding.Encoder) error
     Drop(key coding.Encoder) error
-    Close() error
+    Close()
 }

@@ -28,8 +28,8 @@ type LevelDBMembershipIndex struct {
     persistence *leveldb.LevelDBPersistence
 }
 
-func (l *LevelDBMembershipIndex) Close() error {
-    return l.persistence.Close()
+func (l *LevelDBMembershipIndex) Close() {
+    l.persistence.Close()
 }
 
 func (l *LevelDBMembershipIndex) Has(key coding.Encoder) (bool, error) {

@@ -16,14 +16,14 @@ package raw
 import (
     "github.com/prometheus/prometheus/coding"
     "github.com/prometheus/prometheus/storage"
-    "io"
 )
 
 // Persistence models a key-value store for bytes that supports various
 // additional operations.
 type Persistence interface {
-    io.Closer
+    // Close reaps all of the underlying system resources associated with this
+    // persistence.
+    Close()
     // Has informs the user whether a given key exists in the database.
     Has(key coding.Encoder) (bool, error)
     // Get retrieves the key from the database if it exists or returns nil if
@@ -50,8 +50,9 @@ type Persistence interface {
 // en masse. The interface implies no protocol around the atomicity of
 // effectuation.
 type Batch interface {
-    io.Closer
+    // Close reaps all of the underlying system resources associated with this
+    // batch mutation.
+    Close()
     // Put follows the same protocol as Persistence.Put.
     Put(key, value coding.Encoder)
     // Drop follows the same protocol as Persistence.Drop.
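
Swapping the embedded io.Closer for a locally declared, error-free Close() narrows the contract: implementers have nothing to report, and callers can defer Close without a wrapper that inspects an error. A sketch of the resulting call pattern, not the package's code:

    package main

    import "fmt"

    // Persistence-style contract after the change: Close returns nothing.
    type Persistence interface {
        Close()
    }

    type memStore struct{ open bool }

    func (m *memStore) Close() {
        m.open = false
        fmt.Println("resources reaped")
    }

    func main() {
        var p Persistence = &memStore{open: true}
        defer p.Close() // no closure needed, and no error to silently drop

        fmt.Println("doing work")
    }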

@@ -50,8 +50,6 @@ func (b batch) Put(key, value coding.Encoder) {
     b.batch.Put(keyEncoded, valueEncoded)
 }
 
-func (b batch) Close() (err error) {
+func (b batch) Close() {
     b.batch.Close()
-
-    return
 }

@@ -38,4 +38,5 @@ type Iterator interface {
     SeekToFirst() (ok bool)
     SeekToLast() (ok bool)
     Value() []byte
+    Close()
 }

@@ -15,10 +15,12 @@ package leveldb
 import (
     "flag"
+    "fmt"
     "github.com/jmhodges/levigo"
     "github.com/prometheus/prometheus/coding"
     "github.com/prometheus/prometheus/storage"
     "github.com/prometheus/prometheus/storage/raw"
+    "time"
 )
 
 var (
@@ -57,9 +59,34 @@ type levigoIterator struct {
     storage *levigo.DB
     // closed indicates whether the iterator has been closed before.
     closed bool
+    // valid indicates whether the iterator may be used. If a LevelDB iterator
+    // ever becomes invalid, it must be disposed of and cannot be reused.
+    valid bool
+    // creationTime provides the time at which the iterator was made.
+    creationTime time.Time
 }
 
-func (i *levigoIterator) Close() (err error) {
+func (i levigoIterator) String() string {
+    var (
+        valid       = "valid"
+        open        = "open"
+        snapshotted = "snapshotted"
+    )
+
+    if i.closed {
+        open = "closed"
+    }
+    if !i.valid {
+        valid = "invalid"
+    }
+    if i.snapshot == nil {
+        snapshotted = "unsnapshotted"
+    }
+
+    return fmt.Sprintf("levigoIterator created at %s that is %s and %s and %s", i.creationTime, open, valid, snapshotted)
+}
+
+func (i *levigoIterator) Close() {
     if i.closed {
         return
     }
@@ -81,38 +108,49 @@ func (i *levigoIterator) Close() (err error) {
     i.storage = nil
     i.closed = true
+    i.valid = false
 
     return
 }
 
-func (i levigoIterator) Seek(key []byte) (ok bool) {
+func (i *levigoIterator) Seek(key []byte) bool {
     i.iterator.Seek(key)
-    return i.iterator.Valid()
+    i.valid = i.iterator.Valid()
+
+    return i.valid
 }
 
-func (i levigoIterator) SeekToFirst() (ok bool) {
+func (i *levigoIterator) SeekToFirst() bool {
     i.iterator.SeekToFirst()
-    return i.iterator.Valid()
+    i.valid = i.iterator.Valid()
+
+    return i.valid
 }
 
-func (i levigoIterator) SeekToLast() (ok bool) {
+func (i *levigoIterator) SeekToLast() bool {
     i.iterator.SeekToLast()
-    return i.iterator.Valid()
+    i.valid = i.iterator.Valid()
+
+    return i.valid
 }
 
-func (i levigoIterator) Next() (ok bool) {
+func (i *levigoIterator) Next() bool {
     i.iterator.Next()
-    return i.iterator.Valid()
+    i.valid = i.iterator.Valid()
+
+    return i.valid
 }
 
-func (i levigoIterator) Previous() (ok bool) {
+func (i *levigoIterator) Previous() bool {
     i.iterator.Prev()
-    return i.iterator.Valid()
+    i.valid = i.iterator.Valid()
+
+    return i.valid
 }
 
 func (i levigoIterator) Key() (key []byte) {
@@ -166,7 +204,7 @@ func NewLevelDBPersistence(storageRoot string, cacheCapacity, bitsPerBloomFilter
     return
 }
 
-func (l *LevelDBPersistence) Close() (err error) {
+func (l *LevelDBPersistence) Close() {
     // These are deferred to take advantage of forced closing in case of stack
     // unwinding due to anomalies.
     defer func() {
@@ -283,7 +321,7 @@ func (l *LevelDBPersistence) Commit(b raw.Batch) (err error) {
 // will be leaked.
 //
 // The iterator is optionally snapshotable.
-func (l *LevelDBPersistence) NewIterator(snapshotted bool) levigoIterator {
+func (l *LevelDBPersistence) NewIterator(snapshotted bool) Iterator {
     var (
         snapshot    *levigo.Snapshot
         readOptions *levigo.ReadOptions
@@ -299,11 +337,12 @@ func (l *LevelDBPersistence) NewIterator(snapshotted bool) levigoIterator {
         iterator = l.storage.NewIterator(l.readOptions)
     }
 
-    return levigoIterator{
-        iterator:    iterator,
-        readOptions: readOptions,
-        snapshot:    snapshot,
-        storage:     l.storage,
+    return &levigoIterator{
+        creationTime: time.Now(),
+        iterator:     iterator,
+        readOptions:  readOptions,
+        snapshot:     snapshot,
+        storage:      l.storage,
     }
 }
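
The seek methods above switch from value receivers to pointer receivers because they now record i.valid; with a value receiver the assignment would land on a copy of the iterator and be lost. A toy demonstration of the difference, with hypothetical types:

    package main

    import "fmt"

    type iter struct{ valid bool }

    // byValue mutates a copy; the caller's struct is unchanged.
    func (i iter) byValue() { i.valid = true }

    // byPointer mutates the caller's struct in place.
    func (i *iter) byPointer() { i.valid = true }

    func main() {
        a := iter{}
        a.byValue()
        fmt.Println(a.valid) // false

        b := iter{}
        b.byPointer()
        fmt.Println(b.valid) // true
    }

Returning &levigoIterator{...} from NewIterator preserves that guarantee when callers hold the value through the Iterator interface.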

@@ -68,12 +68,8 @@ func (p preparer) Prepare(n string, f FixtureFactory) (t test.TemporaryDirectory
         defer t.Close()
         p.tester.Fatal(err)
     }
 
-    defer func() {
-        err = persistence.Close()
-        if err != nil {
-            p.tester.Fatal(err)
-        }
-    }()
+    defer persistence.Close()
 
     for f.HasNext() {
         key, value := f.Next()