Checkpoint.
This commit is contained in: parent 91c5e29f1f, commit 41068c2e84

main.go: 14 changes
@@ -22,12 +22,11 @@ import (
 	"github.com/prometheus/prometheus/rules"
 	"github.com/prometheus/prometheus/rules/ast"
 	"github.com/prometheus/prometheus/storage/metric"
-	"github.com/prometheus/prometheus/storage/metric/leveldb"
-	"github.com/prometheus/prometheus/storage/metric/memory"
 	"github.com/prometheus/prometheus/web"
 	"log"
 	"os"
 	"os/signal"
+	"time"
 )

 // Commandline flags.
@@ -49,9 +48,9 @@ func main() {

 	var persistence metric.MetricPersistence
 	if *memoryArena {
-		persistence = memory.NewMemorySeriesStorage()
+		persistence = metric.NewMemorySeriesStorage()
 	} else {
-		persistence, err = leveldb.NewLevelDBMetricPersistence(*metricsStoragePath)
+		persistence, err = metric.NewLevelDBMetricPersistence(*metricsStoragePath)
 		if err != nil {
 			log.Fatalf("Error opening storage: %v", err)
 		}
@@ -91,16 +90,23 @@ func main() {

 	web.StartServing(appState)

+	ts := metric.NewTieredStorage(5000, 5000, 100, time.Second*30, time.Second*1, time.Second*20)
+	go ts.Serve()
+	go ts.Expose()
+
 	for {
 		select {
 		case scrapeResult := <-scrapeResults:
 			if scrapeResult.Err == nil {
-				persistence.AppendSample(scrapeResult.Sample)
+				ts.AppendSample(scrapeResult.Sample)
 			}

 		case ruleResult := <-ruleResults:
 			for _, sample := range ruleResult.Samples {
 				// XXX: Wart
-				persistence.AppendSample(*sample)
+				ts.AppendSample(*sample)
 			}
 		}
 	}
@@ -14,55 +14,43 @@
 package dto;

 message LabelPair {
-  optional int64 version = 1 [default = 1];
-
-  optional string name = 2;
-  optional string value = 3;
+  optional string name = 1;
+  optional string value = 2;
 }

 message LabelName {
-  optional int64 version = 1 [default = 1];
-
-  optional string name = 2;
+  optional string name = 1;
 }

 message Metric {
-  optional int64 version = 1 [default = 1];
-
-  repeated LabelPair label_pair = 2;
+  repeated LabelPair label_pair = 1;
 }

 message Fingerprint {
-  optional int64 version = 1 [default = 1];
-
-  optional string signature = 2;
+  optional string signature = 1;
 }

 message FingerprintCollection {
-  optional int64 version = 1 [default = 1];
-
-  repeated Fingerprint member = 2;
+  repeated Fingerprint member = 1;
 }

 message LabelSet {
-  optional int64 version = 1 [default = 1];
-
-  repeated LabelPair member = 2;
+  repeated LabelPair member = 1;
 }

 message SampleKey {
-  optional int64 version = 1 [default = 1];
-
-  optional Fingerprint fingerprint = 2;
-  optional bytes timestamp = 3;
+  optional Fingerprint fingerprint = 1;
+  optional bytes timestamp = 2;
+  optional int64 last_timestamp = 3;
 }

-message SampleValue {
-  optional int64 version = 1 [default = 1];
-
-  optional float value = 2;
+message SampleValueSeries {
+  message Value {
+    optional int64 timestamp = 1;
+    optional float value = 2;
+  }
+  repeated Value value = 1;
 }

 message MembershipIndexValue {
-  optional int64 version = 1 [default = 1];
 }
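For orientation, a minimal sketch of how the regenerated Go bindings for these messages might be populated with goprotobuf. The nested type name SampleValueSeries_Value and the CamelCase field names follow goprotobuf's usual code-generation conventions and are assumptions, not taken from this commit; the timestamp bytes and values are placeholders.

	package main

	import (
		"fmt"

		"code.google.com/p/goprotobuf/proto"
		dto "github.com/prometheus/prometheus/model/generated"
	)

	func main() {
		// A SampleKey now pairs a fingerprint row key with an encoded supertime
		// and the last timestamp covered by the value series it points at.
		key := &dto.SampleKey{
			Fingerprint:   &dto.Fingerprint{Signature: proto.String("04776841610193542734-f-56-o")},
			Timestamp:     []byte("encoded supertime placeholder"),
			LastTimestamp: proto.Int64(1356998400),
		}

		// A SampleValueSeries batches several (timestamp, value) points under
		// that one key instead of storing one SampleValue per row.
		series := &dto.SampleValueSeries{
			Value: []*dto.SampleValueSeries_Value{
				{Timestamp: proto.Int64(1356998400), Value: proto.Float32(42)},
			},
		}

		fmt.Println(key.String(), len(series.Value))
	}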
model/dto.go: 22 changes

@@ -15,8 +15,6 @@ package model

 import (
 	"code.google.com/p/goprotobuf/proto"
-	"crypto/md5"
-	"encoding/hex"
 	dto "github.com/prometheus/prometheus/model/generated"
 	"sort"
 	"time"
@@ -77,12 +75,6 @@ func MetricToDTO(m *Metric) *dto.Metric {
 	}
 }

-func BytesToFingerprint(v []byte) Fingerprint {
-	hash := md5.New()
-	hash.Write(v)
-	return Fingerprint(hex.EncodeToString(hash.Sum([]byte{})))
-}
-
 func LabelSetToDTOs(s *LabelSet) []*dto.LabelPair {
 	metricLength := len(*s)
 	labelNames := make([]string, 0, metricLength)
@@ -121,15 +113,15 @@ func LabelNameToDTO(l *LabelName) *dto.LabelName {
 	}
 }

-func FingerprintToDTO(f *Fingerprint) *dto.Fingerprint {
+func FingerprintToDTO(f Fingerprint) *dto.Fingerprint {
 	return &dto.Fingerprint{
-		Signature: proto.String(string(*f)),
+		Signature: proto.String(f.ToRowKey()),
 	}
 }

-func SampleFromDTO(m *Metric, t *time.Time, v *dto.SampleValue) *Sample {
+func SampleFromDTO(m *Metric, t *time.Time, v *dto.SampleValueSeries) *Sample {
 	s := &Sample{
-		Value:     SampleValue(*v.Value),
+		Value:     SampleValue(*v.Value[0].Value),
 		Timestamp: *t,
 	}

@@ -137,9 +129,3 @@ func SampleFromDTO(m *Metric, t *time.Time, v *dto.SampleValue) *Sample {

 	return s
 }
-
-func (f Fingerprint) ToDTO() *dto.Fingerprint {
-	return &dto.Fingerprint{
-		Signature: proto.String(string(f)),
-	}
-}
model/fingerprinting.go: 205 lines (new file)

// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"code.google.com/p/goprotobuf/proto"
	"encoding/binary"
	"fmt"
	dto "github.com/prometheus/prometheus/model/generated"
	"hash/fnv"
	"sort"
	"strconv"
	"strings"
)

const (
	// rowKeyDelimiter is used to separate formatted versions of a metric's row
	// key.
	rowKeyDelimiter = "-"
)

// Provides a compact representation of a Metric.
type Fingerprint interface {
	// Transforms the fingerprint into a database row key.
	ToRowKey() string
	Hash() uint64
	FirstCharacterOfFirstLabelName() string
	LabelMatterLength() uint
	LastCharacterOfLastLabelValue() string
	ToDTO() *dto.Fingerprint
	Less(Fingerprint) bool
	Equal(Fingerprint) bool
}

// Builds a Fingerprint from a row key.
func NewFingerprintFromRowKey(rowKey string) (f Fingerprint) {
	components := strings.Split(rowKey, rowKeyDelimiter)
	hash, err := strconv.ParseUint(components[0], 10, 64)
	if err != nil {
		panic(err)
	}
	labelMatterLength, err := strconv.ParseUint(components[2], 10, 0)
	if err != nil {
		panic(err)
	}

	return fingerprint{
		hash: hash,
		firstCharacterOfFirstLabelName: components[1],
		labelMatterLength:              uint(labelMatterLength),
		lastCharacterOfLastLabelValue:  components[3],
	}
}

// Builds a Fingerprint from a datastore entry.
func NewFingerprintFromDTO(f *dto.Fingerprint) Fingerprint {
	return NewFingerprintFromRowKey(*f.Signature)
}

// Decomposes a Metric into a Fingerprint.
func NewFingerprintFromMetric(metric Metric) (f Fingerprint) {
	labelLength := len(metric)
	labelNames := make([]string, 0, labelLength)

	for labelName := range metric {
		labelNames = append(labelNames, string(labelName))
	}

	sort.Strings(labelNames)

	summer := fnv.New64a()
	firstCharacterOfFirstLabelName := ""
	lastCharacterOfLastLabelValue := ""
	labelMatterLength := 0

	for i, labelName := range labelNames {
		labelValue := metric[LabelName(labelName)]
		labelNameLength := len(labelName)
		labelValueLength := len(labelValue)
		labelMatterLength += labelNameLength + labelValueLength

		switch i {
		case 0:
			firstCharacterOfFirstLabelName = labelName[0:1]
		case labelLength - 1:
			lastCharacterOfLastLabelValue = string(labelValue[labelValueLength-2 : labelValueLength-1])
		}

		summer.Write([]byte(labelName))
		summer.Write([]byte(reservedDelimiter))
		summer.Write([]byte(labelValue))
	}

	return fingerprint{
		firstCharacterOfFirstLabelName: firstCharacterOfFirstLabelName,
		hash:                           binary.LittleEndian.Uint64(summer.Sum(nil)),
		labelMatterLength:              uint(labelMatterLength),
		lastCharacterOfLastLabelValue:  lastCharacterOfLastLabelValue,
	}
}

// A simplified representation of an entity.
type fingerprint struct {
	// A hashed representation of the underlying entity. For our purposes,
	// FNV-1a 64-bit is used.
	hash                           uint64
	firstCharacterOfFirstLabelName string
	labelMatterLength              uint
	lastCharacterOfLastLabelValue  string
}

func (f fingerprint) ToRowKey() string {
	return strings.Join([]string{fmt.Sprintf("%020d", f.hash), f.firstCharacterOfFirstLabelName, fmt.Sprint(f.labelMatterLength), f.lastCharacterOfLastLabelValue}, rowKeyDelimiter)
}

func (f fingerprint) ToDTO() *dto.Fingerprint {
	return &dto.Fingerprint{
		Signature: proto.String(f.ToRowKey()),
	}
}

func (f fingerprint) Hash() uint64 {
	return f.hash
}

func (f fingerprint) FirstCharacterOfFirstLabelName() string {
	return f.firstCharacterOfFirstLabelName
}

func (f fingerprint) LabelMatterLength() uint {
	return f.labelMatterLength
}

func (f fingerprint) LastCharacterOfLastLabelValue() string {
	return f.lastCharacterOfLastLabelValue
}

func (f fingerprint) Less(o Fingerprint) (before bool) {
	before = f.Hash() <= o.Hash()
	if !before {
		return
	}

	before = sort.StringsAreSorted([]string{f.FirstCharacterOfFirstLabelName(), o.FirstCharacterOfFirstLabelName()})
	if !before {
		return
	}

	before = f.LabelMatterLength() <= o.LabelMatterLength()
	if !before {
		return
	}

	before = sort.StringsAreSorted([]string{f.LastCharacterOfLastLabelValue(), o.LastCharacterOfLastLabelValue()})

	return
}

func (f fingerprint) Equal(o Fingerprint) (equal bool) {
	equal = f.Hash() == o.Hash()
	if !equal {
		return
	}

	equal = f.FirstCharacterOfFirstLabelName() == o.FirstCharacterOfFirstLabelName()
	if !equal {
		return
	}

	equal = f.LabelMatterLength() == o.LabelMatterLength()
	if !equal {
		return
	}

	equal = f.LastCharacterOfLastLabelValue() == o.LastCharacterOfLastLabelValue()

	return
}

// Represents a collection of Fingerprint subject to a given natural sorting
// scheme.
type Fingerprints []Fingerprint

func (f Fingerprints) Len() int {
	return len(f)
}

func (f Fingerprints) Less(i, j int) (less bool) {
	return f[i].Less(f[j])
}

func (f Fingerprints) Swap(i, j int) {
	f[i], f[j] = f[j], f[i]
}
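To make the new row-key scheme concrete, a small illustrative sketch (not part of the commit; the label set is invented, the calls are the ones introduced above):

	package main

	import (
		"fmt"

		"github.com/prometheus/prometheus/model"
	)

	func main() {
		m := model.Metric{
			"name":       "request_metrics_latency_microseconds",
			"instance":   "http://localhost:9090/metrics.json",
			"percentile": "0.010000",
		}

		// Hash the metric into the compact fingerprint representation ...
		f := model.NewFingerprintFromMetric(m)

		// ... and render it as a LevelDB row key of the form
		// <zero-padded hash>-<first char of first label name>-<label matter length>-<char from last label value>.
		rowKey := f.ToRowKey()
		fmt.Println(rowKey, f.Hash())

		// The row key round-trips back into an equal fingerprint.
		g := model.NewFingerprintFromRowKey(rowKey)
		fmt.Println(f.Equal(g)) // true
	}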
@@ -11,17 +11,29 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package leveldb
+package model

 import (
-	index "github.com/prometheus/prometheus/storage/raw/index/leveldb"
-	storage "github.com/prometheus/prometheus/storage/raw/leveldb"
+	"sort"
 )

-type LevelDBMetricPersistence struct {
-	fingerprintToMetrics    *storage.LevelDBPersistence
-	metricSamples           *storage.LevelDBPersistence
-	labelNameToFingerprints *storage.LevelDBPersistence
-	labelSetToFingerprints  *storage.LevelDBPersistence
-	metricMembershipIndex   *index.LevelDBMembershipIndex
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+	return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+	return sort.StringsAreSorted([]string{
+		string(l[i]),
+		string(l[j]),
+	})
+}
+
+func (l LabelNames) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
 }
model/labelname_test.go: 56 lines (new file)
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/prometheus/prometheus/utility/test"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func testLabelNames(t test.Tester) {
|
||||
var scenarios = []struct {
|
||||
in LabelNames
|
||||
out LabelNames
|
||||
}{
|
||||
{
|
||||
in: LabelNames{"ZZZ", "zzz"},
|
||||
out: LabelNames{"ZZZ", "zzz"},
|
||||
},
|
||||
{
|
||||
in: LabelNames{"aaa", "AAA"},
|
||||
out: LabelNames{"AAA", "aaa"},
|
||||
},
|
||||
}
|
||||
|
||||
for i, scenario := range scenarios {
|
||||
sort.Sort(scenario.in)
|
||||
|
||||
for j, expected := range scenario.out {
|
||||
if expected != scenario.in[j] {
|
||||
t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLabelNames(t *testing.T) {
|
||||
testLabelNames(t)
|
||||
}
|
||||
|
||||
func BenchmarkLabelNames(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLabelNames(b)
|
||||
}
|
||||
}
@@ -11,21 +11,40 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package memory
+package model

 import (
-	"github.com/prometheus/prometheus/storage/metric"
-	"testing"
+	"sort"
 )

-var testGetFingerprintsForLabelSetUsesAndForLabelMatching = buildTestPersistence(metric.GetFingerprintsForLabelSetUsesAndForLabelMatchingTests)
-
-func TestGetFingerprintsForLabelSetUsesAndForLabelMatching(t *testing.T) {
-	testGetFingerprintsForLabelSetUsesAndForLabelMatching(t)
+type LabelPair struct {
+	Name  LabelName
+	Value LabelValue
 }

-func BenchmarkGetFingerprintsForLabelSetUsesAndLabelMatching(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		testGetFingerprintsForLabelSetUsesAndForLabelMatching(b)
+type LabelPairs []LabelPair
+
+func (l LabelPairs) Len() int {
+	return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) (less bool) {
+	less = sort.StringsAreSorted([]string{
+		string(l[i].Name),
+		string(l[j].Name),
+	})
+	if !less {
+		return
+	}
+
+	less = sort.StringsAreSorted([]string{
+		string(l[i].Value),
+		string(l[j].Value),
+	})
+
+	return
+}
+
+func (l LabelPairs) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
 }
model/labelpair_test.go: 84 lines (new file)
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/prometheus/prometheus/utility/test"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func testLabelPairs(t test.Tester) {
|
||||
var scenarios = []struct {
|
||||
in LabelPairs
|
||||
out LabelPairs
|
||||
}{
|
||||
{
|
||||
in: LabelPairs{
|
||||
{
|
||||
Name: "AAA",
|
||||
Value: "aaa",
|
||||
},
|
||||
},
|
||||
out: LabelPairs{
|
||||
{
|
||||
Name: "AAA",
|
||||
Value: "aaa",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: LabelPairs{
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "aaa",
|
||||
},
|
||||
{
|
||||
Name: "ZZZ",
|
||||
Value: "aaa",
|
||||
},
|
||||
},
|
||||
out: LabelPairs{
|
||||
{
|
||||
Name: "ZZZ",
|
||||
Value: "aaa",
|
||||
},
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "aaa",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, scenario := range scenarios {
|
||||
sort.Sort(scenario.in)
|
||||
|
||||
for j, expected := range scenario.out {
|
||||
if expected != scenario.in[j] {
|
||||
t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLabelPairs(t *testing.T) {
|
||||
testLabelPairs(t)
|
||||
}
|
||||
|
||||
func BenchmarkLabelPairs(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLabelPairs(b)
|
||||
}
|
||||
}
|
model/labelvalue.go: 35 lines (new file)

// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"sort"
)

// A LabelValue is an associated value for a LabelName.
type LabelValue string

type LabelValues []LabelValue

func (l LabelValues) Len() int {
	return len(l)
}

func (l LabelValues) Less(i, j int) bool {
	return sort.StringsAreSorted([]string{string(l[i]), string(l[j])})
}

func (l LabelValues) Swap(i, j int) {
	l[i], l[j] = l[j], l[i]
}
model/labelvalue_test.go: 56 lines (new file)
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/prometheus/prometheus/utility/test"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func testLabelValues(t test.Tester) {
|
||||
var scenarios = []struct {
|
||||
in LabelValues
|
||||
out LabelValues
|
||||
}{
|
||||
{
|
||||
in: LabelValues{"ZZZ", "zzz"},
|
||||
out: LabelValues{"ZZZ", "zzz"},
|
||||
},
|
||||
{
|
||||
in: LabelValues{"aaa", "AAA"},
|
||||
out: LabelValues{"AAA", "aaa"},
|
||||
},
|
||||
}
|
||||
|
||||
for i, scenario := range scenarios {
|
||||
sort.Sort(scenario.in)
|
||||
|
||||
for j, expected := range scenario.out {
|
||||
if expected != scenario.in[j] {
|
||||
t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLabelValues(t *testing.T) {
|
||||
testLabelValues(t)
|
||||
}
|
||||
|
||||
func BenchmarkLabelValues(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLabelValues(b)
|
||||
}
|
||||
}
@@ -15,8 +15,6 @@ package model

 import (
 	"bytes"
-	"crypto/md5"
-	"encoding/hex"
 	"fmt"
 	"sort"
 	"time"
@@ -27,17 +25,6 @@ const (
 	reservedDelimiter = `"`
 )

-// A Fingerprint is a simplified representation of an entity---e.g., a hash of
-// an entire Metric.
-type Fingerprint string
-
-// A LabelName is a key for a LabelSet or Metric. It has a value associated
-// therewith.
-type LabelName string
-
-// A LabelValue is an associated value for a LabelName.
-type LabelValue string
-
 // A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
 // may be fully-qualified down to the point where it may resolve to a single
 // Metric in the data store or not. All operations that occur within the realm
@@ -78,60 +65,10 @@ func (l LabelSet) String() string {
 	return buffer.String()
 }

-type LabelNames []LabelName
-
-func (l LabelNames) Len() int {
-	return len(l)
-}
-
-func (l LabelNames) Less(i, j int) bool {
-	return l[i] < l[j]
-}
-
-func (l LabelNames) Swap(i, j int) {
-	l[i], l[j] = l[j], l[i]
-}
-
 // A Metric is similar to a LabelSet, but the key difference is that a Metric is
 // a singleton and refers to one and only one stream of samples.
 type Metric map[LabelName]LabelValue

-type Fingerprints []Fingerprint
-
-func (f Fingerprints) Len() int {
-	return len(f)
-}
-
-func (f Fingerprints) Less(i, j int) bool {
-	return sort.StringsAreSorted([]string{string(f[i]), string(f[j])})
-}
-
-func (f Fingerprints) Swap(i, j int) {
-	f[i], f[j] = f[j], f[i]
-}
-
-// Fingerprint generates a fingerprint for this given Metric.
-func (m Metric) Fingerprint() Fingerprint {
-	labelLength := len(m)
-	labelNames := make([]string, 0, labelLength)
-
-	for labelName := range m {
-		labelNames = append(labelNames, string(labelName))
-	}
-
-	sort.Strings(labelNames)
-
-	summer := md5.New()
-
-	for _, labelName := range labelNames {
-		summer.Write([]byte(labelName))
-		summer.Write([]byte(reservedDelimiter))
-		summer.Write([]byte(m[LabelName(labelName)]))
-	}
-
-	return Fingerprint(hex.EncodeToString(summer.Sum(nil)))
-}
-
 // A SampleValue is a representation of a value for a given sample at a given
 // time. It is presently float32 due to that being the representation that
 // Protocol Buffers provide of floats in Go. This is a smell and should be
@@ -146,12 +83,6 @@ func (s SamplePair) MarshalJSON() ([]byte, error) {
 	return []byte(fmt.Sprintf("{\"Value\": \"%f\", \"Timestamp\": %d}", s.Value, s.Timestamp.Unix())), nil
 }

-type Sample struct {
-	Metric    Metric
-	Value     SampleValue
-	Timestamp time.Time
-}
-
 type SamplePair struct {
 	Value     SampleValue
 	Timestamp time.Time
@@ -180,10 +111,3 @@ type Interval struct {
 	OldestInclusive time.Time
 	NewestInclusive time.Time
 }
-
-// PENDING DELETION BELOW THIS LINE
-
-type Samples struct {
-	Value     SampleValue
-	Timestamp time.Time
-}
@@ -21,11 +21,13 @@ import (
 func testMetric(t test.Tester) {
 	var scenarios = []struct {
 		input  map[string]string
-		output Fingerprint
+		hash   uint64
+		rowkey string
 	}{
 		{
 			input:  map[string]string{},
-			output: "d41d8cd98f00b204e9800998ecf8427e",
+			rowkey: "02676020557754725067--0-",
+			hash:   2676020557754725067,
 		},
 		{
 			input: map[string]string{
@@ -33,7 +35,8 @@ func testMetric(t test.Tester) {
 				"occupation":   "robot",
 				"manufacturer": "westinghouse",
 			},
-			output: "18596f03fce001153495d903b8b577c0",
+			rowkey: "04776841610193542734-f-56-o",
+			hash:   4776841610193542734,
 		},
 	}

@@ -43,11 +46,17 @@ func testMetric(t test.Tester) {
 			metric[LabelName(key)] = LabelValue(value)
 		}

-		expected := scenario.output
-		actual := metric.Fingerprint()
+		expectedRowKey := scenario.rowkey
+		expectedHash := scenario.hash
+		fingerprint := NewFingerprintFromMetric(metric)
+		actualRowKey := fingerprint.ToRowKey()
+		actualHash := fingerprint.Hash()

-		if expected != actual {
-			t.Errorf("%d. expected %s, got %s", i, expected, actual)
+		if expectedRowKey != actualRowKey {
+			t.Errorf("%d. expected %s, got %s", i, expectedRowKey, actualRowKey)
 		}
+		if actualHash != expectedHash {
+			t.Errorf("%d. expected %d, got %d", i, expectedHash, actualHash)
+		}
 	}
 }
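As a worked reading of those expected row keys (my annotation, not part of the commit), the key is simply the zero-padded hash, the first character of the first sorted label name, the label matter length, and a character taken from the end of the last label value, joined by "-". A runnable check of the second scenario's key, with the components written out by hand:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// Components of "04776841610193542734-f-56-o" from the test table above.
		var (
			hash                           uint64 = 4776841610193542734
			firstCharacterOfFirstLabelName        = "f"
			labelMatterLength              uint   = 56
			lastCharacterOfLastLabelValue         = "o"
		)

		rowKey := strings.Join([]string{
			fmt.Sprintf("%020d", hash), // zero-padded FNV-1a 64-bit hash
			firstCharacterOfFirstLabelName,
			fmt.Sprint(labelMatterLength), // total bytes of label names plus values
			lastCharacterOfLastLabelValue,
		}, "-")

		fmt.Println(rowKey) // 04776841610193542734-f-56-o
	}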
model/sample.go: 51 lines (new file)

// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"sort"
	"time"
)

type Sample struct {
	Metric    Metric
	Value     SampleValue
	Timestamp time.Time
}

type Samples []Sample

func (s Samples) Len() int {
	return len(s)
}

func (s Samples) Less(i, j int) (less bool) {
	fingerprints := Fingerprints{
		NewFingerprintFromMetric(s[i].Metric),
		NewFingerprintFromMetric(s[j].Metric),
	}

	less = sort.IsSorted(fingerprints)
	if !less {
		return
	}

	less = s[i].Timestamp.Before(s[j].Timestamp)

	return
}

func (s Samples) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
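A small illustrative example of the new Samples ordering (my sketch, not from the commit; the metrics are invented): samples sort primarily by metric fingerprint and secondarily by timestamp, which is presumably the grouping the bulk write path wants before batching points into value series.

	package main

	import (
		"fmt"
		"sort"
		"time"

		"github.com/prometheus/prometheus/model"
	)

	func main() {
		a := model.Metric{"name": "errors_total", "controller": "foo", "operation": "bar"}
		b := model.Metric{"name": "errors_total", "controller": "baz", "operation": "bar"}

		samples := model.Samples{
			{Metric: a, Value: 1, Timestamp: time.Unix(20, 0)},
			{Metric: b, Value: 2, Timestamp: time.Unix(10, 0)},
			{Metric: a, Value: 3, Timestamp: time.Unix(10, 0)},
		}

		// Orders by fingerprint first, then by timestamp within a series.
		sort.Sort(samples)

		for _, s := range samples {
			fmt.Println(model.NewFingerprintFromMetric(s.Metric).ToRowKey(), s.Timestamp.Unix(), s.Value)
		}
	}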
@@ -16,7 +16,7 @@ package rules
 import (
 	"fmt"
 	"github.com/prometheus/prometheus/rules/ast"
-	"github.com/prometheus/prometheus/storage/metric/leveldb"
+	"github.com/prometheus/prometheus/storage/metric"
 	"io/ioutil"
 	"os"
 	"strings"
@@ -172,7 +172,7 @@ func TestExpressions(t *testing.T) {
 			t.Errorf("Could not remove temporary directory: %q\n", err)
 		}
 	}()
-	persistence, err := leveldb.NewLevelDBMetricPersistence(temporaryDirectory)
+	persistence, err := metric.NewLevelDBMetricPersistence(temporaryDirectory)
 	if err != nil {
 		t.Errorf("Could not create LevelDB Metric Persistence: %q\n", err)
 		return
storage/metric/end_to_end_test.go: 462 lines (new file)
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric
|
||||
|
||||
import (
|
||||
"github.com/prometheus/prometheus/model"
|
||||
"github.com/prometheus/prometheus/utility/test"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func GetFingerprintsForLabelSetTests(p MetricPersistence, t test.Tester) {
|
||||
testAppendSample(p, model.Sample{
|
||||
Value: 0,
|
||||
Timestamp: time.Time{},
|
||||
Metric: model.Metric{
|
||||
"name": "my_metric",
|
||||
"request_type": "your_mom",
|
||||
},
|
||||
}, t)
|
||||
|
||||
testAppendSample(p, model.Sample{
|
||||
Value: 0,
|
||||
Timestamp: time.Time{},
|
||||
Metric: model.Metric{
|
||||
"name": "my_metric",
|
||||
"request_type": "your_dad",
|
||||
},
|
||||
}, t)
|
||||
|
||||
result, err := p.GetFingerprintsForLabelSet(model.LabelSet{
|
||||
model.LabelName("name"): model.LabelValue("my_metric"),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 2 {
|
||||
t.Errorf("Expected two elements.")
|
||||
}
|
||||
|
||||
result, err = p.GetFingerprintsForLabelSet(model.LabelSet{
|
||||
model.LabelName("request_type"): model.LabelValue("your_mom"),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 1 {
|
||||
t.Errorf("Expected one element.")
|
||||
}
|
||||
|
||||
result, err = p.GetFingerprintsForLabelSet(model.LabelSet{
|
||||
model.LabelName("request_type"): model.LabelValue("your_dad"),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 1 {
|
||||
t.Errorf("Expected one element.")
|
||||
}
|
||||
}
|
||||
|
||||
func GetFingerprintsForLabelNameTests(p MetricPersistence, t test.Tester) {
|
||||
testAppendSample(p, model.Sample{
|
||||
Value: 0,
|
||||
Timestamp: time.Time{},
|
||||
Metric: model.Metric{
|
||||
"name": "my_metric",
|
||||
"request_type": "your_mom",
|
||||
"language": "english",
|
||||
},
|
||||
}, t)
|
||||
|
||||
testAppendSample(p, model.Sample{
|
||||
Value: 0,
|
||||
Timestamp: time.Time{},
|
||||
Metric: model.Metric{
|
||||
"name": "my_metric",
|
||||
"request_type": "your_dad",
|
||||
"sprache": "deutsch",
|
||||
},
|
||||
}, t)
|
||||
|
||||
b := model.LabelName("name")
|
||||
result, err := p.GetFingerprintsForLabelName(b)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 2 {
|
||||
t.Errorf("Expected two elements.")
|
||||
}
|
||||
|
||||
b = model.LabelName("request_type")
|
||||
result, err = p.GetFingerprintsForLabelName(b)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 2 {
|
||||
t.Errorf("Expected two elements.")
|
||||
}
|
||||
|
||||
b = model.LabelName("language")
|
||||
result, err = p.GetFingerprintsForLabelName(b)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 1 {
|
||||
t.Errorf("Expected one element.")
|
||||
}
|
||||
|
||||
b = model.LabelName("sprache")
|
||||
result, err = p.GetFingerprintsForLabelName(b)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 1 {
|
||||
t.Errorf("Expected one element.")
|
||||
}
|
||||
}
|
||||
|
||||
func GetMetricForFingerprintTests(p MetricPersistence, t test.Tester) {
|
||||
testAppendSample(p, model.Sample{
|
||||
Value: 0,
|
||||
Timestamp: time.Time{},
|
||||
Metric: model.Metric{
|
||||
"request_type": "your_mom",
|
||||
},
|
||||
}, t)
|
||||
|
||||
testAppendSample(p, model.Sample{
|
||||
Value: 0,
|
||||
Timestamp: time.Time{},
|
||||
Metric: model.Metric{
|
||||
"request_type": "your_dad",
|
||||
"one-off": "value",
|
||||
},
|
||||
}, t)
|
||||
|
||||
result, err := p.GetFingerprintsForLabelSet(model.LabelSet{
|
||||
model.LabelName("request_type"): model.LabelValue("your_mom"),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 1 {
|
||||
t.Errorf("Expected one element.")
|
||||
}
|
||||
|
||||
v, e := p.GetMetricForFingerprint(result[0])
|
||||
if e != nil {
|
||||
t.Error(e)
|
||||
}
|
||||
|
||||
if v == nil {
|
||||
t.Fatal("Did not expect nil.")
|
||||
}
|
||||
|
||||
metric := *v
|
||||
|
||||
if len(metric) != 1 {
|
||||
t.Errorf("Expected one-dimensional metric.")
|
||||
}
|
||||
|
||||
if metric["request_type"] != "your_mom" {
|
||||
t.Errorf("Expected metric to match.")
|
||||
}
|
||||
|
||||
result, err = p.GetFingerprintsForLabelSet(model.LabelSet{
|
||||
model.LabelName("request_type"): model.LabelValue("your_dad"),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 1 {
|
||||
t.Errorf("Expected one element.")
|
||||
}
|
||||
|
||||
v, e = p.GetMetricForFingerprint(result[0])
|
||||
|
||||
if v == nil {
|
||||
t.Fatal("Did not expect nil.")
|
||||
}
|
||||
|
||||
metric = *v
|
||||
|
||||
if e != nil {
|
||||
t.Error(e)
|
||||
}
|
||||
|
||||
if len(metric) != 2 {
|
||||
t.Errorf("Expected one-dimensional metric.")
|
||||
}
|
||||
|
||||
if metric["request_type"] != "your_dad" {
|
||||
t.Errorf("Expected metric to match.")
|
||||
}
|
||||
|
||||
if metric["one-off"] != "value" {
|
||||
t.Errorf("Expected metric to match.")
|
||||
}
|
||||
}
|
||||
|
||||
func AppendRepeatingValuesTests(p MetricPersistence, t test.Tester) {
|
||||
metric := model.Metric{
|
||||
"controller": "foo",
|
||||
"name": "errors_total",
|
||||
"operation": "bar",
|
||||
}
|
||||
|
||||
increments := 10
|
||||
repetitions := 500
|
||||
|
||||
for i := 0; i < increments; i++ {
|
||||
for j := 0; j < repetitions; j++ {
|
||||
time := time.Time{}.Add(time.Duration(i) * time.Hour).Add(time.Duration(j) * time.Second)
|
||||
testAppendSample(p, model.Sample{
|
||||
Value: model.SampleValue(i),
|
||||
Timestamp: time,
|
||||
Metric: metric,
|
||||
}, t)
|
||||
}
|
||||
}
|
||||
|
||||
if true {
|
||||
// XXX: Purely a benchmark.
|
||||
return
|
||||
}
|
||||
|
||||
labelSet := model.LabelSet{
|
||||
"controller": "foo",
|
||||
"name": "errors_total",
|
||||
"operation": "bar",
|
||||
}
|
||||
|
||||
for i := 0; i < increments; i++ {
|
||||
for j := 0; j < repetitions; j++ {
|
||||
fingerprints, err := p.GetFingerprintsForLabelSet(labelSet)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(fingerprints) != 1 {
|
||||
t.Fatalf("expected %d fingerprints, got %d", 1, len(fingerprints))
|
||||
}
|
||||
|
||||
time := time.Time{}.Add(time.Duration(i) * time.Hour).Add(time.Duration(j) * time.Second)
|
||||
sample, err := p.GetValueAtTime(metric, time, StalenessPolicy{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if sample == nil {
|
||||
t.Fatal("expected non-nil sample.")
|
||||
}
|
||||
|
||||
expected := model.SampleValue(i)
|
||||
|
||||
if sample.Value != expected {
|
||||
t.Fatalf("expected %d value, got %d", expected, sample.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func AppendsRepeatingValuesTests(p MetricPersistence, t test.Tester) {
|
||||
metric := model.Metric{
|
||||
"controller": "foo",
|
||||
"name": "errors_total",
|
||||
"operation": "bar",
|
||||
}
|
||||
|
||||
increments := 10
|
||||
repetitions := 500
|
||||
|
||||
s := model.Samples{}
|
||||
for i := 0; i < increments; i++ {
|
||||
for j := 0; j < repetitions; j++ {
|
||||
time := time.Time{}.Add(time.Duration(i) * time.Hour).Add(time.Duration(j) * time.Second)
|
||||
s = append(s, model.Sample{
|
||||
Value: model.SampleValue(i),
|
||||
Timestamp: time,
|
||||
Metric: metric,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
p.AppendSamples(s)
|
||||
|
||||
if true {
|
||||
// XXX: Purely a benchmark.
|
||||
return
|
||||
}
|
||||
|
||||
labelSet := model.LabelSet{
|
||||
"controller": "foo",
|
||||
"name": "errors_total",
|
||||
"operation": "bar",
|
||||
}
|
||||
|
||||
for i := 0; i < increments; i++ {
|
||||
for j := 0; j < repetitions; j++ {
|
||||
fingerprints, err := p.GetFingerprintsForLabelSet(labelSet)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(fingerprints) != 1 {
|
||||
t.Fatalf("expected %d fingerprints, got %d", 1, len(fingerprints))
|
||||
}
|
||||
|
||||
time := time.Time{}.Add(time.Duration(i) * time.Hour).Add(time.Duration(j) * time.Second)
|
||||
sample, err := p.GetValueAtTime(metric, time, StalenessPolicy{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if sample == nil {
|
||||
t.Fatal("expected non-nil sample.")
|
||||
}
|
||||
|
||||
expected := model.SampleValue(i)
|
||||
|
||||
if sample.Value != expected {
|
||||
t.Fatalf("expected %d value, got %d", expected, sample.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test Definitions Below
|
||||
|
||||
var testLevelDBGetFingerprintsForLabelSet = buildLevelDBTestPersistence("get_fingerprints_for_labelset", GetFingerprintsForLabelSetTests)
|
||||
|
||||
func TestLevelDBGetFingerprintsForLabelSet(t *testing.T) {
|
||||
testLevelDBGetFingerprintsForLabelSet(t)
|
||||
}
|
||||
|
||||
func BenchmarkLevelDBGetFingerprintsForLabelSet(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLevelDBGetFingerprintsForLabelSet(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testLevelDBGetFingerprintsForLabelName = buildLevelDBTestPersistence("get_fingerprints_for_labelname", GetFingerprintsForLabelNameTests)
|
||||
|
||||
func TestLevelDBGetFingerprintsForLabelName(t *testing.T) {
|
||||
testLevelDBGetFingerprintsForLabelName(t)
|
||||
}
|
||||
|
||||
func BenchmarkLevelDBGetFingerprintsForLabelName(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLevelDBGetFingerprintsForLabelName(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testLevelDBGetMetricForFingerprint = buildLevelDBTestPersistence("get_metric_for_fingerprint", GetMetricForFingerprintTests)
|
||||
|
||||
func TestLevelDBGetMetricForFingerprint(t *testing.T) {
|
||||
testLevelDBGetMetricForFingerprint(t)
|
||||
}
|
||||
|
||||
func BenchmarkLevelDBGetMetricForFingerprint(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLevelDBGetMetricForFingerprint(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testLevelDBAppendRepeatingValues = buildLevelDBTestPersistence("append_repeating_values", AppendRepeatingValuesTests)
|
||||
|
||||
func TestLevelDBAppendRepeatingValues(t *testing.T) {
|
||||
testLevelDBAppendRepeatingValues(t)
|
||||
}
|
||||
|
||||
func BenchmarkLevelDBAppendRepeatingValues(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLevelDBAppendRepeatingValues(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testLevelDBAppendsRepeatingValues = buildLevelDBTestPersistence("appends_repeating_values", AppendsRepeatingValuesTests)
|
||||
|
||||
func TestLevelDBAppendsRepeatingValues(t *testing.T) {
|
||||
testLevelDBAppendsRepeatingValues(t)
|
||||
}
|
||||
|
||||
func BenchmarkLevelDBAppendsRepeatingValues(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLevelDBAppendsRepeatingValues(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testMemoryGetFingerprintsForLabelSet = buildMemoryTestPersistence(GetFingerprintsForLabelSetTests)
|
||||
|
||||
func TestMemoryGetFingerprintsForLabelSet(t *testing.T) {
|
||||
testMemoryGetFingerprintsForLabelSet(t)
|
||||
}
|
||||
|
||||
func BenchmarkMemoryGetFingerprintsForLabelSet(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testMemoryGetFingerprintsForLabelSet(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testMemoryGetFingerprintsForLabelName = buildMemoryTestPersistence(GetFingerprintsForLabelNameTests)
|
||||
|
||||
func TestMemoryGetFingerprintsForLabelName(t *testing.T) {
|
||||
testMemoryGetFingerprintsForLabelName(t)
|
||||
}
|
||||
|
||||
func BenchmarkMemoryGetFingerprintsForLabelName(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testMemoryGetFingerprintsForLabelName(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testMemoryGetMetricForFingerprint = buildMemoryTestPersistence(GetMetricForFingerprintTests)
|
||||
|
||||
func TestMemoryGetMetricForFingerprint(t *testing.T) {
|
||||
testMemoryGetMetricForFingerprint(t)
|
||||
}
|
||||
|
||||
func BenchmarkMemoryGetMetricForFingerprint(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testMemoryGetMetricForFingerprint(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testMemoryAppendRepeatingValues = buildMemoryTestPersistence(AppendRepeatingValuesTests)
|
||||
|
||||
func TestMemoryAppendRepeatingValues(t *testing.T) {
|
||||
testMemoryAppendRepeatingValues(t)
|
||||
}
|
||||
|
||||
func BenchmarkMemoryAppendRepeatingValues(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testMemoryAppendRepeatingValues(b)
|
||||
}
|
||||
}
|
|
@ -1,228 +0,0 @@
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric
|
||||
|
||||
import (
|
||||
"github.com/prometheus/prometheus/model"
|
||||
"github.com/prometheus/prometheus/utility/test"
|
||||
"time"
|
||||
)
|
||||
|
||||
func GetFingerprintsForLabelSetTests(p MetricPersistence, t test.Tester) {
|
||||
appendSample(p, model.Sample{
|
||||
Value: 0,
|
||||
Timestamp: time.Time{},
|
||||
Metric: model.Metric{
|
||||
"name": "my_metric",
|
||||
"request_type": "your_mom",
|
||||
},
|
||||
}, t)
|
||||
|
||||
appendSample(p, model.Sample{
|
||||
Value: 0,
|
||||
Timestamp: time.Time{},
|
||||
Metric: model.Metric{
|
||||
"name": "my_metric",
|
||||
"request_type": "your_dad",
|
||||
},
|
||||
}, t)
|
||||
|
||||
result, err := p.GetFingerprintsForLabelSet(model.LabelSet{
|
||||
model.LabelName("name"): model.LabelValue("my_metric"),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 2 {
|
||||
t.Errorf("Expected two elements.")
|
||||
}
|
||||
|
||||
result, err = p.GetFingerprintsForLabelSet(model.LabelSet{
|
||||
model.LabelName("request_type"): model.LabelValue("your_mom"),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 1 {
|
||||
t.Errorf("Expected one element.")
|
||||
}
|
||||
|
||||
result, err = p.GetFingerprintsForLabelSet(model.LabelSet{
|
||||
model.LabelName("request_type"): model.LabelValue("your_dad"),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 1 {
|
||||
t.Errorf("Expected one element.")
|
||||
}
|
||||
}
|
||||
|
||||
func GetFingerprintsForLabelNameTests(p MetricPersistence, t test.Tester) {
|
||||
appendSample(p, model.Sample{
|
||||
Value: 0,
|
||||
Timestamp: time.Time{},
|
||||
Metric: model.Metric{
|
||||
"name": "my_metric",
|
||||
"request_type": "your_mom",
|
||||
"language": "english",
|
||||
},
|
||||
}, t)
|
||||
|
||||
appendSample(p, model.Sample{
|
||||
Value: 0,
|
||||
Timestamp: time.Time{},
|
||||
Metric: model.Metric{
|
||||
"name": "my_metric",
|
||||
"request_type": "your_dad",
|
||||
"sprache": "deutsch",
|
||||
},
|
||||
}, t)
|
||||
|
||||
b := model.LabelName("name")
|
||||
result, err := p.GetFingerprintsForLabelName(b)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 2 {
|
||||
t.Errorf("Expected two elements.")
|
||||
}
|
||||
|
||||
b = model.LabelName("request_type")
|
||||
result, err = p.GetFingerprintsForLabelName(b)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 2 {
|
||||
t.Errorf("Expected two elements.")
|
||||
}
|
||||
|
||||
b = model.LabelName("language")
|
||||
result, err = p.GetFingerprintsForLabelName(b)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 1 {
|
||||
t.Errorf("Expected one element.")
|
||||
}
|
||||
|
||||
b = model.LabelName("sprache")
|
||||
result, err = p.GetFingerprintsForLabelName(b)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 1 {
|
||||
t.Errorf("Expected one element.")
|
||||
}
|
||||
}
|
||||
|
||||
func GetMetricForFingerprintTests(p MetricPersistence, t test.Tester) {
|
||||
appendSample(p, model.Sample{
|
||||
Value: 0,
|
||||
Timestamp: time.Time{},
|
||||
Metric: model.Metric{
|
||||
"request_type": "your_mom",
|
||||
},
|
||||
}, t)
|
||||
|
||||
appendSample(p, model.Sample{
|
||||
Value: 0,
|
||||
Timestamp: time.Time{},
|
||||
Metric: model.Metric{
|
||||
"request_type": "your_dad",
|
||||
"one-off": "value",
|
||||
},
|
||||
}, t)
|
||||
|
||||
result, err := p.GetFingerprintsForLabelSet(model.LabelSet{
|
||||
model.LabelName("request_type"): model.LabelValue("your_mom"),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 1 {
|
||||
t.Errorf("Expected one element.")
|
||||
}
|
||||
|
||||
v, e := p.GetMetricForFingerprint(result[0])
|
||||
if e != nil {
|
||||
t.Error(e)
|
||||
}
|
||||
|
||||
if v == nil {
|
||||
t.Fatal("Did not expect nil.")
|
||||
}
|
||||
|
||||
metric := *v
|
||||
|
||||
if len(metric) != 1 {
|
||||
t.Errorf("Expected one-dimensional metric.")
|
||||
}
|
||||
|
||||
if metric["request_type"] != "your_mom" {
|
||||
t.Errorf("Expected metric to match.")
|
||||
}
|
||||
|
||||
result, err = p.GetFingerprintsForLabelSet(model.LabelSet{
|
||||
model.LabelName("request_type"): model.LabelValue("your_dad"),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(result) != 1 {
|
||||
t.Errorf("Expected one element.")
|
||||
}
|
||||
|
||||
v, e = p.GetMetricForFingerprint(result[0])
|
||||
|
||||
if v == nil {
|
||||
t.Fatal("Did not expect nil.")
|
||||
}
|
||||
|
||||
metric = *v
|
||||
|
||||
if e != nil {
|
||||
t.Error(e)
|
||||
}
|
||||
|
||||
if len(metric) != 2 {
|
||||
t.Errorf("Expected one-dimensional metric.")
|
||||
}
|
||||
|
||||
if metric["request_type"] != "your_dad" {
|
||||
t.Errorf("Expected metric to match.")
|
||||
}
|
||||
|
||||
if metric["one-off"] != "value" {
|
||||
t.Errorf("Expected metric to match.")
|
||||
}
|
||||
}
|
storage/metric/frontier.go: 170 lines (new file)

// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric

import (
	"fmt"
	"github.com/prometheus/prometheus/coding"
	"github.com/prometheus/prometheus/coding/indexable"
	"github.com/prometheus/prometheus/model"
	dto "github.com/prometheus/prometheus/model/generated"
	"time"
)

// diskFrontier describes an on-disk store of series to provide a
// representation of the known keyspace and time series values available.
//
// This is used to reduce the burden associated with LevelDB iterator
// management.
type diskFrontier struct {
	firstFingerprint model.Fingerprint
	firstSupertime   time.Time
	lastFingerprint  model.Fingerprint
	lastSupertime    time.Time
}

func (f *diskFrontier) String() string {
	return fmt.Sprintf("diskFrontier from %s at %s to %s at %s", f.firstFingerprint.ToRowKey(), f.firstSupertime, f.lastFingerprint.ToRowKey(), f.lastSupertime)
}

func newDiskFrontier(i iterator) (d *diskFrontier, err error) {
	if err != nil {
		return
	}

	i.SeekToLast()
	if i.Key() == nil {
		return
	}
	lastKey, err := extractSampleKey(i)
	if err != nil {
		return
	}

	i.SeekToFirst()
	firstKey, err := extractSampleKey(i)
	if i.Key() == nil {
		return
	}
	if err != nil {
		return
	}

	d = &diskFrontier{}

	d.firstFingerprint = model.NewFingerprintFromRowKey(*firstKey.Fingerprint.Signature)
	d.firstSupertime = indexable.DecodeTime(firstKey.Timestamp)
	d.lastFingerprint = model.NewFingerprintFromRowKey(*lastKey.Fingerprint.Signature)
	d.lastSupertime = indexable.DecodeTime(lastKey.Timestamp)

	return
}

// seriesFrontier represents the valid seek frontier for a given series.
type seriesFrontier struct {
	firstSupertime time.Time
	lastSupertime  time.Time
	lastTime       time.Time
}

func (f seriesFrontier) String() string {
	return fmt.Sprintf("seriesFrontier from %s to %s at %s", f.firstSupertime, f.lastSupertime, f.lastTime)
}

// newSeriesFrontier furnishes a populated seriesFrontier for a given
// fingerprint. A nil seriesFrontier will be returned if the series cannot
// be found in the store.
func newSeriesFrontier(f model.Fingerprint, d diskFrontier, i iterator) (s *seriesFrontier, err error) {
	var (
		lowerSeek = firstSupertime
		upperSeek = lastSupertime
	)

	// If we are either the first or the last key in the database, we need to use
	// pessimistic boundary frontiers.
	if f.Equal(d.firstFingerprint) {
		lowerSeek = indexable.EncodeTime(d.firstSupertime)
	}
	if f.Equal(d.lastFingerprint) {
		upperSeek = indexable.EncodeTime(d.lastSupertime)
	}

	key := &dto.SampleKey{
		Fingerprint: f.ToDTO(),
		Timestamp:   upperSeek,
	}

	raw, err := coding.NewProtocolBufferEncoder(key).Encode()
	if err != nil {
		return
	}
	i.Seek(raw)

	if i.Key() == nil {
		return
	}

	retrievedKey, err := extractSampleKey(i)
	if err != nil {
		return
	}

	retrievedFingerprint := model.NewFingerprintFromRowKey(*retrievedKey.Fingerprint.Signature)

	// The returned fingerprint may not match if the original seek key lives
	// outside of a metric's frontier. This is probable, for we are seeking to
	// the maximum allowed time, which could advance us to the next
	// fingerprint.
	if !retrievedFingerprint.Equal(f) {
		i.Prev()

		retrievedKey, err = extractSampleKey(i)
		if err != nil {
			return
		}
		retrievedFingerprint := model.NewFingerprintFromRowKey(*retrievedKey.Fingerprint.Signature)
		// If the previous key does not match, we know that the requested
		// fingerprint does not live in the database.
		if !retrievedFingerprint.Equal(f) {
			return
		}
	}

	s = &seriesFrontier{
		lastSupertime: indexable.DecodeTime(retrievedKey.Timestamp),
		lastTime:      time.Unix(*retrievedKey.LastTimestamp, 0),
	}

	key.Timestamp = lowerSeek

	raw, err = coding.NewProtocolBufferEncoder(key).Encode()
	if err != nil {
		return
	}

	i.Seek(raw)

	retrievedKey, err = extractSampleKey(i)
	if err != nil {
		return
	}

	retrievedFingerprint = model.NewFingerprintFromRowKey(*retrievedKey.Fingerprint.Signature)

	s.firstSupertime = indexable.DecodeTime(retrievedKey.Timestamp)

	return
}
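To illustrate the seek pattern the frontier code relies on, a rough sketch of how a (fingerprint, supertime) pair becomes the raw LevelDB seek key. It only uses calls that appear in this commit (Fingerprint.ToDTO, indexable.EncodeTime, coding.NewProtocolBufferEncoder); the helper name and the assumption that Encode returns the raw bytes plus an error are mine.

	package metric

	import (
		"time"

		"github.com/prometheus/prometheus/coding"
		"github.com/prometheus/prometheus/coding/indexable"
		"github.com/prometheus/prometheus/model"
		dto "github.com/prometheus/prometheus/model/generated"
	)

	// buildSeekKey is an illustrative helper (not part of the commit): it encodes
	// the sample key that newSeriesFrontier seeks with.
	func buildSeekKey(f model.Fingerprint, supertime time.Time) ([]byte, error) {
		key := &dto.SampleKey{
			Fingerprint: f.ToDTO(),
			Timestamp:   indexable.EncodeTime(supertime),
		}

		return coding.NewProtocolBufferEncoder(key).Encode()
	}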
@@ -11,7 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package leveldb
+package metric

 import (
 	"github.com/prometheus/client_golang"
@@ -30,6 +30,7 @@ const (
 	appendLabelNameFingerprint  = "append_label_name_fingerprint"
 	appendLabelPairFingerprint  = "append_label_pair_fingerprint"
 	appendSample                = "append_sample"
+	appendSamples               = "append_samples"
 	getBoundaryValues           = "get_boundary_values"
 	getFingerprintsForLabelName = "get_fingerprints_for_label_name"
 	getFingerprintsForLabelSet  = "get_fingerprints_for_labelset"
@@ -15,13 +15,10 @@ package metric

 import (
 	"github.com/prometheus/prometheus/model"
+	"github.com/prometheus/prometheus/storage"
 	"time"
 )

-type StalenessPolicy struct {
-	DeltaAllowance time.Duration
-}
-
 // MetricPersistence is a system for storing metric samples in a persistence
 // layer.
 type MetricPersistence interface {
@@ -29,9 +26,16 @@ type MetricPersistence interface {
 	// closed when finished.
 	Close() error

+	// Commit all pending operations, if any, since some of the storage components
+	// queue work on channels and operate on it in bulk.
+	// Flush() error
+
 	// Record a new sample in the storage layer.
 	AppendSample(model.Sample) error

+	// Record a new sample in the storage layer.
+	AppendSamples(model.Samples) error
+
 	// Get all of the metric fingerprints that are associated with the provided
 	// label set.
 	GetFingerprintsForLabelSet(model.LabelSet) (model.Fingerprints, error)
@@ -46,11 +50,38 @@ type MetricPersistence interface {
 	GetBoundaryValues(model.Metric, model.Interval, StalenessPolicy) (*model.Sample, *model.Sample, error)
 	GetRangeValues(model.Metric, model.Interval) (*model.SampleSet, error)

+	ForEachSample(IteratorsForFingerprintBuilder) (err error)
+
 	GetAllMetricNames() ([]string, error)

+	// DIAGNOSTIC FUNCTIONS PENDING DELETION BELOW HERE
+
 	GetAllLabelNames() ([]string, error)
 	GetAllLabelPairs() ([]model.LabelSet, error)
 	GetAllMetrics() ([]model.LabelSet, error)
+
+	// Requests the storage stack to build a materialized View of the values
+	// contained therein.
+	// MakeView(builder ViewRequestBuilder, deadline time.Duration) (View, error)
 }

+// Describes the lenience limits for querying the materialized View.
+type StalenessPolicy struct {
+	// Describes the inclusive limit at which individual points, if requested,
+	// will be matched and subject to interpolation.
+	DeltaAllowance time.Duration
+}
+
+// View provides a view of the values in the datastore subject to the request
+// of a preloading operation.
+type View interface {
+	GetValueAtTime(model.Metric, time.Time, StalenessPolicy) (*model.Sample, error)
+	GetBoundaryValues(model.Metric, model.Interval, StalenessPolicy) (*model.Sample, *model.Sample, error)
+	GetRangeValues(model.Metric, model.Interval) (*model.SampleSet, error)
+
+	// Destroy this view.
+	Close()
+}
+
+type Series interface {
+	Fingerprint() model.Fingerprint
+	Metric() model.Metric
+}
+
+type IteratorsForFingerprintBuilder interface {
+	ForStream(stream stream) (storage.RecordDecoder, storage.RecordFilter, storage.RecordOperator)
+}
|
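As a side note, both backends are now reachable through the shared MetricPersistence interface. The following is a minimal, hypothetical sketch of a caller appending one sample and querying fingerprints back; the label names and values are invented for illustration and are not part of this change.

package sketch

import (
	"log"
	"time"

	"github.com/prometheus/prometheus/model"
	"github.com/prometheus/prometheus/storage/metric"
)

// recordAndQuery is a hypothetical helper showing how either backend
// (LevelDB or the in-memory arena) can be driven through the shared interface.
func recordAndQuery(p metric.MetricPersistence) {
	sample := model.Sample{
		Metric:    model.Metric{"name": "requests_total", "job": "api"},
		Value:     42,
		Timestamp: time.Now(),
	}
	if err := p.AppendSample(sample); err != nil {
		log.Fatalf("append failed: %v", err)
	}

	fingerprints, err := p.GetFingerprintsForLabelSet(model.LabelSet{"job": "api"})
	if err != nil {
		log.Fatalf("lookup failed: %v", err)
	}
	log.Printf("found %d matching series", len(fingerprints))
}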
@@ -11,13 +11,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package memory
package metric

import (
	"github.com/prometheus/prometheus/storage/metric"
	"testing"
)

func TestInterfaceAdherence(t *testing.T) {
	var _ metric.MetricPersistence = NewMemorySeriesStorage()
	var _ MetricPersistence = &LevelDBMetricPersistence{}
	var _ MetricPersistence = NewMemorySeriesStorage()
}
@@ -11,13 +11,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package leveldb
package metric

import (
	"github.com/prometheus/prometheus/storage/metric"
	"testing"
)

func TestInterfaceAdherence(t *testing.T) {
	var _ metric.MetricPersistence = &LevelDBMetricPersistence{}

type Iterator interface {
	Seek(key interface{}) (ok bool)
	Next() (ok bool)
	Previous() (ok bool)
	Key() interface{}
	Value() interface{}
}

type IteratorManager interface {
	Iterator() Iterator
}
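The Iterator and IteratorManager contracts above are storage-agnostic. Purely as an illustration (nothing below exists in the repository), a slice-backed iterator with the same method set might look like this:

package sketch

// sliceIterator is a hypothetical in-memory Iterator over parallel key/value
// slices. Callers are expected to Seek or Next successfully before reading
// Key or Value.
type sliceIterator struct {
	keys   []interface{}
	values []interface{}
	index  int
}

// Seek positions the iterator at the first element whose key equals the argument.
func (i *sliceIterator) Seek(key interface{}) (ok bool) {
	for n, k := range i.keys {
		if k == key {
			i.index = n
			return true
		}
	}
	return false
}

// Next advances toward the end of the slice.
func (i *sliceIterator) Next() (ok bool) {
	if i.index+1 >= len(i.keys) {
		return false
	}
	i.index++
	return true
}

// Previous steps back toward the start of the slice.
func (i *sliceIterator) Previous() (ok bool) {
	if i.index == 0 {
		return false
	}
	i.index--
	return true
}

func (i *sliceIterator) Key() interface{}   { return i.keys[i.index] }
func (i *sliceIterator) Value() interface{} { return i.values[i.index] }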
@ -11,21 +11,490 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package leveldb
|
||||
package metric
|
||||
|
||||
import (
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/prometheus/prometheus/coding"
|
||||
"github.com/prometheus/prometheus/coding/indexable"
|
||||
"github.com/prometheus/prometheus/model"
|
||||
dto "github.com/prometheus/prometheus/model/generated"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
"github.com/prometheus/prometheus/storage/metric"
|
||||
index "github.com/prometheus/prometheus/storage/raw/index/leveldb"
|
||||
leveldb "github.com/prometheus/prometheus/storage/raw/leveldb"
|
||||
"github.com/prometheus/prometheus/utility"
|
||||
"io"
|
||||
"log"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
_ = fmt.Sprintf("")
|
||||
)
|
||||
|
||||
type LevelDBMetricPersistence struct {
|
||||
fingerprintToMetrics *leveldb.LevelDBPersistence
|
||||
metricSamples *leveldb.LevelDBPersistence
|
||||
labelNameToFingerprints *leveldb.LevelDBPersistence
|
||||
labelSetToFingerprints *leveldb.LevelDBPersistence
|
||||
metricMembershipIndex *index.LevelDBMembershipIndex
|
||||
}
|
||||
|
||||
var (
|
||||
// These flag values are back of the envelope, though they seem sensible.
|
||||
// Please re-evaluate based on your own needs.
|
||||
fingerprintsToLabelPairCacheSize = flag.Int("fingerprintsToLabelPairCacheSizeBytes", 100*1024*1024, "The size for the fingerprint to label pair index (bytes).")
|
||||
samplesByFingerprintCacheSize = flag.Int("samplesByFingerprintCacheSizeBytes", 500*1024*1024, "The size for the samples database (bytes).")
|
||||
labelNameToFingerprintsCacheSize = flag.Int("labelNameToFingerprintsCacheSizeBytes", 100*1024*1024, "The size for the label name to metric fingerprint index (bytes).")
|
||||
labelPairToFingerprintsCacheSize = flag.Int("labelPairToFingerprintsCacheSizeBytes", 100*1024*1024, "The size for the label pair to metric fingerprint index (bytes).")
|
||||
metricMembershipIndexCacheSize = flag.Int("metricMembershipCacheSizeBytes", 50*1024*1024, "The size for the metric membership index (bytes).")
|
||||
)
|
||||
|
||||
type leveldbOpener func()
|
||||
|
||||
func (l *LevelDBMetricPersistence) Close() error {
|
||||
var persistences = []struct {
|
||||
name string
|
||||
closer io.Closer
|
||||
}{
|
||||
{
|
||||
"Fingerprint to Label Name and Value Pairs",
|
||||
l.fingerprintToMetrics,
|
||||
},
|
||||
{
|
||||
"Fingerprint Samples",
|
||||
l.metricSamples,
|
||||
},
|
||||
{
|
||||
"Label Name to Fingerprints",
|
||||
l.labelNameToFingerprints,
|
||||
},
|
||||
{
|
||||
"Label Name and Value Pairs to Fingerprints",
|
||||
l.labelSetToFingerprints,
|
||||
},
|
||||
{
|
||||
"Metric Membership Index",
|
||||
l.metricMembershipIndex,
|
||||
},
|
||||
}
|
||||
|
||||
errorChannel := make(chan error, len(persistences))
|
||||
|
||||
for _, persistence := range persistences {
|
||||
name := persistence.name
|
||||
closer := persistence.closer
|
||||
|
||||
go func(name string, closer io.Closer) {
|
||||
if closer != nil {
|
||||
closingError := closer.Close()
|
||||
|
||||
if closingError != nil {
|
||||
log.Printf("Could not close a LevelDBPersistence storage container; inconsistencies are possible: %q\n", closingError)
|
||||
}
|
||||
|
||||
errorChannel <- closingError
|
||||
} else {
|
||||
errorChannel <- nil
|
||||
}
|
||||
}(name, closer)
|
||||
}
|
||||
|
||||
for i := 0; i < cap(errorChannel); i++ {
|
||||
closingError := <-errorChannel
|
||||
|
||||
if closingError != nil {
|
||||
return closingError
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
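Close above fans out one goroutine per sub-store and then drains an error channel sized to the number of stores, so a slow or failing close cannot block the others indefinitely. The same pattern in isolation, reduced to plain io.Closer values, is sketched below; closeAll is a made-up helper name.

package sketch

import "io"

// closeAll closes every io.Closer concurrently and returns the first error
// observed after draining all results — a minimal sketch of the
// fan-out/collect pattern used in LevelDBMetricPersistence.Close.
func closeAll(closers []io.Closer) error {
	errs := make(chan error, len(closers))

	for _, c := range closers {
		go func(c io.Closer) {
			if c == nil {
				errs <- nil
				return
			}
			errs <- c.Close()
		}(c)
	}

	var firstErr error
	for i := 0; i < cap(errs); i++ {
		if err := <-errs; err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}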
|
||||
|
||||
func NewLevelDBMetricPersistence(baseDirectory string) (persistence *LevelDBMetricPersistence, err error) {
|
||||
errorChannel := make(chan error, 5)
|
||||
|
||||
emission := &LevelDBMetricPersistence{}
|
||||
|
||||
var subsystemOpeners = []struct {
|
||||
name string
|
||||
opener leveldbOpener
|
||||
}{
|
||||
{
|
||||
"Label Names and Value Pairs by Fingerprint",
|
||||
func() {
|
||||
var err error
|
||||
emission.fingerprintToMetrics, err = leveldb.NewLevelDBPersistence(baseDirectory+"/label_name_and_value_pairs_by_fingerprint", *fingerprintsToLabelPairCacheSize, 10)
|
||||
errorChannel <- err
|
||||
},
|
||||
},
|
||||
{
|
||||
"Samples by Fingerprint",
|
||||
func() {
|
||||
var err error
|
||||
emission.metricSamples, err = leveldb.NewLevelDBPersistence(baseDirectory+"/samples_by_fingerprint", *samplesByFingerprintCacheSize, 10)
|
||||
errorChannel <- err
|
||||
},
|
||||
},
|
||||
{
|
||||
"Fingerprints by Label Name",
|
||||
func() {
|
||||
var err error
|
||||
emission.labelNameToFingerprints, err = leveldb.NewLevelDBPersistence(baseDirectory+"/fingerprints_by_label_name", *labelNameToFingerprintsCacheSize, 10)
|
||||
errorChannel <- err
|
||||
},
|
||||
},
|
||||
{
|
||||
"Fingerprints by Label Name and Value Pair",
|
||||
func() {
|
||||
var err error
|
||||
emission.labelSetToFingerprints, err = leveldb.NewLevelDBPersistence(baseDirectory+"/fingerprints_by_label_name_and_value_pair", *labelPairToFingerprintsCacheSize, 10)
|
||||
errorChannel <- err
|
||||
},
|
||||
},
|
||||
{
|
||||
"Metric Membership Index",
|
||||
func() {
|
||||
var err error
|
||||
emission.metricMembershipIndex, err = index.NewLevelDBMembershipIndex(baseDirectory+"/metric_membership_index", *metricMembershipIndexCacheSize, 10)
|
||||
errorChannel <- err
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, subsystem := range subsystemOpeners {
|
||||
opener := subsystem.opener
|
||||
go opener()
|
||||
}
|
||||
|
||||
for i := 0; i < cap(errorChannel); i++ {
|
||||
err = <-errorChannel
|
||||
|
||||
if err != nil {
|
||||
log.Printf("Could not open a LevelDBPersistence storage container: %q\n", err)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
persistence = emission
|
||||
|
||||
return
|
||||
}
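NewLevelDBMetricPersistence opens its five underlying LevelDB stores concurrently and surfaces the first error. A hedged usage sketch follows; the storage path is an arbitrary example and flag parsing is left to the caller.

package main

import (
	"log"

	"github.com/prometheus/prometheus/storage/metric"
)

func main() {
	// /tmp/metrics is an arbitrary example path.
	persistence, err := metric.NewLevelDBMetricPersistence("/tmp/metrics")
	if err != nil {
		log.Fatalf("could not open storage: %v", err)
	}
	defer persistence.Close()

	// persistence now satisfies metric.MetricPersistence and can accept samples.
}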
|
||||
|
||||
func (l *LevelDBMetricPersistence) AppendSample(sample model.Sample) (err error) {
|
||||
begin := time.Now()
|
||||
defer func() {
|
||||
duration := time.Now().Sub(begin)
|
||||
|
||||
recordOutcome(storageOperations, storageLatency, duration, err, map[string]string{operation: appendSample, result: success}, map[string]string{operation: appendSample, result: failure})
|
||||
}()
|
||||
|
||||
err = l.AppendSamples(model.Samples{sample})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
const (
|
||||
maximumChunkSize = 200
|
||||
sortConcurrency = 2
|
||||
)
|
||||
|
||||
func (l *LevelDBMetricPersistence) AppendSamples(samples model.Samples) (err error) {
|
||||
begin := time.Now()
|
||||
defer func() {
|
||||
duration := time.Now().Sub(begin)
|
||||
|
||||
recordOutcome(storageOperations, storageLatency, duration, err, map[string]string{operation: appendSamples, result: success}, map[string]string{operation: appendSample, result: failure})
|
||||
}()
|
||||
|
||||
// Group the samples by fingerprint.
|
||||
var (
|
||||
fingerprintToSamples = map[model.Fingerprint]model.Samples{}
|
||||
)
|
||||
|
||||
for _, sample := range samples {
|
||||
fingerprint := model.NewFingerprintFromMetric(sample.Metric)
|
||||
samples := fingerprintToSamples[fingerprint]
|
||||
samples = append(samples, sample)
|
||||
fingerprintToSamples[fingerprint] = samples
|
||||
}
|
||||
|
||||
// Begin the sorting of grouped samples.
|
||||
|
||||
sortingSemaphore := make(chan bool, sortConcurrency)
|
||||
doneSorting := make(chan bool, len(fingerprintToSamples))
|
||||
for i := 0; i < sortConcurrency; i++ {
|
||||
sortingSemaphore <- true
|
||||
}
|
||||
|
||||
for _, samples := range fingerprintToSamples {
|
||||
go func(samples model.Samples) {
|
||||
<-sortingSemaphore
|
||||
sort.Sort(samples)
|
||||
sortingSemaphore <- true
|
||||
doneSorting <- true
|
||||
}(samples)
|
||||
}
|
||||
|
||||
for i := 0; i < len(fingerprintToSamples); i++ {
|
||||
<-doneSorting
|
||||
}
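The sortingSemaphore/doneSorting pair above is a small token-based worker pool: at most sortConcurrency sample groups are sorted at once, and completions are counted before proceeding. A self-contained sketch of that pattern, using plain integer slices as stand-in work items, follows.

package sketch

import "sort"

// sortGroups sorts each slice with at most `concurrency` goroutines running
// at any moment — the same token-semaphore idea used in AppendSamples.
func sortGroups(groups [][]int, concurrency int) {
	tokens := make(chan struct{}, concurrency)
	done := make(chan struct{}, len(groups))

	for i := 0; i < concurrency; i++ {
		tokens <- struct{}{}
	}

	for _, group := range groups {
		go func(group []int) {
			<-tokens             // acquire a slot
			sort.Ints(group)     // the per-group work
			tokens <- struct{}{} // release the slot
			done <- struct{}{}   // signal completion
		}(group)
	}

	for i := 0; i < len(groups); i++ {
		<-done
	}
}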
|
||||
|
||||
var (
|
||||
absentFingerprints = map[model.Fingerprint]model.Samples{}
|
||||
)
|
||||
|
||||
// Determine which metrics are unknown in the database.
|
||||
|
||||
for fingerprint, samples := range fingerprintToSamples {
|
||||
sample := samples[0]
|
||||
metricDTO := model.SampleToMetricDTO(&sample)
|
||||
indexHas, err := l.hasIndexMetric(metricDTO)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
continue
|
||||
}
|
||||
if !indexHas {
|
||||
absentFingerprints[fingerprint] = samples
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: For the missing fingerprints, determine what label names and pairs
|
||||
// are absent and act accordingly and append fingerprints.
|
||||
|
||||
var (
|
||||
doneBuildingLabelNameIndex = make(chan interface{})
|
||||
doneBuildingLabelPairIndex = make(chan interface{})
|
||||
)
|
||||
|
||||
// Update LabelName -> Fingerprint index.
|
||||
go func() {
|
||||
labelNameFingerprints := map[model.LabelName]utility.Set{}
|
||||
|
||||
for fingerprint, samples := range absentFingerprints {
|
||||
metric := samples[0].Metric
|
||||
for labelName := range metric {
|
||||
fingerprintSet, ok := labelNameFingerprints[labelName]
|
||||
if !ok {
|
||||
fingerprintSet = utility.Set{}
|
||||
}
|
||||
|
||||
fingerprints, err := l.GetFingerprintsForLabelName(labelName)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
doneBuildingLabelNameIndex <- err
|
||||
return
|
||||
}
|
||||
|
||||
for _, fingerprint := range fingerprints {
|
||||
fingerprintSet.Add(fingerprint)
|
||||
}
|
||||
fingerprintSet.Add(fingerprint)
|
||||
labelNameFingerprints[labelName] = fingerprintSet
|
||||
}
|
||||
}
|
||||
|
||||
batch := leveldb.NewBatch()
|
||||
defer batch.Close()
|
||||
|
||||
for labelName, fingerprintSet := range labelNameFingerprints {
|
||||
fingerprints := model.Fingerprints{}
|
||||
for fingerprint := range fingerprintSet {
|
||||
fingerprints = append(fingerprints, fingerprint.(model.Fingerprint))
|
||||
}
|
||||
|
||||
sort.Sort(fingerprints)
|
||||
|
||||
key := &dto.LabelName{
|
||||
Name: proto.String(string(labelName)),
|
||||
}
|
||||
value := &dto.FingerprintCollection{}
|
||||
for _, fingerprint := range fingerprints {
|
||||
value.Member = append(value.Member, fingerprint.ToDTO())
|
||||
}
|
||||
|
||||
batch.Put(coding.NewProtocolBufferEncoder(key), coding.NewProtocolBufferEncoder(value))
|
||||
}
|
||||
|
||||
err := l.labelNameToFingerprints.Commit(batch)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
doneBuildingLabelNameIndex <- err
|
||||
return
|
||||
}
|
||||
|
||||
doneBuildingLabelNameIndex <- true
|
||||
}()
|
||||
|
||||
// Update LabelPair -> Fingerprint index.
|
||||
go func() {
|
||||
labelPairFingerprints := map[model.LabelPair]utility.Set{}
|
||||
|
||||
for fingerprint, samples := range absentFingerprints {
|
||||
metric := samples[0].Metric
|
||||
for labelName, labelValue := range metric {
|
||||
labelPair := model.LabelPair{
|
||||
Name: labelName,
|
||||
Value: labelValue,
|
||||
}
|
||||
fingerprintSet, ok := labelPairFingerprints[labelPair]
|
||||
if !ok {
|
||||
fingerprintSet = utility.Set{}
|
||||
}
|
||||
|
||||
fingerprints, err := l.GetFingerprintsForLabelSet(model.LabelSet{
|
||||
labelName: labelValue,
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
doneBuildingLabelPairIndex <- err
|
||||
return
|
||||
}
|
||||
|
||||
for _, fingerprint := range fingerprints {
|
||||
fingerprintSet.Add(fingerprint)
|
||||
}
|
||||
fingerprintSet.Add(fingerprint)
|
||||
labelPairFingerprints[labelPair] = fingerprintSet
|
||||
}
|
||||
}
|
||||
|
||||
batch := leveldb.NewBatch()
|
||||
defer batch.Close()
|
||||
|
||||
for labelPair, fingerprintSet := range labelPairFingerprints {
|
||||
fingerprints := model.Fingerprints{}
|
||||
for fingerprint := range fingerprintSet {
|
||||
fingerprints = append(fingerprints, fingerprint.(model.Fingerprint))
|
||||
}
|
||||
|
||||
sort.Sort(fingerprints)
|
||||
|
||||
key := &dto.LabelPair{
|
||||
Name: proto.String(string(labelPair.Name)),
|
||||
Value: proto.String(string(labelPair.Value)),
|
||||
}
|
||||
value := &dto.FingerprintCollection{}
|
||||
for _, fingerprint := range fingerprints {
|
||||
value.Member = append(value.Member, fingerprint.ToDTO())
|
||||
}
|
||||
|
||||
batch.Put(coding.NewProtocolBufferEncoder(key), coding.NewProtocolBufferEncoder(value))
|
||||
}
|
||||
|
||||
err := l.labelSetToFingerprints.Commit(batch)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
doneBuildingLabelPairIndex <- true
|
||||
return
|
||||
}
|
||||
|
||||
doneBuildingLabelPairIndex <- true
|
||||
}()
|
||||
|
||||
makeTopLevelIndex := true
|
||||
|
||||
v := <-doneBuildingLabelNameIndex
|
||||
_, ok := v.(error)
|
||||
if ok {
|
||||
panic(err)
|
||||
makeTopLevelIndex = false
|
||||
}
|
||||
v = <-doneBuildingLabelPairIndex
|
||||
_, ok = v.(error)
|
||||
if ok {
|
||||
panic(err)
|
||||
makeTopLevelIndex = false
|
||||
}
|
||||
|
||||
// Update the Metric existence index.
|
||||
|
||||
if len(absentFingerprints) > 0 {
|
||||
batch := leveldb.NewBatch()
|
||||
defer batch.Close()
|
||||
|
||||
for fingerprint, samples := range absentFingerprints {
|
||||
for _, sample := range samples {
|
||||
key := coding.NewProtocolBufferEncoder(fingerprint.ToDTO())
|
||||
value := coding.NewProtocolBufferEncoder(model.SampleToMetricDTO(&sample))
|
||||
batch.Put(key, value)
|
||||
}
|
||||
}
|
||||
|
||||
err = l.fingerprintToMetrics.Commit(batch)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
// Critical
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
|
||||
if makeTopLevelIndex {
|
||||
batch := leveldb.NewBatch()
|
||||
defer batch.Close()
|
||||
|
||||
// WART: We should probably encode simple fingerprints.
|
||||
for _, samples := range absentFingerprints {
|
||||
sample := samples[0]
|
||||
key := coding.NewProtocolBufferEncoder(model.SampleToMetricDTO(&sample))
|
||||
batch.Put(key, key)
|
||||
}
|
||||
|
||||
err := l.metricMembershipIndex.Commit(batch)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
// Not critical.
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
|
||||
samplesBatch := leveldb.NewBatch()
|
||||
defer samplesBatch.Close()
|
||||
|
||||
for fingerprint, group := range fingerprintToSamples {
|
||||
for {
|
||||
lengthOfGroup := len(group)
|
||||
|
||||
if lengthOfGroup == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
take := maximumChunkSize
|
||||
if lengthOfGroup < take {
|
||||
take = lengthOfGroup
|
||||
}
|
||||
|
||||
chunk := group[0:take]
|
||||
group = group[take:lengthOfGroup]
|
||||
|
||||
key := &dto.SampleKey{
|
||||
Fingerprint: fingerprint.ToDTO(),
|
||||
Timestamp: indexable.EncodeTime(chunk[0].Timestamp),
|
||||
LastTimestamp: proto.Int64(chunk[take-1].Timestamp.Unix()),
|
||||
}
|
||||
|
||||
value := &dto.SampleValueSeries{}
|
||||
for _, sample := range chunk {
|
||||
value.Value = append(value.Value, &dto.SampleValueSeries_Value{
|
||||
Timestamp: proto.Int64(sample.Timestamp.Unix()),
|
||||
Value: proto.Float32(float32(sample.Value)),
|
||||
})
|
||||
}
|
||||
|
||||
samplesBatch.Put(coding.NewProtocolBufferEncoder(key), coding.NewProtocolBufferEncoder(value))
|
||||
}
|
||||
}
|
||||
|
||||
err = l.metricSamples.Commit(samplesBatch)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
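Each write above covers at most maximumChunkSize samples, and the chunk's SampleKey carries the fingerprint together with the first and last timestamps it spans, which lets readers seek a chunk by time without decoding its values. The simplified model below restates that keying scheme with invented types, for illustration only.

package sketch

import "time"

// chunkKey mirrors the idea behind dto.SampleKey: a fingerprint plus the time
// range covered by one chunk of values. The field names are illustrative.
type chunkKey struct {
	Fingerprint string
	First, Last time.Time
}

// chunkTimestamps splits a sorted list of timestamps into keys covering at
// most chunkSize entries each — the same slicing AppendSamples performs.
func chunkTimestamps(fingerprint string, stamps []time.Time, chunkSize int) []chunkKey {
	keys := []chunkKey{}
	for len(stamps) > 0 {
		take := chunkSize
		if len(stamps) < take {
			take = len(stamps)
		}
		keys = append(keys, chunkKey{
			Fingerprint: fingerprint,
			First:       stamps[0],
			Last:        stamps[take-1],
		})
		stamps = stamps[take:]
	}
	return keys
}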
|
||||
|
||||
func extractSampleKey(i iterator) (k *dto.SampleKey, err error) {
|
||||
k = &dto.SampleKey{}
|
||||
err = proto.Unmarshal(i.Key(), k)
|
||||
|
@ -33,8 +502,8 @@ func extractSampleKey(i iterator) (k *dto.SampleKey, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
func extractSampleValue(i iterator) (v *dto.SampleValue, err error) {
|
||||
v = &dto.SampleValue{}
|
||||
func extractSampleValue(i iterator) (v *dto.SampleValueSeries, err error) {
|
||||
v = &dto.SampleValueSeries{}
|
||||
err = proto.Unmarshal(i.Value(), v)
|
||||
|
||||
return
|
||||
|
@ -89,21 +558,6 @@ func (l *LevelDBMetricPersistence) hasIndexMetric(dto *dto.Metric) (value bool,
|
|||
return
|
||||
}
|
||||
|
||||
func (l *LevelDBMetricPersistence) indexMetric(dto *dto.Metric) (err error) {
|
||||
begin := time.Now()
|
||||
|
||||
defer func() {
|
||||
duration := time.Now().Sub(begin)
|
||||
|
||||
recordOutcome(storageOperations, storageLatency, duration, err, map[string]string{operation: indexMetric, result: success}, map[string]string{operation: indexMetric, result: failure})
|
||||
}()
|
||||
|
||||
dtoKey := coding.NewProtocolBufferEncoder(dto)
|
||||
err = l.metricMembershipIndex.Put(dtoKey)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (l *LevelDBMetricPersistence) HasLabelPair(dto *dto.LabelPair) (value bool, err error) {
|
||||
begin := time.Now()
|
||||
|
||||
|
@ -134,49 +588,6 @@ func (l *LevelDBMetricPersistence) HasLabelName(dto *dto.LabelName) (value bool,
|
|||
return
|
||||
}
|
||||
|
||||
func (l *LevelDBMetricPersistence) getFingerprintsForLabelSet(p *dto.LabelPair) (c *dto.FingerprintCollection, err error) {
|
||||
begin := time.Now()
|
||||
|
||||
defer func() {
|
||||
duration := time.Now().Sub(begin)
|
||||
|
||||
recordOutcome(storageOperations, storageLatency, duration, err, map[string]string{operation: getFingerprintsForLabelSet, result: success}, map[string]string{operation: getFingerprintsForLabelSet, result: failure})
|
||||
}()
|
||||
|
||||
dtoKey := coding.NewProtocolBufferEncoder(p)
|
||||
get, err := l.labelSetToFingerprints.Get(dtoKey)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
c = &dto.FingerprintCollection{}
|
||||
err = proto.Unmarshal(get, c)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// XXX: Delete me and replace with GetFingerprintsForLabelName.
|
||||
func (l *LevelDBMetricPersistence) GetLabelNameFingerprints(n *dto.LabelName) (c *dto.FingerprintCollection, err error) {
|
||||
begin := time.Now()
|
||||
|
||||
defer func() {
|
||||
duration := time.Now().Sub(begin)
|
||||
|
||||
recordOutcome(storageOperations, storageLatency, duration, err, map[string]string{operation: getLabelNameFingerprints, result: success}, map[string]string{operation: getLabelNameFingerprints, result: failure})
|
||||
}()
|
||||
|
||||
dtoKey := coding.NewProtocolBufferEncoder(n)
|
||||
get, err := l.labelNameToFingerprints.Get(dtoKey)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
c = &dto.FingerprintCollection{}
|
||||
err = proto.Unmarshal(get, c)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (l *LevelDBMetricPersistence) GetFingerprintsForLabelSet(labelSet model.LabelSet) (fps model.Fingerprints, err error) {
|
||||
begin := time.Now()
|
||||
|
||||
|
@ -203,7 +614,7 @@ func (l *LevelDBMetricPersistence) GetFingerprintsForLabelSet(labelSet model.Lab
|
|||
set := utility.Set{}
|
||||
|
||||
for _, m := range unmarshaled.Member {
|
||||
fp := model.Fingerprint(*m.Signature)
|
||||
fp := model.NewFingerprintFromRowKey(*m.Signature)
|
||||
set.Add(fp)
|
||||
}
|
||||
|
||||
|
@ -249,7 +660,7 @@ func (l *LevelDBMetricPersistence) GetFingerprintsForLabelName(labelName model.L
|
|||
}
|
||||
|
||||
for _, m := range unmarshaled.Member {
|
||||
fp := model.Fingerprint(*m.Signature)
|
||||
fp := model.NewFingerprintFromRowKey(*m.Signature)
|
||||
fps = append(fps, fp)
|
||||
}
|
||||
|
||||
|
@ -265,7 +676,7 @@ func (l *LevelDBMetricPersistence) GetMetricForFingerprint(f model.Fingerprint)
|
|||
recordOutcome(storageOperations, storageLatency, duration, err, map[string]string{operation: getMetricForFingerprint, result: success}, map[string]string{operation: getMetricForFingerprint, result: failure})
|
||||
}()
|
||||
|
||||
raw, err := l.fingerprintToMetrics.Get(coding.NewProtocolBufferEncoder(model.FingerprintToDTO(&f)))
|
||||
raw, err := l.fingerprintToMetrics.Get(coding.NewProtocolBufferEncoder(model.FingerprintToDTO(f)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
@ -289,7 +700,7 @@ func (l *LevelDBMetricPersistence) GetMetricForFingerprint(f model.Fingerprint)
|
|||
return
|
||||
}
|
||||
|
||||
func (l *LevelDBMetricPersistence) GetBoundaryValues(m model.Metric, i model.Interval, s metric.StalenessPolicy) (open *model.Sample, end *model.Sample, err error) {
|
||||
func (l *LevelDBMetricPersistence) GetBoundaryValues(m model.Metric, i model.Interval, s StalenessPolicy) (open *model.Sample, end *model.Sample, err error) {
|
||||
begin := time.Now()
|
||||
|
||||
defer func() {
|
||||
|
@ -338,7 +749,7 @@ type iterator interface {
|
|||
Value() []byte
|
||||
}
|
||||
|
||||
func (l *LevelDBMetricPersistence) GetValueAtTime(m model.Metric, t time.Time, s metric.StalenessPolicy) (sample *model.Sample, err error) {
|
||||
func (l *LevelDBMetricPersistence) GetValueAtTime(m model.Metric, t time.Time, s StalenessPolicy) (sample *model.Sample, err error) {
|
||||
begin := time.Now()
|
||||
|
||||
defer func() {
|
||||
|
@ -347,7 +758,7 @@ func (l *LevelDBMetricPersistence) GetValueAtTime(m model.Metric, t time.Time, s
|
|||
recordOutcome(storageOperations, storageLatency, duration, err, map[string]string{operation: getValueAtTime, result: success}, map[string]string{operation: getValueAtTime, result: failure})
|
||||
}()
|
||||
|
||||
f := m.Fingerprint().ToDTO()
|
||||
f := model.NewFingerprintFromMetric(m).ToDTO()
|
||||
|
||||
// Candidate for Refactoring
|
||||
k := &dto.SampleKey{
|
||||
|
@ -395,7 +806,7 @@ func (l *LevelDBMetricPersistence) GetValueAtTime(m model.Metric, t time.Time, s
|
|||
|
||||
var (
|
||||
firstKey *dto.SampleKey
|
||||
firstValue *dto.SampleValue
|
||||
firstValue *dto.SampleValueSeries
|
||||
)
|
||||
|
||||
firstKey, err = extractSampleKey(iterator)
|
||||
|
@ -448,7 +859,7 @@ func (l *LevelDBMetricPersistence) GetValueAtTime(m model.Metric, t time.Time, s
|
|||
|
||||
var (
|
||||
alternativeKey *dto.SampleKey
|
||||
alternativeValue *dto.SampleValue
|
||||
alternativeValue *dto.SampleValueSeries
|
||||
)
|
||||
|
||||
alternativeKey, err = extractSampleKey(iterator)
|
||||
|
@ -534,18 +945,20 @@ func (l *LevelDBMetricPersistence) GetValueAtTime(m model.Metric, t time.Time, s
|
|||
return
|
||||
}
|
||||
|
||||
var secondValue *dto.SampleValue
|
||||
var secondValue *dto.SampleValueSeries
|
||||
|
||||
secondValue, err = extractSampleValue(iterator)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
interpolated := interpolate(firstTime, secondTime, *firstValue.Value, *secondValue.Value, t)
|
||||
fValue := *firstValue.Value[0].Value
|
||||
sValue := *secondValue.Value[0].Value
|
||||
|
||||
sampleValue := &dto.SampleValue{
|
||||
Value: &interpolated,
|
||||
}
|
||||
interpolated := interpolate(firstTime, secondTime, fValue, sValue, t)
|
||||
|
||||
sampleValue := &dto.SampleValueSeries{}
|
||||
sampleValue.Value = append(sampleValue.Value, &dto.SampleValueSeries_Value{Value: &interpolated})
|
||||
|
||||
sample = model.SampleFromDTO(&m, &t, sampleValue)
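The interpolate helper invoked here is not part of this hunk. Assuming it performs plain linear interpolation over wall-clock time between the two neighbouring points, a plausible reconstruction would be the following sketch (not the repository's own implementation):

package sketch

import "time"

// interpolate linearly estimates the value at time t between the known points
// (t1, v1) and (t2, v2). This is an assumed reconstruction for illustration.
func interpolate(t1, t2 time.Time, v1, v2 float32, t time.Time) float32 {
	if !t2.After(t1) {
		return v1
	}
	fraction := float32(t.Sub(t1)) / float32(t2.Sub(t1))
	return v1 + fraction*(v2-v1)
}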
|
||||
|
||||
|
@ -560,7 +973,7 @@ func (l *LevelDBMetricPersistence) GetRangeValues(m model.Metric, i model.Interv
|
|||
|
||||
recordOutcome(storageOperations, storageLatency, duration, err, map[string]string{operation: getRangeValues, result: success}, map[string]string{operation: getRangeValues, result: failure})
|
||||
}()
|
||||
f := m.Fingerprint().ToDTO()
|
||||
f := model.NewFingerprintFromMetric(m).ToDTO()
|
||||
|
||||
k := &dto.SampleKey{
|
||||
Fingerprint: f,
|
||||
|
@ -608,7 +1021,7 @@ func (l *LevelDBMetricPersistence) GetRangeValues(m model.Metric, i model.Interv
|
|||
}
|
||||
|
||||
v.Values = append(v.Values, model.SamplePair{
|
||||
Value: model.SampleValue(*retrievedValue.Value),
|
||||
Value: model.SampleValue(*retrievedValue.Value[0].Value),
|
||||
Timestamp: indexable.DecodeTime(retrievedKey.Timestamp),
|
||||
})
|
||||
}
|
||||
|
@ -671,3 +1084,7 @@ func (l *LevelDBMetricPersistence) GetAllMetricNames() (metricNames []string, er
|
|||
metricNames = metricNamesOp.metricNames
|
||||
return
|
||||
}
|
||||
|
||||
func (l *LevelDBMetricPersistence) ForEachSample(builder IteratorsForFingerprintBuilder) (err error) {
|
||||
panic("not implemented")
|
||||
}
|
|
@ -1,176 +0,0 @@
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
"errors"
|
||||
"github.com/prometheus/prometheus/coding"
|
||||
"github.com/prometheus/prometheus/coding/indexable"
|
||||
"github.com/prometheus/prometheus/model"
|
||||
dto "github.com/prometheus/prometheus/model/generated"
|
||||
"github.com/prometheus/prometheus/utility"
|
||||
"log"
|
||||
)
|
||||
|
||||
func (l *LevelDBMetricPersistence) GetAllLabelNames() ([]string, error) {
|
||||
if getAll, getAllError := l.labelNameToFingerprints.GetAll(); getAllError == nil {
|
||||
result := make([]string, 0, len(getAll))
|
||||
labelNameDTO := &dto.LabelName{}
|
||||
|
||||
for _, pair := range getAll {
|
||||
if unmarshalError := proto.Unmarshal(pair.Left, labelNameDTO); unmarshalError == nil {
|
||||
result = append(result, *labelNameDTO.Name)
|
||||
} else {
|
||||
return nil, unmarshalError
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
|
||||
} else {
|
||||
return nil, getAllError
|
||||
}
|
||||
|
||||
return nil, errors.New("Unknown error encountered when querying label names.")
|
||||
}
|
||||
|
||||
func (l *LevelDBMetricPersistence) GetAllLabelPairs() ([]model.LabelSet, error) {
|
||||
if getAll, getAllError := l.labelSetToFingerprints.GetAll(); getAllError == nil {
|
||||
result := make([]model.LabelSet, 0, len(getAll))
|
||||
labelPairDTO := &dto.LabelPair{}
|
||||
|
||||
for _, pair := range getAll {
|
||||
if unmarshalError := proto.Unmarshal(pair.Left, labelPairDTO); unmarshalError == nil {
|
||||
n := model.LabelName(*labelPairDTO.Name)
|
||||
v := model.LabelValue(*labelPairDTO.Value)
|
||||
item := model.LabelSet{n: v}
|
||||
result = append(result, item)
|
||||
} else {
|
||||
return nil, unmarshalError
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
|
||||
} else {
|
||||
return nil, getAllError
|
||||
}
|
||||
|
||||
return nil, errors.New("Unknown error encountered when querying label pairs.")
|
||||
}
|
||||
|
||||
func (l *LevelDBMetricPersistence) GetAllMetrics() ([]model.LabelSet, error) {
|
||||
if getAll, getAllError := l.labelSetToFingerprints.GetAll(); getAllError == nil {
|
||||
result := make([]model.LabelSet, 0)
|
||||
fingerprintCollection := &dto.FingerprintCollection{}
|
||||
|
||||
fingerprints := make(utility.Set)
|
||||
|
||||
for _, pair := range getAll {
|
||||
if unmarshalError := proto.Unmarshal(pair.Right, fingerprintCollection); unmarshalError == nil {
|
||||
for _, member := range fingerprintCollection.Member {
|
||||
if !fingerprints.Has(*member.Signature) {
|
||||
fingerprints.Add(*member.Signature)
|
||||
fingerprintEncoded := coding.NewProtocolBufferEncoder(member)
|
||||
if labelPairCollectionRaw, labelPairCollectionRawError := l.fingerprintToMetrics.Get(fingerprintEncoded); labelPairCollectionRawError == nil {
|
||||
|
||||
labelPairCollectionDTO := &dto.LabelSet{}
|
||||
|
||||
if labelPairCollectionDTOMarshalError := proto.Unmarshal(labelPairCollectionRaw, labelPairCollectionDTO); labelPairCollectionDTOMarshalError == nil {
|
||||
intermediate := make(model.LabelSet, 0)
|
||||
|
||||
for _, member := range labelPairCollectionDTO.Member {
|
||||
n := model.LabelName(*member.Name)
|
||||
v := model.LabelValue(*member.Value)
|
||||
intermediate[n] = v
|
||||
}
|
||||
|
||||
result = append(result, intermediate)
|
||||
} else {
|
||||
return nil, labelPairCollectionDTOMarshalError
|
||||
}
|
||||
} else {
|
||||
return nil, labelPairCollectionRawError
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return nil, unmarshalError
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
} else {
|
||||
return nil, getAllError
|
||||
}
|
||||
|
||||
return nil, errors.New("Unknown error encountered when querying metrics.")
|
||||
}
|
||||
|
||||
func (l *LevelDBMetricPersistence) GetSamplesForMetric(metric model.Metric, interval model.Interval) ([]model.Samples, error) {
|
||||
if iterator, closer, iteratorErr := l.metricSamples.GetIterator(); iteratorErr == nil {
|
||||
defer closer.Close()
|
||||
|
||||
fingerprintDTO := metric.Fingerprint().ToDTO()
|
||||
start := &dto.SampleKey{
|
||||
Fingerprint: fingerprintDTO,
|
||||
Timestamp: indexable.EncodeTime(interval.OldestInclusive),
|
||||
}
|
||||
|
||||
emission := make([]model.Samples, 0)
|
||||
|
||||
if encode, encodeErr := coding.NewProtocolBufferEncoder(start).Encode(); encodeErr == nil {
|
||||
iterator.Seek(encode)
|
||||
|
||||
predicate := keyIsAtMostOld(interval.NewestInclusive)
|
||||
|
||||
for iterator = iterator; iterator.Valid(); iterator.Next() {
|
||||
key := &dto.SampleKey{}
|
||||
value := &dto.SampleValue{}
|
||||
if keyUnmarshalErr := proto.Unmarshal(iterator.Key(), key); keyUnmarshalErr == nil {
|
||||
if valueUnmarshalErr := proto.Unmarshal(iterator.Value(), value); valueUnmarshalErr == nil {
|
||||
if fingerprintsEqual(fingerprintDTO, key.Fingerprint) {
|
||||
// Wart
|
||||
if predicate(key) {
|
||||
emission = append(emission, model.Samples{
|
||||
Value: model.SampleValue(*value.Value),
|
||||
Timestamp: indexable.DecodeTime(key.Timestamp),
|
||||
})
|
||||
} else {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
return nil, valueUnmarshalErr
|
||||
}
|
||||
} else {
|
||||
return nil, keyUnmarshalErr
|
||||
}
|
||||
}
|
||||
|
||||
return emission, nil
|
||||
|
||||
} else {
|
||||
log.Printf("Could not encode the start key: %q\n", encodeErr)
|
||||
return nil, encodeErr
|
||||
}
|
||||
} else {
|
||||
log.Printf("Could not acquire iterator: %q\n", iteratorErr)
|
||||
return nil, iteratorErr
|
||||
}
|
||||
|
||||
panic("unreachable")
|
||||
}
|
|
@ -1,55 +0,0 @@
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"github.com/prometheus/prometheus/storage/metric"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var testGetFingerprintsForLabelSet = buildTestPersistence("get_fingerprints_for_labelset", metric.GetFingerprintsForLabelSetTests)
|
||||
|
||||
func TestGetFingerprintsForLabelSet(t *testing.T) {
|
||||
testGetFingerprintsForLabelSet(t)
|
||||
}
|
||||
|
||||
func BenchmarkGetFingerprintsForLabelSet(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testGetFingerprintsForLabelSet(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testGetFingerprintsForLabelName = buildTestPersistence("get_fingerprints_for_labelname", metric.GetFingerprintsForLabelNameTests)
|
||||
|
||||
func TestGetFingerprintsForLabelName(t *testing.T) {
|
||||
testGetFingerprintsForLabelName(t)
|
||||
}
|
||||
|
||||
func BenchmarkGetFingerprintsForLabelName(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testGetFingerprintsForLabelName(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testGetMetricForFingerprint = buildTestPersistence("get_metric_for_fingerprint", metric.GetMetricForFingerprintTests)
|
||||
|
||||
func TestGetMetricForFingerprint(t *testing.T) {
|
||||
testGetMetricForFingerprint(t)
|
||||
}
|
||||
|
||||
func BenchmarkGetMetricForFingerprint(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testGetMetricForFingerprint(b)
|
||||
}
|
||||
}
|
|
@ -1,163 +0,0 @@
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"flag"
|
||||
index "github.com/prometheus/prometheus/storage/raw/index/leveldb"
|
||||
storage "github.com/prometheus/prometheus/storage/raw/leveldb"
|
||||
"io"
|
||||
"log"
|
||||
)
|
||||
|
||||
var (
|
||||
// These flag values are back of the envelope, though they seem sensible.
|
||||
// Please re-evaluate based on your own needs.
|
||||
fingerprintsToLabelPairCacheSize = flag.Int("fingerprintsToLabelPairCacheSizeBytes", 100*1024*1024, "The size for the fingerprint to label pair index (bytes).")
|
||||
samplesByFingerprintCacheSize = flag.Int("samplesByFingerprintCacheSizeBytes", 500*1024*1024, "The size for the samples database (bytes).")
|
||||
labelNameToFingerprintsCacheSize = flag.Int("labelNameToFingerprintsCacheSizeBytes", 100*1024*1024, "The size for the label name to metric fingerprint index (bytes).")
|
||||
labelPairToFingerprintsCacheSize = flag.Int("labelPairToFingerprintsCacheSizeBytes", 100*1024*1024, "The size for the label pair to metric fingerprint index (bytes).")
|
||||
metricMembershipIndexCacheSize = flag.Int("metricMembershipCacheSizeBytes", 50*1024*1024, "The size for the metric membership index (bytes).")
|
||||
)
|
||||
|
||||
type leveldbOpener func()
|
||||
|
||||
func (l *LevelDBMetricPersistence) Close() error {
|
||||
var persistences = []struct {
|
||||
name string
|
||||
closer io.Closer
|
||||
}{
|
||||
{
|
||||
"Fingerprint to Label Name and Value Pairs",
|
||||
l.fingerprintToMetrics,
|
||||
},
|
||||
{
|
||||
"Fingerprint Samples",
|
||||
l.metricSamples,
|
||||
},
|
||||
{
|
||||
"Label Name to Fingerprints",
|
||||
l.labelNameToFingerprints,
|
||||
},
|
||||
{
|
||||
"Label Name and Value Pairs to Fingerprints",
|
||||
l.labelSetToFingerprints,
|
||||
},
|
||||
{
|
||||
"Metric Membership Index",
|
||||
l.metricMembershipIndex,
|
||||
},
|
||||
}
|
||||
|
||||
errorChannel := make(chan error, len(persistences))
|
||||
|
||||
for _, persistence := range persistences {
|
||||
name := persistence.name
|
||||
closer := persistence.closer
|
||||
|
||||
go func(name string, closer io.Closer) {
|
||||
if closer != nil {
|
||||
closingError := closer.Close()
|
||||
|
||||
if closingError != nil {
|
||||
log.Printf("Could not close a LevelDBPersistence storage container; inconsistencies are possible: %q\n", closingError)
|
||||
}
|
||||
|
||||
errorChannel <- closingError
|
||||
} else {
|
||||
errorChannel <- nil
|
||||
}
|
||||
}(name, closer)
|
||||
}
|
||||
|
||||
for i := 0; i < cap(errorChannel); i++ {
|
||||
closingError := <-errorChannel
|
||||
|
||||
if closingError != nil {
|
||||
return closingError
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewLevelDBMetricPersistence(baseDirectory string) (persistence *LevelDBMetricPersistence, err error) {
|
||||
errorChannel := make(chan error, 5)
|
||||
|
||||
emission := &LevelDBMetricPersistence{}
|
||||
|
||||
var subsystemOpeners = []struct {
|
||||
name string
|
||||
opener leveldbOpener
|
||||
}{
|
||||
{
|
||||
"Label Names and Value Pairs by Fingerprint",
|
||||
func() {
|
||||
var err error
|
||||
emission.fingerprintToMetrics, err = storage.NewLevelDBPersistence(baseDirectory+"/label_name_and_value_pairs_by_fingerprint", *fingerprintsToLabelPairCacheSize, 10)
|
||||
errorChannel <- err
|
||||
},
|
||||
},
|
||||
{
|
||||
"Samples by Fingerprint",
|
||||
func() {
|
||||
var err error
|
||||
emission.metricSamples, err = storage.NewLevelDBPersistence(baseDirectory+"/samples_by_fingerprint", *samplesByFingerprintCacheSize, 10)
|
||||
errorChannel <- err
|
||||
},
|
||||
},
|
||||
{
|
||||
"Fingerprints by Label Name",
|
||||
func() {
|
||||
var err error
|
||||
emission.labelNameToFingerprints, err = storage.NewLevelDBPersistence(baseDirectory+"/fingerprints_by_label_name", *labelNameToFingerprintsCacheSize, 10)
|
||||
errorChannel <- err
|
||||
},
|
||||
},
|
||||
{
|
||||
"Fingerprints by Label Name and Value Pair",
|
||||
func() {
|
||||
var err error
|
||||
emission.labelSetToFingerprints, err = storage.NewLevelDBPersistence(baseDirectory+"/fingerprints_by_label_name_and_value_pair", *labelPairToFingerprintsCacheSize, 10)
|
||||
errorChannel <- err
|
||||
},
|
||||
},
|
||||
{
|
||||
"Metric Membership Index",
|
||||
func() {
|
||||
var err error
|
||||
emission.metricMembershipIndex, err = index.NewLevelDBMembershipIndex(baseDirectory+"/metric_membership_index", *metricMembershipIndexCacheSize, 10)
|
||||
errorChannel <- err
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, subsystem := range subsystemOpeners {
|
||||
opener := subsystem.opener
|
||||
go opener()
|
||||
}
|
||||
|
||||
for i := 0; i < cap(errorChannel); i++ {
|
||||
err = <-errorChannel
|
||||
|
||||
if err != nil {
|
||||
log.Printf("Could not open a LevelDBPersistence storage container: %q\n", err)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
persistence = emission
|
||||
|
||||
return
|
||||
}
|
|
@ -1,224 +0,0 @@
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"code.google.com/p/goprotobuf/proto"
|
||||
"github.com/prometheus/prometheus/coding"
|
||||
"github.com/prometheus/prometheus/coding/indexable"
|
||||
"github.com/prometheus/prometheus/model"
|
||||
dto "github.com/prometheus/prometheus/model/generated"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (l *LevelDBMetricPersistence) setLabelPairFingerprints(labelPair *dto.LabelPair, fingerprints *dto.FingerprintCollection) (err error) {
|
||||
begin := time.Now()
|
||||
|
||||
defer func() {
|
||||
duration := time.Now().Sub(begin)
|
||||
|
||||
recordOutcome(storageOperations, storageLatency, duration, err, map[string]string{operation: setLabelPairFingerprints, result: success}, map[string]string{operation: setLabelPairFingerprints, result: failure})
|
||||
}()
|
||||
|
||||
labelPairEncoded := coding.NewProtocolBufferEncoder(labelPair)
|
||||
fingerprintsEncoded := coding.NewProtocolBufferEncoder(fingerprints)
|
||||
err = l.labelSetToFingerprints.Put(labelPairEncoded, fingerprintsEncoded)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (l *LevelDBMetricPersistence) setLabelNameFingerprints(labelName *dto.LabelName, fingerprints *dto.FingerprintCollection) (err error) {
|
||||
begin := time.Now()
|
||||
|
||||
defer func() {
|
||||
duration := time.Now().Sub(begin)
|
||||
|
||||
recordOutcome(storageOperations, storageLatency, duration, err, map[string]string{operation: setLabelNameFingerprints, result: success}, map[string]string{operation: setLabelNameFingerprints, result: failure})
|
||||
}()
|
||||
|
||||
labelNameEncoded := coding.NewProtocolBufferEncoder(labelName)
|
||||
fingerprintsEncoded := coding.NewProtocolBufferEncoder(fingerprints)
|
||||
|
||||
err = l.labelNameToFingerprints.Put(labelNameEncoded, fingerprintsEncoded)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (l *LevelDBMetricPersistence) appendLabelPairFingerprint(labelPair *dto.LabelPair, fingerprint *dto.Fingerprint) (err error) {
|
||||
begin := time.Now()
|
||||
|
||||
defer func() {
|
||||
duration := time.Now().Sub(begin)
|
||||
|
||||
recordOutcome(storageOperations, storageLatency, duration, err, map[string]string{operation: appendLabelPairFingerprint, result: success}, map[string]string{operation: appendLabelPairFingerprint, result: failure})
|
||||
}()
|
||||
|
||||
has, err := l.HasLabelPair(labelPair)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var fingerprints *dto.FingerprintCollection
|
||||
if has {
|
||||
fingerprints, err = l.getFingerprintsForLabelSet(labelPair)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
fingerprints = &dto.FingerprintCollection{}
|
||||
}
|
||||
|
||||
fingerprints.Member = append(fingerprints.Member, fingerprint)
|
||||
|
||||
err = l.setLabelPairFingerprints(labelPair, fingerprints)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (l *LevelDBMetricPersistence) appendLabelNameFingerprint(labelPair *dto.LabelPair, fingerprint *dto.Fingerprint) (err error) {
|
||||
begin := time.Now()
|
||||
|
||||
defer func() {
|
||||
duration := time.Now().Sub(begin)
|
||||
|
||||
recordOutcome(storageOperations, storageLatency, duration, err, map[string]string{operation: appendLabelNameFingerprint, result: success}, map[string]string{operation: appendLabelNameFingerprint, result: failure})
|
||||
}()
|
||||
|
||||
labelName := &dto.LabelName{
|
||||
Name: labelPair.Name,
|
||||
}
|
||||
|
||||
has, err := l.HasLabelName(labelName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var fingerprints *dto.FingerprintCollection
|
||||
if has {
|
||||
fingerprints, err = l.GetLabelNameFingerprints(labelName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
fingerprints = &dto.FingerprintCollection{}
|
||||
}
|
||||
|
||||
fingerprints.Member = append(fingerprints.Member, fingerprint)
|
||||
|
||||
err = l.setLabelNameFingerprints(labelName, fingerprints)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (l *LevelDBMetricPersistence) appendFingerprints(sample model.Sample) (err error) {
|
||||
begin := time.Now()
|
||||
|
||||
defer func() {
|
||||
duration := time.Now().Sub(begin)
|
||||
|
||||
recordOutcome(storageOperations, storageLatency, duration, err, map[string]string{operation: appendFingerprints, result: success}, map[string]string{operation: appendFingerprints, result: failure})
|
||||
}()
|
||||
|
||||
fingerprintDTO := sample.Metric.Fingerprint().ToDTO()
|
||||
|
||||
fingerprintKey := coding.NewProtocolBufferEncoder(fingerprintDTO)
|
||||
metricDTO := model.SampleToMetricDTO(&sample)
|
||||
metricDTOEncoder := coding.NewProtocolBufferEncoder(metricDTO)
|
||||
|
||||
err = l.fingerprintToMetrics.Put(fingerprintKey, metricDTOEncoder)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
labelCount := len(metricDTO.LabelPair)
|
||||
labelPairErrors := make(chan error, labelCount)
|
||||
labelNameErrors := make(chan error, labelCount)
|
||||
|
||||
for _, labelPair := range metricDTO.LabelPair {
|
||||
go func(labelPair *dto.LabelPair) {
|
||||
labelNameErrors <- l.appendLabelNameFingerprint(labelPair, fingerprintDTO)
|
||||
}(labelPair)
|
||||
|
||||
go func(labelPair *dto.LabelPair) {
|
||||
labelPairErrors <- l.appendLabelPairFingerprint(labelPair, fingerprintDTO)
|
||||
}(labelPair)
|
||||
}
|
||||
|
||||
for i := 0; i < cap(labelPairErrors); i++ {
|
||||
err = <-labelPairErrors
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < cap(labelNameErrors); i++ {
|
||||
err = <-labelNameErrors
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (l *LevelDBMetricPersistence) AppendSample(sample model.Sample) (err error) {
|
||||
begin := time.Now()
|
||||
defer func() {
|
||||
duration := time.Now().Sub(begin)
|
||||
|
||||
recordOutcome(storageOperations, storageLatency, duration, err, map[string]string{operation: appendSample, result: success}, map[string]string{operation: appendSample, result: failure})
|
||||
}()
|
||||
|
||||
metricDTO := model.SampleToMetricDTO(&sample)
|
||||
|
||||
indexHas, err := l.hasIndexMetric(metricDTO)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
fingerprint := sample.Metric.Fingerprint()
|
||||
|
||||
if !indexHas {
|
||||
err = l.indexMetric(metricDTO)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = l.appendFingerprints(sample)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
fingerprintDTO := fingerprint.ToDTO()
|
||||
|
||||
sampleKeyDTO := &dto.SampleKey{
|
||||
Fingerprint: fingerprintDTO,
|
||||
Timestamp: indexable.EncodeTime(sample.Timestamp),
|
||||
}
|
||||
sampleValueDTO := &dto.SampleValue{
|
||||
Value: proto.Float32(float32(sample.Value)),
|
||||
}
|
||||
sampleKeyEncoded := coding.NewProtocolBufferEncoder(sampleKeyDTO)
|
||||
sampleValueEncoded := coding.NewProtocolBufferEncoder(sampleValueDTO)
|
||||
|
||||
err = l.metricSamples.Put(sampleKeyEncoded, sampleValueEncoded)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@ -1,31 +0,0 @@
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"github.com/prometheus/prometheus/storage/metric"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var testGetFingerprintsForLabelSetUsesAndForLabelMatching = buildTestPersistence("get_fingerprints_for_labelset_uses_and_for_label_matching", metric.GetFingerprintsForLabelSetUsesAndForLabelMatchingTests)
|
||||
|
||||
func TestGetFingerprintsForLabelSetUsesAndForLabelMatching(t *testing.T) {
|
||||
testGetFingerprintsForLabelSetUsesAndForLabelMatching(t)
|
||||
}
|
||||
|
||||
func BenchmarkGetFingerprintsForLabelSetUsesAndForLabelMatching(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testGetFingerprintsForLabelSetUsesAndForLabelMatching(b)
|
||||
}
|
||||
}
|
|
@ -1,67 +0,0 @@
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"github.com/prometheus/prometheus/storage/metric"
|
||||
"github.com/prometheus/prometheus/utility/test"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func testGetValueAtTime(t test.Tester) {
|
||||
persistenceMaker := buildTestPersistencesMaker("get_value_at_time", t)
|
||||
metric.GetValueAtTimeTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestGetValueAtTime(t *testing.T) {
|
||||
testGetValueAtTime(t)
|
||||
}
|
||||
|
||||
func BenchmarkGetValueAtTime(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testGetValueAtTime(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testGetBoundaryValues(t test.Tester) {
|
||||
persistenceMaker := buildTestPersistencesMaker("get_boundary_values", t)
|
||||
|
||||
metric.GetBoundaryValuesTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestGetBoundaryValues(t *testing.T) {
|
||||
testGetBoundaryValues(t)
|
||||
}
|
||||
|
||||
func BenchmarkGetBoundaryValues(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testGetBoundaryValues(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testGetRangeValues(t test.Tester) {
|
||||
persistenceMaker := buildTestPersistencesMaker("get_range_values", t)
|
||||
|
||||
metric.GetRangeValuesTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestGetRangeValues(t *testing.T) {
|
||||
testGetRangeValues(t)
|
||||
}
|
||||
|
||||
func BenchmarkGetRangeValues(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testGetRangeValues(b)
|
||||
}
|
||||
}
|
|
@ -1,109 +0,0 @@
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"github.com/prometheus/prometheus/storage/metric"
|
||||
"github.com/prometheus/prometheus/utility/test"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var testBasicLifecycle = buildTestPersistence("basic_lifecycle", metric.BasicLifecycleTests)
|
||||
|
||||
func TestBasicLifecycle(t *testing.T) {
|
||||
testBasicLifecycle(t)
|
||||
}
|
||||
|
||||
func BenchmarkBasicLifecycle(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testBasicLifecycle(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testReadEmpty = buildTestPersistence("read_empty", metric.ReadEmptyTests)
|
||||
|
||||
func TestReadEmpty(t *testing.T) {
|
||||
testReadEmpty(t)
|
||||
}
|
||||
|
||||
func BenchmarkReadEmpty(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testReadEmpty(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testAppendSampleAsPureSparseAppend = buildTestPersistence("append_sample_as_pure_sparse_append", metric.AppendSampleAsPureSparseAppendTests)
|
||||
|
||||
func TestAppendSampleAsPureSparseAppend(t *testing.T) {
|
||||
testAppendSampleAsPureSparseAppend(t)
|
||||
}
|
||||
|
||||
func BenchmarkAppendSampleAsPureSparseAppend(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testAppendSampleAsPureSparseAppend(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testAppendSampleAsSparseAppendWithReads = buildTestPersistence("append_sample_as_sparse_append_with_reads", metric.AppendSampleAsSparseAppendWithReadsTests)
|
||||
|
||||
func TestAppendSampleAsSparseAppendWithReads(t *testing.T) {
|
||||
testAppendSampleAsSparseAppendWithReads(t)
|
||||
}
|
||||
|
||||
func BenchmarkAppendSampleAsSparseAppendWithReads(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testAppendSampleAsSparseAppendWithReads(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testAppendSampleAsPureSingleEntityAppend = buildTestPersistence("append_sample_as_pure_single_entity_append", metric.AppendSampleAsPureSingleEntityAppendTests)
|
||||
|
||||
func TestAppendSampleAsPureSingleEntityAppend(t *testing.T) {
|
||||
testAppendSampleAsPureSingleEntityAppend(t)
|
||||
}
|
||||
|
||||
func BenchmarkAppendSampleAsPureSingleEntityAppend(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testAppendSampleAsPureSingleEntityAppend(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testStochastic(t test.Tester) {
|
||||
persistenceMaker := func() metric.MetricPersistence {
|
||||
temporaryDirectory, err := ioutil.TempDir("", "test_leveldb_stochastic")
|
||||
if err != nil {
|
||||
t.Errorf("Could not create test directory: %q\n", err)
|
||||
}
|
||||
|
||||
p, err := NewLevelDBMetricPersistence(temporaryDirectory)
|
||||
if err != nil {
|
||||
t.Errorf("Could not start up LevelDB: %q\n", err)
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
metric.StochasticTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestStochastic(t *testing.T) {
|
||||
testStochastic(t)
|
||||
}
|
||||
|
||||
func BenchmarkStochastic(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testStochastic(b)
|
||||
}
|
||||
}
|
|
@@ -1,84 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package leveldb

import (
	"fmt"
	"github.com/prometheus/prometheus/storage/metric"
	"github.com/prometheus/prometheus/utility/test"
	"io"
	"io/ioutil"
	"os"
)

type purger struct {
	path string
}

func (p purger) Close() error {
	return os.RemoveAll(p.path)
}

func buildTestPersistencesMaker(name string, t test.Tester) func() (metric.MetricPersistence, io.Closer) {
	return func() (metric.MetricPersistence, io.Closer) {
		temporaryDirectory, err := ioutil.TempDir("", "get_value_at_time")
		if err != nil {
			t.Errorf("Could not create test directory: %q\n", err)
		}

		p, err := NewLevelDBMetricPersistence(temporaryDirectory)
		if err != nil {
			t.Errorf("Could not start up LevelDB: %q\n", err)
		}

		purger := purger{
			path: temporaryDirectory,
		}

		return p, purger
	}

}

func buildTestPersistence(name string, f func(p metric.MetricPersistence, t test.Tester)) func(t test.Tester) {
	return func(t test.Tester) {
		temporaryDirectory, err := ioutil.TempDir("", fmt.Sprintf("test_leveldb_%s", name))

		if err != nil {
			t.Errorf("Could not create test directory: %q\n", err)
			return
		}

		defer func() {
			err := os.RemoveAll(temporaryDirectory)
			if err != nil {
				t.Errorf("Could not remove temporary directory: %q\n", err)
			}
		}()

		p, err := NewLevelDBMetricPersistence(temporaryDirectory)
		if err != nil {
			t.Errorf("Could not create LevelDB Metric Persistence: %q\n", err)
		}

		defer func() {
			err := p.Close()
			if err != nil {
				t.Errorf("Anomaly while closing database: %q\n", err)
			}
		}()

		f(p, t)
	}
}
368 storage/metric/memory.go Normal file
@@ -0,0 +1,368 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric

import (
	"fmt"
	"github.com/prometheus/prometheus/model"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/utility"
	"github.com/ryszard/goskiplist/skiplist"
	"sort"
	"time"
)

const (
	// Used as a separator in the format string for generating the internal label
	// value pair set fingerprints.
	reservedDelimiter = `"`
)

// Models a given sample entry stored in the in-memory arena.
type value interface {
	// Gets the given value.
	get() model.SampleValue
}

// Models a single sample value. It presumes that there is either no subsequent
// value seen or that any subsequent values are of a different value.
type singletonValue model.SampleValue

func (v singletonValue) get() model.SampleValue {
	return model.SampleValue(v)
}

type skipListTime time.Time

func (t skipListTime) LessThan(o skiplist.Ordered) bool {
	return time.Time(o.(skipListTime)).Before(time.Time(t))
}

type stream struct {
	metric model.Metric
	values *skiplist.SkipList
}

func (s stream) add(sample model.Sample) {
	s.values.Set(skipListTime(sample.Timestamp), singletonValue(sample.Value))
}

func (s stream) forEach(decoder storage.RecordDecoder, filter storage.RecordFilter, operator storage.RecordOperator) (scannedEntireCorpus bool, err error) {
	iterator := s.values.SeekToLast()
	if iterator == nil {
		panic("nil iterator")
	}

	defer iterator.Close()

	for iterator.Previous() {
		decodedKey, decodeErr := decoder.DecodeKey(iterator.Key())
		if decodeErr != nil {
			continue
		}
		decodedValue, decodeErr := decoder.DecodeValue(iterator.Value())
		if decodeErr != nil {
			continue
		}

		switch filter.Filter(decodedKey, decodedValue) {
		case storage.STOP:
			return
		case storage.SKIP:
			continue
		case storage.ACCEPT:
			opErr := operator.Operate(decodedKey, decodedValue)
			if opErr != nil {
				if opErr.Continuable {
					continue
				}
				break
			}
		}
	}
	scannedEntireCorpus = true
	return
}

func newStream(metric model.Metric) stream {
	return stream{
		values: skiplist.New(),
		metric: metric,
	}
}

type memorySeriesStorage struct {
	fingerprintToSeries     map[model.Fingerprint]stream
	labelPairToFingerprints map[string]model.Fingerprints
	labelNameToFingerprints map[model.LabelName]model.Fingerprints
}

func (s memorySeriesStorage) AppendSamples(samples model.Samples) (err error) {
	for _, sample := range samples {
		s.AppendSample(sample)
	}

	return
}

func (s memorySeriesStorage) AppendSample(sample model.Sample) (err error) {
	metric := sample.Metric
	fingerprint := model.NewFingerprintFromMetric(metric)
	series, ok := s.fingerprintToSeries[fingerprint]

	if !ok {
		series = newStream(metric)
		s.fingerprintToSeries[fingerprint] = series

		for k, v := range metric {
			labelPair := fmt.Sprintf("%s%s%s", k, reservedDelimiter, v)

			labelPairValues := s.labelPairToFingerprints[labelPair]
			labelPairValues = append(labelPairValues, fingerprint)
			s.labelPairToFingerprints[labelPair] = labelPairValues

			labelNameValues := s.labelNameToFingerprints[k]
			labelNameValues = append(labelNameValues, fingerprint)
			s.labelNameToFingerprints[k] = labelNameValues
		}
	}

	series.add(sample)

	return
}

func (s memorySeriesStorage) GetFingerprintsForLabelSet(l model.LabelSet) (fingerprints model.Fingerprints, err error) {

	sets := []utility.Set{}

	for k, v := range l {
		signature := fmt.Sprintf("%s%s%s", k, reservedDelimiter, v)
		values := s.labelPairToFingerprints[signature]
		set := utility.Set{}
		for _, fingerprint := range values {
			set.Add(fingerprint)
		}
		sets = append(sets, set)
	}

	setCount := len(sets)
	if setCount == 0 {
		return
	}

	base := sets[0]
	for i := 1; i < setCount; i++ {
		base = base.Intersection(sets[i])
	}
	for _, e := range base.Elements() {
		fingerprint := e.(model.Fingerprint)
		fingerprints = append(fingerprints, fingerprint)
	}

	return
}

func (s memorySeriesStorage) GetFingerprintsForLabelName(l model.LabelName) (fingerprints model.Fingerprints, err error) {
	values := s.labelNameToFingerprints[l]

	fingerprints = append(fingerprints, values...)

	return
}

func (s memorySeriesStorage) GetMetricForFingerprint(f model.Fingerprint) (metric *model.Metric, err error) {
	series, ok := s.fingerprintToSeries[f]
	if !ok {
		return
	}

	metric = &series.metric

	return
}

// XXX: Terrible wart.
func interpolateSample(x1, x2 time.Time, y1, y2 float32, e time.Time) model.SampleValue {
	yDelta := y2 - y1
	xDelta := x2.Sub(x1)

	dDt := yDelta / float32(xDelta)
	offset := float32(e.Sub(x1))

	return model.SampleValue(y1 + (offset * dDt))
}

func (s memorySeriesStorage) GetValueAtTime(m model.Metric, t time.Time, p StalenessPolicy) (sample *model.Sample, err error) {
	fingerprint := model.NewFingerprintFromMetric(m)
	series, ok := s.fingerprintToSeries[fingerprint]
	if !ok {
		return
	}

	iterator := series.values.Seek(skipListTime(t))
	if iterator == nil {
		return
	}

	foundTime := time.Time(iterator.Key().(skipListTime))
	if foundTime.Equal(t) {
		value := iterator.Value().(value)
		sample = &model.Sample{
			Metric:    m,
			Value:     value.get(),
			Timestamp: t,
		}

		return
	}

	if t.Sub(foundTime) > p.DeltaAllowance {
		return
	}

	secondTime := foundTime
	secondValue := iterator.Value().(value).get()

	if !iterator.Previous() {
		sample = &model.Sample{
			Metric:    m,
			Value:     iterator.Value().(value).get(),
			Timestamp: t,
		}
		return
	}

	firstTime := time.Time(iterator.Key().(skipListTime))
	if t.Sub(firstTime) > p.DeltaAllowance {
		return
	}

	if firstTime.Sub(secondTime) > p.DeltaAllowance {
		return
	}

	firstValue := iterator.Value().(value).get()

	sample = &model.Sample{
		Metric:    m,
		Value:     interpolateSample(firstTime, secondTime, float32(firstValue), float32(secondValue), t),
		Timestamp: t,
	}

	return
}

func (s memorySeriesStorage) GetBoundaryValues(m model.Metric, i model.Interval, p StalenessPolicy) (first *model.Sample, second *model.Sample, err error) {
	first, err = s.GetValueAtTime(m, i.OldestInclusive, p)
	if err != nil {
		return
	} else if first == nil {
		return
	}

	second, err = s.GetValueAtTime(m, i.NewestInclusive, p)
	if err != nil {
		return
	} else if second == nil {
		first = nil
	}

	return
}

func (s memorySeriesStorage) GetRangeValues(m model.Metric, i model.Interval) (samples *model.SampleSet, err error) {
	fingerprint := model.NewFingerprintFromMetric(m)
	series, ok := s.fingerprintToSeries[fingerprint]
	if !ok {
		return
	}

	samples = &model.SampleSet{
		Metric: m,
	}

	iterator := series.values.Seek(skipListTime(i.NewestInclusive))
	if iterator == nil {
		return
	}

	for {
		timestamp := time.Time(iterator.Key().(skipListTime))
		if timestamp.Before(i.OldestInclusive) {
			break
		}

		samples.Values = append(samples.Values,
			model.SamplePair{
				Value:     iterator.Value().(value).get(),
				Timestamp: timestamp,
			})

		if !iterator.Next() {
			break
		}
	}

	// XXX: We should not explicitly sort here but rather rely on the datastore.
	// This adds appreciable overhead.
	if samples != nil {
		sort.Sort(samples.Values)
	}

	return
}

func (s memorySeriesStorage) Close() (err error) {
	// This can probably be simplified:
	//
	// s.fingerPrintToSeries = map[model.Fingerprint]*stream{}
	// s.labelPairToFingerprints = map[string]model.Fingerprints{}
	// s.labelNameToFingerprints = map[model.LabelName]model.Fingerprints{}
	for fingerprint := range s.fingerprintToSeries {
		delete(s.fingerprintToSeries, fingerprint)
	}

	for labelPair := range s.labelPairToFingerprints {
		delete(s.labelPairToFingerprints, labelPair)
	}

	for labelName := range s.labelNameToFingerprints {
		delete(s.labelNameToFingerprints, labelName)
	}

	return
}

func (s memorySeriesStorage) GetAllMetricNames() ([]string, error) {
	panic("not implemented")
}

func (s memorySeriesStorage) ForEachSample(builder IteratorsForFingerprintBuilder) (err error) {
	for _, stream := range s.fingerprintToSeries {
		decoder, filter, operator := builder.ForStream(stream)

		stream.forEach(decoder, filter, operator)
	}

	return
}

func NewMemorySeriesStorage() memorySeriesStorage {
	return memorySeriesStorage{
		fingerprintToSeries:     make(map[model.Fingerprint]stream),
		labelPairToFingerprints: make(map[string]model.Fingerprints),
		labelNameToFingerprints: make(map[model.LabelName]model.Fingerprints),
	}
}
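A minimal, hypothetical usage sketch of the in-memory arena added above (not part of this commit; it assumes model.Metric is a plain label map, that StalenessPolicy carries a time.Duration DeltaAllowance, and that the exported method set is exactly what this file shows):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/model"
	"github.com/prometheus/prometheus/storage/metric"
)

func main() {
	// Hypothetical sketch: build the in-memory arena and tear it down on exit.
	arena := metric.NewMemorySeriesStorage()
	defer arena.Close()

	m := model.Metric{"job": "api-server", "instance": "a"}
	base := time.Now()

	// Append two samples one minute apart for the same metric.
	arena.AppendSample(model.Sample{Metric: m, Value: 10, Timestamp: base})
	arena.AppendSample(model.Sample{Metric: m, Value: 20, Timestamp: base.Add(time.Minute)})

	// A lookup between the two samples should interpolate linearly, as long as
	// the staleness allowance covers the gap (see interpolateSample above).
	policy := metric.StalenessPolicy{DeltaAllowance: 5 * time.Minute}
	sample, err := arena.GetValueAtTime(m, base.Add(30*time.Second), policy)
	if err == nil && sample != nil {
		fmt.Println(sample.Value)
	}
}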
|
@ -1,55 +0,0 @@
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package memory
|
||||
|
||||
import (
|
||||
"github.com/prometheus/prometheus/storage/metric"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var testGetFingerprintsForLabelSet = buildTestPersistence(metric.GetFingerprintsForLabelSetTests)
|
||||
|
||||
func TestGetFingerprintsForLabelSet(t *testing.T) {
|
||||
testGetFingerprintsForLabelSet(t)
|
||||
}
|
||||
|
||||
func BenchmarkGetFingerprintsForLabelSet(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testGetFingerprintsForLabelSet(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testGetFingerprintsForLabelName = buildTestPersistence(metric.GetFingerprintsForLabelNameTests)
|
||||
|
||||
func TestGetFingerprintsForLabelName(t *testing.T) {
|
||||
testGetFingerprintsForLabelName(t)
|
||||
}
|
||||
|
||||
func BenchmarkGetFingerprintsForLabelName(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testGetFingerprintsForLabelName(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testGetMetricForFingerprint = buildTestPersistence(metric.GetMetricForFingerprintTests)
|
||||
|
||||
func TestGetMetricForFingerprint(t *testing.T) {
|
||||
testGetMetricForFingerprint(t)
|
||||
}
|
||||
|
||||
func BenchmarkGetMetricForFingerprint(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testGetMetricForFingerprint(b)
|
||||
}
|
||||
}
|
|
@ -1,303 +0,0 @@
|
|||
package memory
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/prometheus/prometheus/model"
|
||||
"github.com/prometheus/prometheus/storage/metric"
|
||||
"github.com/prometheus/prometheus/utility"
|
||||
"github.com/ryszard/goskiplist/skiplist"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
reservedDelimiter = `"`
|
||||
)
|
||||
|
||||
type skipListTime time.Time
|
||||
|
||||
func (t skipListTime) LessThan(o skiplist.Ordered) bool {
|
||||
// return time.Time(t).Before(time.Time(o.(skipListTime)))
|
||||
return time.Time(o.(skipListTime)).Before(time.Time(t))
|
||||
}
|
||||
|
||||
type stream struct {
|
||||
metric model.Metric
|
||||
values *skiplist.SkipList
|
||||
}
|
||||
|
||||
func (s *stream) add(sample model.Sample) {
|
||||
s.values.Set(skipListTime(sample.Timestamp), sample.Value)
|
||||
}
|
||||
|
||||
func newStream(metric model.Metric) *stream {
|
||||
return &stream{
|
||||
values: skiplist.New(),
|
||||
metric: metric,
|
||||
}
|
||||
}
|
||||
|
||||
type memorySeriesStorage struct {
|
||||
fingerprintToSeries map[model.Fingerprint]*stream
|
||||
labelPairToFingerprints map[string]model.Fingerprints
|
||||
labelNameToFingerprints map[model.LabelName]model.Fingerprints
|
||||
}
|
||||
|
||||
func (s *memorySeriesStorage) AppendSample(sample model.Sample) (err error) {
|
||||
metric := sample.Metric
|
||||
fingerprint := metric.Fingerprint()
|
||||
series, ok := s.fingerprintToSeries[fingerprint]
|
||||
|
||||
if !ok {
|
||||
series = newStream(metric)
|
||||
s.fingerprintToSeries[fingerprint] = series
|
||||
|
||||
for k, v := range metric {
|
||||
labelPair := fmt.Sprintf("%s%s%s", k, reservedDelimiter, v)
|
||||
|
||||
labelPairValues := s.labelPairToFingerprints[labelPair]
|
||||
labelPairValues = append(labelPairValues, fingerprint)
|
||||
s.labelPairToFingerprints[labelPair] = labelPairValues
|
||||
|
||||
labelNameValues := s.labelNameToFingerprints[k]
|
||||
labelNameValues = append(labelNameValues, fingerprint)
|
||||
s.labelNameToFingerprints[k] = labelNameValues
|
||||
}
|
||||
}
|
||||
|
||||
series.add(sample)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (s *memorySeriesStorage) GetFingerprintsForLabelSet(l model.LabelSet) (fingerprints model.Fingerprints, err error) {
|
||||
|
||||
sets := []utility.Set{}
|
||||
|
||||
for k, v := range l {
|
||||
signature := fmt.Sprintf("%s%s%s", k, reservedDelimiter, v)
|
||||
values := s.labelPairToFingerprints[signature]
|
||||
set := utility.Set{}
|
||||
for _, fingerprint := range values {
|
||||
set.Add(fingerprint)
|
||||
}
|
||||
sets = append(sets, set)
|
||||
}
|
||||
|
||||
setCount := len(sets)
|
||||
if setCount == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
base := sets[0]
|
||||
for i := 1; i < setCount; i++ {
|
||||
base = base.Intersection(sets[i])
|
||||
}
|
||||
for _, e := range base.Elements() {
|
||||
fingerprint := e.(model.Fingerprint)
|
||||
fingerprints = append(fingerprints, fingerprint)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (s *memorySeriesStorage) GetFingerprintsForLabelName(l model.LabelName) (fingerprints model.Fingerprints, err error) {
|
||||
values := s.labelNameToFingerprints[l]
|
||||
|
||||
fingerprints = append(fingerprints, values...)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (s *memorySeriesStorage) GetMetricForFingerprint(f model.Fingerprint) (metric *model.Metric, err error) {
|
||||
series, ok := s.fingerprintToSeries[f]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
metric = &series.metric
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// XXX: Terrible wart.
|
||||
func interpolate(x1, x2 time.Time, y1, y2 float32, e time.Time) model.SampleValue {
|
||||
yDelta := y2 - y1
|
||||
xDelta := x2.Sub(x1)
|
||||
|
||||
dDt := yDelta / float32(xDelta)
|
||||
offset := float32(e.Sub(x1))
|
||||
|
||||
return model.SampleValue(y1 + (offset * dDt))
|
||||
}
|
||||
|
||||
func (s *memorySeriesStorage) GetValueAtTime(m model.Metric, t time.Time, p metric.StalenessPolicy) (sample *model.Sample, err error) {
|
||||
fingerprint := m.Fingerprint()
|
||||
series, ok := s.fingerprintToSeries[fingerprint]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
iterator := series.values.Seek(skipListTime(t))
|
||||
if iterator == nil {
|
||||
return
|
||||
}
|
||||
|
||||
foundTime := time.Time(iterator.Key().(skipListTime))
|
||||
if foundTime.Equal(t) {
|
||||
sample = &model.Sample{
|
||||
Metric: m,
|
||||
Value: iterator.Value().(model.SampleValue),
|
||||
Timestamp: t,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if t.Sub(foundTime) > p.DeltaAllowance {
|
||||
return
|
||||
}
|
||||
|
||||
secondTime := foundTime
|
||||
secondValue := iterator.Value().(model.SampleValue)
|
||||
|
||||
if !iterator.Previous() {
|
||||
sample = &model.Sample{
|
||||
Metric: m,
|
||||
Value: iterator.Value().(model.SampleValue),
|
||||
Timestamp: t,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
firstTime := time.Time(iterator.Key().(skipListTime))
|
||||
if t.Sub(firstTime) > p.DeltaAllowance {
|
||||
return
|
||||
}
|
||||
|
||||
if firstTime.Sub(secondTime) > p.DeltaAllowance {
|
||||
return
|
||||
}
|
||||
|
||||
firstValue := iterator.Value().(model.SampleValue)
|
||||
|
||||
sample = &model.Sample{
|
||||
Metric: m,
|
||||
Value: interpolate(firstTime, secondTime, float32(firstValue), float32(secondValue), t),
|
||||
Timestamp: t,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (s *memorySeriesStorage) GetBoundaryValues(m model.Metric, i model.Interval, p metric.StalenessPolicy) (first *model.Sample, second *model.Sample, err error) {
|
||||
first, err = s.GetValueAtTime(m, i.OldestInclusive, p)
|
||||
if err != nil {
|
||||
return
|
||||
} else if first == nil {
|
||||
return
|
||||
}
|
||||
|
||||
second, err = s.GetValueAtTime(m, i.NewestInclusive, p)
|
||||
if err != nil {
|
||||
return
|
||||
} else if second == nil {
|
||||
first = nil
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (s *memorySeriesStorage) GetRangeValues(m model.Metric, i model.Interval) (samples *model.SampleSet, err error) {
|
||||
fingerprint := m.Fingerprint()
|
||||
series, ok := s.fingerprintToSeries[fingerprint]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
samples = &model.SampleSet{
|
||||
Metric: m,
|
||||
}
|
||||
|
||||
iterator := series.values.Seek(skipListTime(i.NewestInclusive))
|
||||
if iterator == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
timestamp := time.Time(iterator.Key().(skipListTime))
|
||||
if timestamp.Before(i.OldestInclusive) {
|
||||
break
|
||||
}
|
||||
|
||||
samples.Values = append(samples.Values,
|
||||
model.SamplePair{
|
||||
Value: model.SampleValue(iterator.Value().(model.SampleValue)),
|
||||
Timestamp: timestamp,
|
||||
})
|
||||
|
||||
if !iterator.Next() {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// XXX: We should not explicitly sort here but rather rely on the datastore.
|
||||
// This adds appreciable overhead.
|
||||
if samples != nil {
|
||||
sort.Sort(samples.Values)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (s *memorySeriesStorage) Close() (err error) {
|
||||
for fingerprint := range s.fingerprintToSeries {
|
||||
delete(s.fingerprintToSeries, fingerprint)
|
||||
}
|
||||
|
||||
for labelPair := range s.labelPairToFingerprints {
|
||||
delete(s.labelPairToFingerprints, labelPair)
|
||||
}
|
||||
|
||||
for labelName := range s.labelNameToFingerprints {
|
||||
delete(s.labelNameToFingerprints, labelName)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (s *memorySeriesStorage) GetAllMetricNames() (names []string, err error) {
|
||||
panic("not implemented")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (s *memorySeriesStorage) GetAllLabelNames() (names []string, err error) {
|
||||
panic("not implemented")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (s *memorySeriesStorage) GetAllLabelPairs() (pairs []model.LabelSet, err error) {
|
||||
panic("not implemented")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (s *memorySeriesStorage) GetAllMetrics() (metrics []model.LabelSet, err error) {
|
||||
panic("not implemented")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func NewMemorySeriesStorage() metric.MetricPersistence {
|
||||
return newMemorySeriesStorage()
|
||||
}
|
||||
|
||||
func newMemorySeriesStorage() *memorySeriesStorage {
|
||||
return &memorySeriesStorage{
|
||||
fingerprintToSeries: make(map[model.Fingerprint]*stream),
|
||||
labelPairToFingerprints: make(map[string]model.Fingerprints),
|
||||
labelNameToFingerprints: make(map[model.LabelName]model.Fingerprints),
|
||||
}
|
||||
}
|
|
@ -1,76 +0,0 @@
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package memory
|
||||
|
||||
import (
|
||||
"github.com/prometheus/prometheus/storage/metric"
|
||||
"github.com/prometheus/prometheus/utility/test"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func testGetValueAtTime(t test.Tester) {
|
||||
persistenceMaker := func() (metric.MetricPersistence, io.Closer) {
|
||||
return NewMemorySeriesStorage(), ioutil.NopCloser(nil)
|
||||
}
|
||||
|
||||
metric.GetValueAtTimeTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestGetValueAtTime(t *testing.T) {
|
||||
testGetValueAtTime(t)
|
||||
}
|
||||
|
||||
func BenchmarkGetValueAtTime(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testGetValueAtTime(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testGetBoundaryValues(t test.Tester) {
|
||||
persistenceMaker := func() (metric.MetricPersistence, io.Closer) {
|
||||
return NewMemorySeriesStorage(), ioutil.NopCloser(nil)
|
||||
}
|
||||
|
||||
metric.GetBoundaryValuesTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestGetBoundaryValues(t *testing.T) {
|
||||
testGetBoundaryValues(t)
|
||||
}
|
||||
|
||||
func BenchmarkGetBoundaryValues(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testGetBoundaryValues(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testGetRangeValues(t test.Tester) {
|
||||
persistenceMaker := func() (metric.MetricPersistence, io.Closer) {
|
||||
return NewMemorySeriesStorage(), ioutil.NopCloser(nil)
|
||||
}
|
||||
|
||||
metric.GetRangeValuesTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestGetRangeValues(t *testing.T) {
|
||||
testGetRangeValues(t)
|
||||
}
|
||||
|
||||
func BenchmarkGetRangeValues(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testGetRangeValues(b)
|
||||
}
|
||||
}
|
|
@ -1,114 +0,0 @@
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package memory
|
||||
|
||||
import (
|
||||
"github.com/prometheus/prometheus/storage/metric"
|
||||
"github.com/prometheus/prometheus/utility/test"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func buildTestPersistence(f func(p metric.MetricPersistence, t test.Tester)) func(t test.Tester) {
|
||||
return func(t test.Tester) {
|
||||
|
||||
p := NewMemorySeriesStorage()
|
||||
|
||||
defer func() {
|
||||
err := p.Close()
|
||||
if err != nil {
|
||||
t.Errorf("Anomaly while closing database: %q\n", err)
|
||||
}
|
||||
}()
|
||||
|
||||
f(p, t)
|
||||
}
|
||||
}
|
||||
|
||||
var testBasicLifecycle = buildTestPersistence(metric.BasicLifecycleTests)
|
||||
|
||||
func TestBasicLifecycle(t *testing.T) {
|
||||
testBasicLifecycle(t)
|
||||
}
|
||||
|
||||
func BenchmarkBasicLifecycle(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testBasicLifecycle(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testReadEmpty = buildTestPersistence(metric.ReadEmptyTests)
|
||||
|
||||
func TestReadEmpty(t *testing.T) {
|
||||
testReadEmpty(t)
|
||||
}
|
||||
|
||||
func BenchmarkReadEmpty(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testReadEmpty(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testAppendSampleAsPureSparseAppend = buildTestPersistence(metric.AppendSampleAsPureSparseAppendTests)
|
||||
|
||||
func TestAppendSampleAsPureSparseAppend(t *testing.T) {
|
||||
testAppendSampleAsPureSparseAppend(t)
|
||||
}
|
||||
|
||||
func BenchmarkAppendSampleAsPureSparseAppend(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testAppendSampleAsPureSparseAppend(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testAppendSampleAsSparseAppendWithReads = buildTestPersistence(metric.AppendSampleAsSparseAppendWithReadsTests)
|
||||
|
||||
func TestAppendSampleAsSparseAppendWithReads(t *testing.T) {
|
||||
testAppendSampleAsSparseAppendWithReads(t)
|
||||
}
|
||||
|
||||
func BenchmarkAppendSampleAsSparseAppendWithReads(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testAppendSampleAsSparseAppendWithReads(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testAppendSampleAsPureSingleEntityAppend = buildTestPersistence(metric.AppendSampleAsPureSingleEntityAppendTests)
|
||||
|
||||
func TestAppendSampleAsPureSingleEntityAppend(t *testing.T) {
|
||||
testAppendSampleAsPureSingleEntityAppend(t)
|
||||
}
|
||||
|
||||
func BenchmarkAppendSampleAsPureSingleEntityAppend(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testAppendSampleAsPureSingleEntityAppend(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testStochastic(t test.Tester) {
|
||||
persistenceMaker := func() metric.MetricPersistence {
|
||||
return NewMemorySeriesStorage()
|
||||
}
|
||||
|
||||
metric.StochasticTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestStochastic(t *testing.T) {
|
||||
testStochastic(t)
|
||||
}
|
||||
|
||||
func BenchmarkStochastic(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testStochastic(b)
|
||||
}
|
||||
}
|
460 storage/metric/operation.go Normal file
@@ -0,0 +1,460 @@
|
|||
// Copyright 2013 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Encapsulates a primitive query operation.
|
||||
type op interface {
|
||||
// The time at which this operation starts.
|
||||
StartsAt() time.Time
|
||||
}
|
||||
|
||||
// Provides a sortable collection of operations.
|
||||
type ops []op
|
||||
|
||||
func (o ops) Len() int {
|
||||
return len(o)
|
||||
}
|
||||
|
||||
func (o ops) Less(i, j int) bool {
|
||||
return o[i].StartsAt().Before(o[j].StartsAt())
|
||||
}
|
||||
|
||||
func (o ops) Swap(i, j int) {
|
||||
o[i], o[j] = o[j], o[i]
|
||||
}
|
||||
|
||||
// Encapsulates getting values at or adjacent to a specific time.
|
||||
type getValuesAtTimeOp struct {
|
||||
time time.Time
|
||||
}
|
||||
|
||||
func (o getValuesAtTimeOp) String() string {
|
||||
return fmt.Sprintf("getValuesAtTimeOp at %s", o.time)
|
||||
}
|
||||
|
||||
func (g getValuesAtTimeOp) StartsAt() time.Time {
|
||||
return g.time
|
||||
}
|
||||
|
||||
// Encapsulates getting values at a given interval over a duration.
|
||||
type getValuesAtIntervalOp struct {
|
||||
from time.Time
|
||||
through time.Time
|
||||
interval time.Duration
|
||||
}
|
||||
|
||||
func (o getValuesAtIntervalOp) String() string {
|
||||
return fmt.Sprintf("getValuesAtIntervalOp from %s each %s through %s", o.from, o.interval, o.through)
|
||||
}
|
||||
|
||||
func (g getValuesAtIntervalOp) StartsAt() time.Time {
|
||||
return g.from
|
||||
}
|
||||
|
||||
func (g getValuesAtIntervalOp) Through() time.Time {
|
||||
return g.through
|
||||
}
|
||||
|
||||
type getValuesAlongRangeOp struct {
|
||||
from time.Time
|
||||
through time.Time
|
||||
}
|
||||
|
||||
func (o getValuesAlongRangeOp) String() string {
|
||||
return fmt.Sprintf("getValuesAlongRangeOp from %s through %s", o.from, o.through)
|
||||
}
|
||||
|
||||
func (g getValuesAlongRangeOp) StartsAt() time.Time {
|
||||
return g.from
|
||||
}
|
||||
|
||||
func (g getValuesAlongRangeOp) Through() time.Time {
|
||||
return g.through
|
||||
}
|
||||
|
||||
// Provides a collection of getMetricRangeOperation.
|
||||
type getMetricRangeOperations []getValuesAlongRangeOp
|
||||
|
||||
func (s getMetricRangeOperations) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s getMetricRangeOperations) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
// Sorts getMetricRangeOperation according duration in descending order.
|
||||
type rangeDurationSorter struct {
|
||||
getMetricRangeOperations
|
||||
}
|
||||
|
||||
func (s rangeDurationSorter) Less(i, j int) bool {
|
||||
l := s.getMetricRangeOperations[i]
|
||||
r := s.getMetricRangeOperations[j]
|
||||
|
||||
return !l.through.Before(r.through)
|
||||
}
|
||||
|
||||
// Encapsulates a general operation that occurs over a duration.
|
||||
type durationOperator interface {
|
||||
op
|
||||
|
||||
Through() time.Time
|
||||
}
|
||||
|
||||
// Sorts durationOperator by the operation's duration in ascending order.
|
||||
type durationOperators []durationOperator
|
||||
|
||||
func (o durationOperators) Len() int {
|
||||
return len(o)
|
||||
}
|
||||
|
||||
func (o durationOperators) Less(i, j int) bool {
|
||||
return o[i].Through().Before(o[j].Through())
|
||||
}
|
||||
|
||||
func (o durationOperators) Swap(i, j int) {
|
||||
o[i], o[j] = o[j], o[i]
|
||||
}
|
||||
|
||||
// Contains getValuesAtIntervalOp operations.
|
||||
type getValuesAtIntervalOps []getValuesAtIntervalOp
|
||||
|
||||
func (s getValuesAtIntervalOps) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s getValuesAtIntervalOps) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
// Sorts durationOperator by the operation's duration in descending order.
|
||||
type intervalDurationSorter struct {
|
||||
getValuesAtIntervalOps
|
||||
}
|
||||
|
||||
func (s intervalDurationSorter) Less(i, j int) bool {
|
||||
l := s.getValuesAtIntervalOps[i]
|
||||
r := s.getValuesAtIntervalOps[j]
|
||||
|
||||
return !l.through.Before(r.through)
|
||||
}
|
||||
|
||||
// Sorts getValuesAtIntervalOp operations in ascending order by their
|
||||
// frequency.
|
||||
type frequencySorter struct {
|
||||
getValuesAtIntervalOps
|
||||
}
|
||||
|
||||
func (s frequencySorter) Less(i, j int) bool {
|
||||
l := s.getValuesAtIntervalOps[i]
|
||||
r := s.getValuesAtIntervalOps[j]
|
||||
|
||||
return l.interval < r.interval
|
||||
}
|
||||
|
||||
// Selects and returns all operations that are getValuesAtIntervalOps operations.
|
||||
func collectIntervals(ops ops) (intervals map[time.Duration]getValuesAtIntervalOps) {
|
||||
intervals = make(map[time.Duration]getValuesAtIntervalOps)
|
||||
|
||||
for _, operation := range ops {
|
||||
intervalOp, ok := operation.(getValuesAtIntervalOp)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
operations, _ := intervals[intervalOp.interval]
|
||||
|
||||
operations = append(operations, intervalOp)
|
||||
intervals[intervalOp.interval] = operations
|
||||
}
|
||||
|
||||
for _, operations := range intervals {
|
||||
sort.Sort(intervalDurationSorter{operations})
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Selects and returns all operations that are getValuesAlongRangeOp operations.
|
||||
func collectRanges(ops ops) (ranges getMetricRangeOperations) {
|
||||
for _, operation := range ops {
|
||||
op, ok := operation.(getValuesAlongRangeOp)
|
||||
if ok {
|
||||
ranges = append(ranges, op)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(rangeDurationSorter{ranges})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func optimizeForward(pending ops) (out ops) {
|
||||
if len(pending) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
firstOperation = pending[0]
|
||||
)
|
||||
|
||||
pending = pending[1:len(pending)]
|
||||
|
||||
if _, ok := firstOperation.(getValuesAtTimeOp); ok {
|
||||
out = ops{firstOperation}
|
||||
tail := optimizeForward(pending)
|
||||
|
||||
return append(out, tail...)
|
||||
}
|
||||
|
||||
// If the last value was a scan at a given frequency along an interval,
|
||||
// several optimizations may exist.
|
||||
if operation, ok := firstOperation.(getValuesAtIntervalOp); ok {
|
||||
for _, peekOperation := range pending {
|
||||
if peekOperation.StartsAt().After(operation.Through()) {
|
||||
break
|
||||
}
|
||||
|
||||
// If the type is not a range request, we can't do anything.
|
||||
rangeOperation, ok := peekOperation.(getValuesAlongRangeOp)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if !rangeOperation.Through().After(operation.Through()) {
|
||||
var (
|
||||
before = getValuesAtIntervalOp(operation)
|
||||
after = getValuesAtIntervalOp(operation)
|
||||
)
|
||||
|
||||
before.through = rangeOperation.from
|
||||
|
||||
// Truncate the get value at interval request if a range request cuts
|
||||
// it off somewhere.
|
||||
var (
|
||||
t = rangeOperation.from
|
||||
)
|
||||
|
||||
for {
|
||||
t = t.Add(operation.interval)
|
||||
|
||||
if t.After(rangeOperation.through) {
|
||||
after.from = t
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
pending = append(ops{before, after}, pending...)
|
||||
sort.Sort(pending)
|
||||
|
||||
return optimizeForward(pending)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if operation, ok := firstOperation.(getValuesAlongRangeOp); ok {
|
||||
for _, peekOperation := range pending {
|
||||
if peekOperation.StartsAt().After(operation.Through()) {
|
||||
break
|
||||
}
|
||||
|
||||
// All values at a specific time may be elided into the range query.
|
||||
if _, ok := peekOperation.(getValuesAtTimeOp); ok {
|
||||
pending = pending[1:len(pending)]
|
||||
continue
|
||||
}
|
||||
|
||||
// Range queries should be concatenated if they overlap.
|
||||
if rangeOperation, ok := peekOperation.(getValuesAlongRangeOp); ok {
|
||||
pending = pending[1:len(pending)]
|
||||
|
||||
if rangeOperation.Through().After(operation.Through()) {
|
||||
operation.through = rangeOperation.through
|
||||
|
||||
var (
|
||||
head = ops{operation}
|
||||
tail = pending
|
||||
)
|
||||
|
||||
pending = append(head, tail...)
|
||||
|
||||
return optimizeForward(pending)
|
||||
}
|
||||
}
|
||||
|
||||
if intervalOperation, ok := peekOperation.(getValuesAtIntervalOp); ok {
|
||||
pending = pending[1:len(pending)]
|
||||
|
||||
if intervalOperation.through.After(operation.Through()) {
|
||||
var (
|
||||
t = intervalOperation.from
|
||||
)
|
||||
for {
|
||||
t = t.Add(intervalOperation.interval)
|
||||
|
||||
if t.After(intervalOperation.through) {
|
||||
intervalOperation.from = t
|
||||
|
||||
pending = append(ops{intervalOperation}, pending...)
|
||||
|
||||
return optimizeForward(pending)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Strictly needed?
|
||||
sort.Sort(pending)
|
||||
|
||||
tail := optimizeForward(pending)
|
||||
|
||||
return append(ops{firstOperation}, tail...)
|
||||
}
|
||||
|
||||
func selectQueriesForTime(time time.Time, queries ops) (out ops) {
|
||||
if len(queries) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if !queries[0].StartsAt().Equal(time) {
|
||||
return
|
||||
}
|
||||
|
||||
out = append(out, queries[0])
|
||||
tail := selectQueriesForTime(time, queries[1:len(queries)])
|
||||
|
||||
return append(out, tail...)
|
||||
}
|
||||
|
||||
// Flattens queries that occur at the same time according to duration and level
|
||||
// of greed.
|
||||
func optimizeTimeGroup(group ops) (out ops) {
|
||||
var (
|
||||
rangeOperations = collectRanges(group)
|
||||
intervalOperations = collectIntervals(group)
|
||||
|
||||
greediestRange durationOperator
|
||||
greediestIntervals map[time.Duration]durationOperator
|
||||
)
|
||||
|
||||
if len(rangeOperations) > 0 {
|
||||
operations := durationOperators{}
|
||||
for i := 0; i < len(rangeOperations); i++ {
|
||||
operations = append(operations, rangeOperations[i])
|
||||
}
|
||||
|
||||
// intervaledOperations sorts on the basis of the length of the window.
|
||||
sort.Sort(operations)
|
||||
|
||||
greediestRange = operations[len(operations)-1 : len(operations)][0]
|
||||
}
|
||||
|
||||
if len(intervalOperations) > 0 {
|
||||
greediestIntervals = make(map[time.Duration]durationOperator)
|
||||
|
||||
for i, ops := range intervalOperations {
|
||||
operations := durationOperators{}
|
||||
for j := 0; j < len(ops); j++ {
|
||||
operations = append(operations, ops[j])
|
||||
}
|
||||
|
||||
// intervaledOperations sorts on the basis of the length of the window.
|
||||
sort.Sort(operations)
|
||||
|
||||
greediestIntervals[i] = operations[len(operations)-1 : len(operations)][0]
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
containsRange = greediestRange != nil
|
||||
containsInterval = len(greediestIntervals) > 0
|
||||
)
|
||||
|
||||
if containsRange && !containsInterval {
|
||||
out = append(out, greediestRange)
|
||||
} else if !containsRange && containsInterval {
|
||||
intervalOperations := getValuesAtIntervalOps{}
|
||||
for _, o := range greediestIntervals {
|
||||
intervalOperations = append(intervalOperations, o.(getValuesAtIntervalOp))
|
||||
}
|
||||
|
||||
sort.Sort(frequencySorter{intervalOperations})
|
||||
|
||||
for _, o := range intervalOperations {
|
||||
out = append(out, o)
|
||||
}
|
||||
} else if containsRange && containsInterval {
|
||||
out = append(out, greediestRange)
|
||||
for _, op := range greediestIntervals {
|
||||
if !op.Through().After(greediestRange.Through()) {
|
||||
continue
|
||||
}
|
||||
|
||||
// The range operation does not exceed interval. Leave a snippet of
|
||||
// interval.
|
||||
var (
|
||||
truncated = op.(getValuesAtIntervalOp)
|
||||
newIntervalOperation getValuesAtIntervalOp
|
||||
// Refactor
|
||||
remainingSlice = greediestRange.Through().Sub(greediestRange.StartsAt()) / time.Second
|
||||
nextIntervalPoint = time.Duration(math.Ceil(float64(remainingSlice)/float64(truncated.interval)) * float64(truncated.interval/time.Second))
|
||||
nextStart = greediestRange.Through().Add(nextIntervalPoint)
|
||||
)
|
||||
|
||||
newIntervalOperation.from = nextStart
|
||||
newIntervalOperation.interval = truncated.interval
|
||||
newIntervalOperation.through = truncated.Through()
|
||||
// Added back to the pending because additional curation could be
|
||||
// necessary.
|
||||
out = append(out, newIntervalOperation)
|
||||
}
|
||||
} else {
|
||||
// Operation is OK as-is.
|
||||
out = append(out, group[0])
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Flattens all groups of time according to greed.
|
||||
func optimizeTimeGroups(pending ops) (out ops) {
|
||||
if len(pending) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
sort.Sort(pending)
|
||||
|
||||
nextOperation := pending[0]
|
||||
groupedQueries := selectQueriesForTime(nextOperation.StartsAt(), pending)
|
||||
out = optimizeTimeGroup(groupedQueries)
|
||||
pending = pending[len(groupedQueries):len(pending)]
|
||||
|
||||
tail := optimizeTimeGroups(pending)
|
||||
|
||||
return append(out, tail...)
|
||||
}
|
||||
|
||||
func optimize(pending ops) (out ops) {
|
||||
return optimizeForward(optimizeTimeGroups(pending))
|
||||
}
|
1078 storage/metric/operation_test.go Normal file
File diff suppressed because it is too large
|
@ -16,6 +16,7 @@ package metric
|
|||
import (
|
||||
"github.com/prometheus/prometheus/model"
|
||||
"github.com/prometheus/prometheus/utility/test"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
@ -35,7 +36,7 @@ func GetFingerprintsForLabelSetUsesAndForLabelMatchingTests(p MetricPersistence,
|
|||
m[model.LabelName(k)] = model.LabelValue(v)
|
||||
}
|
||||
|
||||
appendSample(p, model.Sample{
|
||||
testAppendSample(p, model.Sample{
|
||||
Value: model.SampleValue(0.0),
|
||||
Timestamp: time.Now(),
|
||||
Metric: m,
|
||||
|
@ -56,3 +57,29 @@ func GetFingerprintsForLabelSetUsesAndForLabelMatchingTests(p MetricPersistence,
|
|||
t.Errorf("did not get a single metric as is expected, got %s", fingerprints)
|
||||
}
|
||||
}
|
||||
|
||||
// Test Definitions Below
|
||||
|
||||
var testLevelDBGetFingerprintsForLabelSetUsesAndForLabelMatching = buildLevelDBTestPersistence("get_fingerprints_for_labelset_uses_and_for_label_matching", GetFingerprintsForLabelSetUsesAndForLabelMatchingTests)
|
||||
|
||||
func TestLevelDBGetFingerprintsForLabelSetUsesAndForLabelMatching(t *testing.T) {
|
||||
testLevelDBGetFingerprintsForLabelSetUsesAndForLabelMatching(t)
|
||||
}
|
||||
|
||||
func BenchmarkLevelDBGetFingerprintsForLabelSetUsesAndForLabelMatching(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLevelDBGetFingerprintsForLabelSetUsesAndForLabelMatching(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testMemoryGetFingerprintsForLabelSetUsesAndForLabelMatching = buildMemoryTestPersistence(GetFingerprintsForLabelSetUsesAndForLabelMatchingTests)
|
||||
|
||||
func TestMemoryGetFingerprintsForLabelSetUsesAndForLabelMatching(t *testing.T) {
|
||||
testMemoryGetFingerprintsForLabelSetUsesAndForLabelMatching(t)
|
||||
}
|
||||
|
||||
func BenchmarkMemoryGetFingerprintsForLabelSetUsesAndLabelMatching(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testMemoryGetFingerprintsForLabelSetUsesAndForLabelMatching(b)
|
||||
}
|
||||
}
|
|
@ -17,6 +17,8 @@ import (
|
|||
"github.com/prometheus/prometheus/model"
|
||||
"github.com/prometheus/prometheus/utility/test"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
@ -569,7 +571,7 @@ func GetValueAtTimeTests(persistenceMaker func() (MetricPersistence, io.Closer),
|
|||
}
|
||||
|
||||
for _, value := range context.values {
|
||||
appendSample(p, model.Sample{
|
||||
testAppendSample(p, model.Sample{
|
||||
Value: model.SampleValue(value.value),
|
||||
Timestamp: time.Date(value.year, value.month, value.day, value.hour, 0, 0, 0, time.UTC),
|
||||
Metric: m,
|
||||
|
@ -1014,7 +1016,7 @@ func GetBoundaryValuesTests(persistenceMaker func() (MetricPersistence, io.Close
|
|||
}
|
||||
|
||||
for _, value := range context.values {
|
||||
appendSample(p, model.Sample{
|
||||
testAppendSample(p, model.Sample{
|
||||
Value: model.SampleValue(value.value),
|
||||
Timestamp: time.Date(value.year, value.month, value.day, value.hour, 0, 0, 0, time.UTC),
|
||||
Metric: m,
|
||||
|
@ -1371,7 +1373,7 @@ func GetRangeValuesTests(persistenceMaker func() (MetricPersistence, io.Closer),
|
|||
}
|
||||
|
||||
for _, value := range context.values {
|
||||
appendSample(p, model.Sample{
|
||||
testAppendSample(p, model.Sample{
|
||||
Value: model.SampleValue(value.value),
|
||||
Timestamp: time.Date(value.year, value.month, value.day, value.hour, 0, 0, 0, time.UTC),
|
||||
Metric: m,
|
||||
|
@ -1434,3 +1436,106 @@ func GetRangeValuesTests(persistenceMaker func() (MetricPersistence, io.Closer),
|
|||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// Test Definitions Follow
|
||||
|
||||
func testLevelDBGetValueAtTime(t test.Tester) {
|
||||
persistenceMaker := buildLevelDBTestPersistencesMaker("get_value_at_time", t)
|
||||
GetValueAtTimeTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestLevelDBGetValueAtTime(t *testing.T) {
|
||||
testLevelDBGetValueAtTime(t)
|
||||
}
|
||||
|
||||
func BenchmarkLevelDBGetValueAtTime(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLevelDBGetValueAtTime(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testLevelDBGetBoundaryValues(t test.Tester) {
|
||||
persistenceMaker := buildLevelDBTestPersistencesMaker("get_boundary_values", t)
|
||||
|
||||
GetBoundaryValuesTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestLevelDBGetBoundaryValues(t *testing.T) {
|
||||
testLevelDBGetBoundaryValues(t)
|
||||
}
|
||||
|
||||
func BenchmarkLevelDBGetBoundaryValues(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLevelDBGetBoundaryValues(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testLevelDBGetRangeValues(t test.Tester) {
|
||||
persistenceMaker := buildLevelDBTestPersistencesMaker("get_range_values", t)
|
||||
|
||||
GetRangeValuesTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestLevelDBGetRangeValues(t *testing.T) {
|
||||
testLevelDBGetRangeValues(t)
|
||||
}
|
||||
|
||||
func BenchmarkLevelDBGetRangeValues(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLevelDBGetRangeValues(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testMemoryGetValueAtTime(t test.Tester) {
|
||||
persistenceMaker := func() (MetricPersistence, io.Closer) {
|
||||
return NewMemorySeriesStorage(), ioutil.NopCloser(nil)
|
||||
}
|
||||
|
||||
GetValueAtTimeTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestMemoryGetValueAtTime(t *testing.T) {
|
||||
testMemoryGetValueAtTime(t)
|
||||
}
|
||||
|
||||
func BenchmarkMemoryGetValueAtTime(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testMemoryGetValueAtTime(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testMemoryGetBoundaryValues(t test.Tester) {
|
||||
persistenceMaker := func() (MetricPersistence, io.Closer) {
|
||||
return NewMemorySeriesStorage(), ioutil.NopCloser(nil)
|
||||
}
|
||||
|
||||
GetBoundaryValuesTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestMemoryGetBoundaryValues(t *testing.T) {
|
||||
testMemoryGetBoundaryValues(t)
|
||||
}
|
||||
|
||||
func BenchmarkMemoryGetBoundaryValues(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testMemoryGetBoundaryValues(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testMemoryGetRangeValues(t test.Tester) {
|
||||
persistenceMaker := func() (MetricPersistence, io.Closer) {
|
||||
return NewMemorySeriesStorage(), ioutil.NopCloser(nil)
|
||||
}
|
||||
|
||||
GetRangeValuesTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestMemoryGetRangeValues(t *testing.T) {
|
||||
testMemoryGetRangeValues(t)
|
||||
}
|
||||
|
||||
func BenchmarkMemoryGetRangeValues(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testMemoryGetRangeValues(b)
|
||||
}
|
||||
}
|
54 storage/metric/scanjob.go Normal file
@@ -0,0 +1,54 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric

import (
	"bytes"
	"fmt"
	"github.com/prometheus/prometheus/model"
)

// scanJob models a range of queries.
type scanJob struct {
	fingerprint model.Fingerprint
	operations  ops
}

func (s scanJob) String() string {
	buffer := &bytes.Buffer{}
	fmt.Fprintf(buffer, "Scan Job { fingerprint=%s ", s.fingerprint)
	fmt.Fprintf(buffer, " with %d operations [", len(s.operations))
	for _, operation := range s.operations {
		fmt.Fprintf(buffer, "%s", operation)
	}
	fmt.Fprintf(buffer, "] }")

	return buffer.String()
}

type scanJobs []scanJob

func (s scanJobs) Len() int {
	return len(s)
}

func (s scanJobs) Less(i, j int) (less bool) {
	less = s[i].fingerprint.Less(s[j].fingerprint)

	return
}

func (s scanJobs) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
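A small hypothetical in-package snippet (again not part of this commit) illustrating that scanJobs order purely by fingerprint under sort.Sort, which the tiered storage relies on when it walks the sample keyspace; it assumes model.NewFingerprintFromMetric behaves as used elsewhere in this commit and would have to live inside package metric, for example in a _test.go file:

package metric

import (
	"fmt"
	"sort"
	"time"

	"github.com/prometheus/prometheus/model"
)

// exampleScanJobOrdering is a hypothetical illustration, not part of this commit.
func exampleScanJobOrdering() {
	jobs := scanJobs{
		{
			fingerprint: model.NewFingerprintFromMetric(model.Metric{"name": "zebra_requests"}),
			operations:  ops{getValuesAtTimeOp{time: time.Now()}},
		},
		{
			fingerprint: model.NewFingerprintFromMetric(model.Metric{"name": "alpha_requests"}),
			operations:  ops{},
		},
	}

	// Jobs are ordered solely by fingerprint, regardless of how many
	// operations each job carries.
	sort.Sort(jobs)

	for _, job := range jobs {
		fmt.Println(job)
	}
}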
@ -17,8 +17,10 @@ import (
|
|||
"fmt"
|
||||
"github.com/prometheus/prometheus/model"
|
||||
"github.com/prometheus/prometheus/utility/test"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
"time"
|
||||
)
|
||||
|
@ -431,3 +433,171 @@ func StochasticTests(persistenceMaker func() MetricPersistence, t test.Tester) {
|
|||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test Definitions Follow
|
||||
|
||||
var testLevelDBBasicLifecycle = buildLevelDBTestPersistence("basic_lifecycle", BasicLifecycleTests)
|
||||
|
||||
func TestLevelDBBasicLifecycle(t *testing.T) {
|
||||
testLevelDBBasicLifecycle(t)
|
||||
}
|
||||
|
||||
func BenchmarkLevelDBBasicLifecycle(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLevelDBBasicLifecycle(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testLevelDBReadEmpty = buildLevelDBTestPersistence("read_empty", ReadEmptyTests)
|
||||
|
||||
func TestLevelDBReadEmpty(t *testing.T) {
|
||||
testLevelDBReadEmpty(t)
|
||||
}
|
||||
|
||||
func BenchmarkLevelDBReadEmpty(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLevelDBReadEmpty(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testLevelDBAppendSampleAsPureSparseAppend = buildLevelDBTestPersistence("append_sample_as_pure_sparse_append", AppendSampleAsPureSparseAppendTests)
|
||||
|
||||
func TestLevelDBAppendSampleAsPureSparseAppend(t *testing.T) {
|
||||
testLevelDBAppendSampleAsPureSparseAppend(t)
|
||||
}
|
||||
|
||||
func BenchmarkLevelDBAppendSampleAsPureSparseAppend(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLevelDBAppendSampleAsPureSparseAppend(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testLevelDBAppendSampleAsSparseAppendWithReads = buildLevelDBTestPersistence("append_sample_as_sparse_append_with_reads", AppendSampleAsSparseAppendWithReadsTests)
|
||||
|
||||
func TestLevelDBAppendSampleAsSparseAppendWithReads(t *testing.T) {
|
||||
testLevelDBAppendSampleAsSparseAppendWithReads(t)
|
||||
}
|
||||
|
||||
func BenchmarkLevelDBAppendSampleAsSparseAppendWithReads(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLevelDBAppendSampleAsSparseAppendWithReads(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testLevelDBAppendSampleAsPureSingleEntityAppend = buildLevelDBTestPersistence("append_sample_as_pure_single_entity_append", AppendSampleAsPureSingleEntityAppendTests)
|
||||
|
||||
func TestLevelDBAppendSampleAsPureSingleEntityAppend(t *testing.T) {
|
||||
testLevelDBAppendSampleAsPureSingleEntityAppend(t)
|
||||
}
|
||||
|
||||
func BenchmarkLevelDBAppendSampleAsPureSingleEntityAppend(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLevelDBAppendSampleAsPureSingleEntityAppend(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testLevelDBStochastic(t test.Tester) {
|
||||
persistenceMaker := func() MetricPersistence {
|
||||
temporaryDirectory, err := ioutil.TempDir("", "test_leveldb_stochastic")
|
||||
if err != nil {
|
||||
t.Errorf("Could not create test directory: %q\n", err)
|
||||
}
|
||||
|
||||
p, err := NewLevelDBMetricPersistence(temporaryDirectory)
|
||||
if err != nil {
|
||||
t.Errorf("Could not start up LevelDB: %q\n", err)
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
StochasticTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestLevelDBStochastic(t *testing.T) {
|
||||
testLevelDBStochastic(t)
|
||||
}
|
||||
|
||||
func BenchmarkLevelDBStochastic(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testLevelDBStochastic(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testMemoryBasicLifecycle = buildMemoryTestPersistence(BasicLifecycleTests)
|
||||
|
||||
func TestMemoryBasicLifecycle(t *testing.T) {
|
||||
testMemoryBasicLifecycle(t)
|
||||
}
|
||||
|
||||
func BenchmarkMemoryBasicLifecycle(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testMemoryBasicLifecycle(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testMemoryReadEmpty = buildMemoryTestPersistence(ReadEmptyTests)
|
||||
|
||||
func TestMemoryReadEmpty(t *testing.T) {
|
||||
testMemoryReadEmpty(t)
|
||||
}
|
||||
|
||||
func BenchmarkMemoryReadEmpty(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testMemoryReadEmpty(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testMemoryAppendSampleAsPureSparseAppend = buildMemoryTestPersistence(AppendSampleAsPureSparseAppendTests)
|
||||
|
||||
func TestMemoryAppendSampleAsPureSparseAppend(t *testing.T) {
|
||||
testMemoryAppendSampleAsPureSparseAppend(t)
|
||||
}
|
||||
|
||||
func BenchmarkMemoryAppendSampleAsPureSparseAppend(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testMemoryAppendSampleAsPureSparseAppend(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testMemoryAppendSampleAsSparseAppendWithReads = buildMemoryTestPersistence(AppendSampleAsSparseAppendWithReadsTests)
|
||||
|
||||
func TestMemoryAppendSampleAsSparseAppendWithReads(t *testing.T) {
|
||||
testMemoryAppendSampleAsSparseAppendWithReads(t)
|
||||
}
|
||||
|
||||
func BenchmarkMemoryAppendSampleAsSparseAppendWithReads(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testMemoryAppendSampleAsSparseAppendWithReads(b)
|
||||
}
|
||||
}
|
||||
|
||||
var testMemoryAppendSampleAsPureSingleEntityAppend = buildMemoryTestPersistence(AppendSampleAsPureSingleEntityAppendTests)
|
||||
|
||||
func TestMemoryAppendSampleAsPureSingleEntityAppend(t *testing.T) {
|
||||
testMemoryAppendSampleAsPureSingleEntityAppend(t)
|
||||
}
|
||||
|
||||
func BenchmarkMemoryAppendSampleAsPureSingleEntityAppend(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testMemoryAppendSampleAsPureSingleEntityAppend(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testMemoryStochastic(t test.Tester) {
|
||||
persistenceMaker := func() MetricPersistence {
|
||||
return NewMemorySeriesStorage()
|
||||
}
|
||||
|
||||
StochasticTests(persistenceMaker, t)
|
||||
}
|
||||
|
||||
func TestMemoryStochastic(t *testing.T) {
|
||||
testMemoryStochastic(t)
|
||||
}
|
||||
|
||||
func BenchmarkMemoryStochastic(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testMemoryStochastic(b)
|
||||
}
|
||||
}

@@ -14,13 +14,99 @@
package metric

import (
	"fmt"
	"github.com/prometheus/prometheus/model"
	"github.com/prometheus/prometheus/utility/test"
	"io"
	"io/ioutil"
	"os"
	"time"
)

func appendSample(p MetricPersistence, s model.Sample, t test.Tester) {
var (
	testInstant = time.Now()
)

func testAppendSample(p MetricPersistence, s model.Sample, t test.Tester) {
	err := p.AppendSample(s)
	if err != nil {
		t.Fatal(err)
	}
}

type purger struct {
	path string
}

func (p purger) Close() error {
	return os.RemoveAll(p.path)
}

func buildLevelDBTestPersistencesMaker(name string, t test.Tester) func() (MetricPersistence, io.Closer) {
	return func() (MetricPersistence, io.Closer) {
		temporaryDirectory, err := ioutil.TempDir("", "get_value_at_time")
		if err != nil {
			t.Errorf("Could not create test directory: %q\n", err)
		}

		p, err := NewLevelDBMetricPersistence(temporaryDirectory)
		if err != nil {
			t.Errorf("Could not start up LevelDB: %q\n", err)
		}

		purger := purger{
			path: temporaryDirectory,
		}

		return p, purger
	}

}

func buildLevelDBTestPersistence(name string, f func(p MetricPersistence, t test.Tester)) func(t test.Tester) {
	return func(t test.Tester) {
		temporaryDirectory, err := ioutil.TempDir("", fmt.Sprintf("test_leveldb_%s", name))

		if err != nil {
			t.Errorf("Could not create test directory: %q\n", err)
			return
		}

		defer func() {
			err := os.RemoveAll(temporaryDirectory)
			if err != nil {
				t.Errorf("Could not remove temporary directory: %q\n", err)
			}
		}()

		p, err := NewLevelDBMetricPersistence(temporaryDirectory)
		if err != nil {
			t.Errorf("Could not create LevelDB Metric Persistence: %q\n", err)
		}

		defer func() {
			err := p.Close()
			if err != nil {
				t.Errorf("Anomaly while closing database: %q\n", err)
			}
		}()

		f(p, t)
	}
}

func buildMemoryTestPersistence(f func(p MetricPersistence, t test.Tester)) func(t test.Tester) {
	return func(t test.Tester) {

		p := NewMemorySeriesStorage()

		defer func() {
			err := p.Close()
			if err != nil {
				t.Errorf("Anomaly while closing database: %q\n", err)
			}
		}()

		f(p, t)
	}
}

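The helpers above are meant to be composed into concrete tests. As a point of orientation, a hypothetical LevelDB-backed wiring that mirrors the memory-backed variants earlier in this diff; the test names below are illustrative and not part of this commit, and the snippet assumes it lives alongside the package's other test files:

// Hypothetical LevelDB-backed counterparts to the memory-backed tests above.
// The name argument only influences the temporary-directory prefix.
var testLevelDBBasicLifecycle = buildLevelDBTestPersistence("basic_lifecycle", BasicLifecycleTests)

func TestLevelDBBasicLifecycle(t *testing.T) {
	testLevelDBBasicLifecycle(t)
}

func BenchmarkLevelDBBasicLifecycle(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testLevelDBBasicLifecycle(b)
	}
}
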
431
storage/metric/tiered.go
Normal file

@@ -0,0 +1,431 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric

import (
	"fmt"
	"github.com/prometheus/prometheus/model"
	"github.com/prometheus/prometheus/storage"
	"sync"
	"time"
)

// tieredStorage both persists samples and generates materialized views for
// queries.
type tieredStorage struct {
	appendToDiskQueue   chan model.Sample
	appendToMemoryQueue chan model.Sample
	diskStorage         *LevelDBMetricPersistence
	flushMemoryInterval time.Duration
	memoryArena         memorySeriesStorage
	memoryTTL           time.Duration
	mutex               sync.Mutex
	viewQueue           chan viewJob
	writeMemoryInterval time.Duration
}

// viewJob encapsulates a request to extract sample values from the datastore.
type viewJob struct {
	builder ViewRequestBuilder
	output  chan View
	err     chan error
}

type Storage interface {
	AppendSample(model.Sample)
	MakeView(ViewRequestBuilder, time.Duration) (View, error)
	Serve()
	Expose()
}

func NewTieredStorage(appendToMemoryQueueDepth, appendToDiskQueueDepth, viewQueueDepth uint, flushMemoryInterval, writeMemoryInterval, memoryTTL time.Duration) Storage {
	diskStorage, err := NewLevelDBMetricPersistence("/tmp/metrics-foof")
	if err != nil {
		panic(err)
	}

	return &tieredStorage{
		appendToDiskQueue:   make(chan model.Sample, appendToDiskQueueDepth),
		appendToMemoryQueue: make(chan model.Sample, appendToMemoryQueueDepth),
		diskStorage:         diskStorage,
		flushMemoryInterval: flushMemoryInterval,
		memoryArena:         NewMemorySeriesStorage(),
		memoryTTL:           memoryTTL,
		viewQueue:           make(chan viewJob, viewQueueDepth),
		writeMemoryInterval: writeMemoryInterval,
	}
}

func (t *tieredStorage) AppendSample(s model.Sample) {
	t.appendToMemoryQueue <- s
}

func (t *tieredStorage) MakeView(builder ViewRequestBuilder, deadline time.Duration) (view View, err error) {
	result := make(chan View)
	errChan := make(chan error)
	t.viewQueue <- viewJob{
		builder: builder,
		output:  result,
		err:     errChan,
	}

	select {
	case value := <-result:
		view = value
	case err = <-errChan:
		return
	case <-time.After(deadline):
		err = fmt.Errorf("MakeView timed out after %s.", deadline)
	}

	return
}

func (t *tieredStorage) Expose() {
	ticker := time.Tick(5 * time.Second)
	f := model.NewFingerprintFromRowKey("05232115763668508641-g-97-d")
	for {
		<-ticker

		var (
			first  = time.Now()
			second = first.Add(1 * time.Minute)
			third  = first.Add(2 * time.Minute)
		)

		vrb := NewViewRequestBuilder()
		fmt.Printf("vrb -> %s\n", vrb)
		vrb.GetMetricRange(f, first, second)
		vrb.GetMetricRange(f, first, third)
		js := vrb.ScanJobs()
		consume(js[0])
		// fmt.Printf("js -> %s\n", js)
		// js.Represent(t.diskStorage, t.memoryArena)
		// i, c, _ := t.diskStorage.metricSamples.GetIterator()
		// start := time.Now()
		// f, _ := newDiskFrontier(i)
		// fmt.Printf("df -> %s\n", time.Since(start))
		// fmt.Printf("df -- -> %s\n", f)
		// start = time.Now()
		// // sf, _ := newSeriesFrontier(model.NewFingerprintFromRowKey("05232115763668508641-g-97-d"), *f, i)
		// // sf, _ := newSeriesFrontier(model.NewFingerprintFromRowKey("16879485108969112708-g-184-s"), *f, i)
		// sf, _ := newSeriesFrontier(model.NewFingerprintFromRowKey("08437776163162606855-g-169-s"), *f, i)
		// fmt.Printf("sf -> %s\n", time.Since(start))
		// fmt.Printf("sf -- -> %s\n", sf)
		// c.Close()
	}
}

func (t *tieredStorage) Serve() {
	var (
		flushMemoryTicker = time.Tick(t.flushMemoryInterval)
		writeMemoryTicker = time.Tick(t.writeMemoryInterval)
	)
	for {
		select {
		case <-writeMemoryTicker:
			t.writeMemory()
		case <-flushMemoryTicker:
			t.flushMemory()
		case viewRequest := <-t.viewQueue:
			t.renderView(viewRequest)
		}
	}
}

func (t *tieredStorage) writeMemory() {
	t.mutex.Lock()
	defer t.mutex.Unlock()

	pendingLength := len(t.appendToMemoryQueue)

	for i := 0; i < pendingLength; i++ {
		t.memoryArena.AppendSample(<-t.appendToMemoryQueue)
	}
}

// Write all pending appends.
func (t *tieredStorage) flush() (err error) {
	t.writeMemory()
	t.flushMemory()

	return
}

type memoryToDiskFlusher struct {
	toDiskQueue    chan model.Sample
	disk           MetricPersistence
	olderThan      time.Time
	valuesAccepted int
	valuesRejected int
}

type memoryToDiskFlusherVisitor struct {
	stream  stream
	flusher *memoryToDiskFlusher
}

func (f memoryToDiskFlusherVisitor) DecodeKey(in interface{}) (out interface{}, err error) {
	out = time.Time(in.(skipListTime))
	return
}

func (f memoryToDiskFlusherVisitor) DecodeValue(in interface{}) (out interface{}, err error) {
	out = in.(value).get()
	return
}

func (f memoryToDiskFlusherVisitor) Filter(key, value interface{}) (filterResult storage.FilterResult) {
	var (
		recordTime = key.(time.Time)
	)

	if recordTime.Before(f.flusher.olderThan) {
		f.flusher.valuesAccepted++

		return storage.ACCEPT
	}

	f.flusher.valuesRejected++
	return storage.STOP
}

func (f memoryToDiskFlusherVisitor) Operate(key, value interface{}) (err *storage.OperatorError) {
	var (
		recordTime  = key.(time.Time)
		recordValue = value.(model.SampleValue)
	)

	if len(f.flusher.toDiskQueue) == cap(f.flusher.toDiskQueue) {
		f.flusher.Flush()
	}

	f.flusher.toDiskQueue <- model.Sample{
		Metric:    f.stream.metric,
		Timestamp: recordTime,
		Value:     recordValue,
	}

	f.stream.values.Delete(skipListTime(recordTime))

	return
}

func (f *memoryToDiskFlusher) ForStream(stream stream) (decoder storage.RecordDecoder, filter storage.RecordFilter, operator storage.RecordOperator) {
	visitor := memoryToDiskFlusherVisitor{
		stream:  stream,
		flusher: f,
	}

	fmt.Printf("fingerprint -> %s\n", model.NewFingerprintFromMetric(stream.metric).ToRowKey())

	return visitor, visitor, visitor
}

func (f *memoryToDiskFlusher) Flush() {
	length := len(f.toDiskQueue)
	samples := model.Samples{}
	for i := 0; i < length; i++ {
		samples = append(samples, <-f.toDiskQueue)
	}
	fmt.Printf("%d samples to write\n", length)
	f.disk.AppendSamples(samples)
}

func (f memoryToDiskFlusher) Close() {
	fmt.Println("memory flusher close")
	f.Flush()
}

// Persist a whole bunch of samples to the datastore.
func (t *tieredStorage) flushMemory() {
	t.mutex.Lock()
	defer t.mutex.Unlock()

	flusher := &memoryToDiskFlusher{
		disk:        t.diskStorage,
		olderThan:   time.Now().Add(-1 * t.memoryTTL),
		toDiskQueue: t.appendToDiskQueue,
	}
	defer flusher.Close()

	v := time.Now()
	t.memoryArena.ForEachSample(flusher)
	fmt.Printf("Done flushing memory in %s", time.Since(v))

	return
}

func (t *tieredStorage) renderView(viewJob viewJob) (err error) {
	t.mutex.Lock()
	defer t.mutex.Unlock()

	return
}

func consume(s scanJob) {
	var (
		standingOperations = ops{}
		lastTime           = time.Time{}
	)

	for {
		if len(s.operations) == 0 {
			if len(standingOperations) > 0 {
				var (
					intervals = collectIntervals(standingOperations)
					ranges    = collectRanges(standingOperations)
				)

				if len(intervals) > 0 {
				}

				if len(ranges) > 0 {
					if len(ranges) > 0 {

					}
				}
				break
			}
		}

		operation := s.operations[0]
		if operation.StartsAt().Equal(lastTime) {
			standingOperations = append(standingOperations, operation)
		} else {
			standingOperations = ops{operation}
			lastTime = operation.StartsAt()
		}

		s.operations = s.operations[1:len(s.operations)]
	}
}

func (s scanJobs) Represent(d *LevelDBMetricPersistence, m memorySeriesStorage) (storage *memorySeriesStorage, err error) {

	if len(s) == 0 {
		return
	}

	iterator, closer, err := d.metricSamples.GetIterator()
	if err != nil {
		panic(err)
		return
	}
	defer closer.Close()

	diskFrontier, err := newDiskFrontier(iterator)
	if err != nil {
		panic(err)
		return
	}
	if diskFrontier == nil {
		panic("diskfrontier == nil")
	}

	for _, job := range s {
		if len(job.operations) == 0 {
			panic("len(job.operations) == 0 should never occur")
		}

		// Determine if the metric is in the known keyspace. This is used as a
		// high-level heuristic before comparing the timestamps.
		var (
			fingerprint          = job.fingerprint
			absentDiskKeyspace   = fingerprint.Less(diskFrontier.firstFingerprint) || diskFrontier.lastFingerprint.Less(fingerprint)
			absentMemoryKeyspace = false
		)

		if _, ok := m.fingerprintToSeries[fingerprint]; !ok {
			absentMemoryKeyspace = true
		}

		var (
			firstSupertime time.Time
			lastSupertime  time.Time
		)

		var (
			_ = absentMemoryKeyspace
			_ = firstSupertime
			_ = lastSupertime
		)

		// If the key is present in the disk keyspace, we should find out the maximum
		// seek points ahead of time. In the LevelDB case, this will save us from
		// having to dispose of and recreate the iterator.
		if !absentDiskKeyspace {
			seriesFrontier, err := newSeriesFrontier(fingerprint, *diskFrontier, iterator)
			if err != nil {
				panic(err)
				return nil, err
			}

			if seriesFrontier == nil {
				panic("ouch")
			}
		}
	}
	return
}

// var (
// memoryLowWaterMark time.Time
// memoryHighWaterMark time.Time
// )

// if !absentMemoryKeyspace {
// }
// // if firstDiskFingerprint.Equal(job.fingerprint) {
// // for _, operation := range job.operations {
// // if o, ok := operation.(getMetricAtTimeOperation); ok {
// // if o.StartTime().Before(firstDiskSuperTime) {
// // }
// // }

// // if o, ok := operation.(GetMetricAtInterval); ok {
// // }
// // }
// // }
// }
// // // Compare the metrics on the basis of the keys.
// // firstSampleInRange = sort.IsSorted(model.Fingerprints{firstDiskFingerprint, s[0].fingerprint})
// // lastSampleInRange = sort.IsSorted(model.Fingerprints{s[s.Len()-1].fingerprint, lastDiskFingerprint})

// // if firstSampleInRange && firstDiskFingerprint.Equal(s[0].fingerprint) {
// // firstSampleInRange = !indexable.DecodeTime(firstKey.Timestamp).After(s.operations[0].StartTime())
// // }
// // if lastSampleInRange && lastDiskFingerprint.Equal(s[s.Len()-1].fingerprint) {
// // lastSampleInRange = !s.operations[s.Len()-1].StartTime().After(indexable.DecodeTime(lastKey.Timestamp))
// // }

// // for _, job := range s {
// // operations := job.operations
// // numberOfOperations := len(operations)
// // for j := 0; j < numberOfOperations; j++ {
// // operationTime := operations[j].StartTime()
// // group, skipAhead := collectOperationsForTime(operationTime, operations[j:numberOfOperations])
// // ranges := collectRanges(group)
// // intervals := collectIntervals(group)

// // fmt.Printf("ranges -> %s\n", ranges)
// // if len(ranges) > 0 {
// // fmt.Printf("d -> %s\n", peekForLongestRange(ranges, ranges[0].through))
// // }

// // j += skipAhead
// // }
// // }

101
storage/metric/view.go
Normal file

@@ -0,0 +1,101 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric

import (
	"github.com/prometheus/prometheus/model"
	"sort"
	"time"
)

var (
	// firstSupertime is the smallest valid supertime that may be seeked to.
	firstSupertime = []byte{0, 0, 0, 0, 0, 0, 0, 0}
	// lastSupertime is the largest valid supertime that may be seeked to.
	lastSupertime = []byte{127, 255, 255, 255, 255, 255, 255, 255}
)

// Represents the summation of all datastore queries that shall be performed to
// extract values. Each operation mutates the state of the builder.
type ViewRequestBuilder interface {
	GetMetricAtTime(fingerprint model.Fingerprint, time time.Time)
	GetMetricAtInterval(fingerprint model.Fingerprint, from, through time.Time, interval time.Duration)
	GetMetricRange(fingerprint model.Fingerprint, from, through time.Time)
	ScanJobs() scanJobs
}

// Contains the various unoptimized requests for data.
type viewRequestBuilder struct {
	operations map[model.Fingerprint]ops
}

// Furnishes a ViewRequestBuilder for recording what types of queries to perform.
func NewViewRequestBuilder() viewRequestBuilder {
	return viewRequestBuilder{
		operations: make(map[model.Fingerprint]ops),
	}
}

// Gets for the given Fingerprint either the value at that time if there is a
// match or the one or two values adjacent thereto.
func (v viewRequestBuilder) GetMetricAtTime(fingerprint model.Fingerprint, time time.Time) {
	ops := v.operations[fingerprint]
	ops = append(ops, getValuesAtTimeOp{
		time: time,
	})
	v.operations[fingerprint] = ops
}

// Gets for the given Fingerprint either the value at that interval from From
// through Through if there is a match or the one or two values adjacent
// for each point.
func (v viewRequestBuilder) GetMetricAtInterval(fingerprint model.Fingerprint, from, through time.Time, interval time.Duration) {
	ops := v.operations[fingerprint]
	ops = append(ops, getValuesAtIntervalOp{
		from:     from,
		through:  through,
		interval: interval,
	})
	v.operations[fingerprint] = ops
}

// Gets for the given Fingerprint the values that occur inclusively from From
// through Through.
func (v viewRequestBuilder) GetMetricRange(fingerprint model.Fingerprint, from, through time.Time) {
	ops := v.operations[fingerprint]
	ops = append(ops, getValuesAlongRangeOp{
		from:    from,
		through: through,
	})
	v.operations[fingerprint] = ops
}

// Emits the optimized scans that will occur in the data store. This
// effectively resets the ViewRequestBuilder back to a pristine state.
func (v viewRequestBuilder) ScanJobs() (j scanJobs) {
	for fingerprint, operations := range v.operations {
		sort.Sort(operations)

		j = append(j, scanJob{
			fingerprint: fingerprint,
			operations:  optimize(operations),
		})

		delete(v.operations, fingerprint)
	}

	sort.Sort(j)

	return
}

183
storage/metric/view_test.go
Normal file

@@ -0,0 +1,183 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric

import (
	"github.com/prometheus/prometheus/model"
	"github.com/prometheus/prometheus/utility/test"
	"testing"
	"time"
)

func testBuilder(t test.Tester) {
	type atTime struct {
		fingerprint string
		time        time.Time
	}

	type atInterval struct {
		fingerprint string
		from        time.Time
		through     time.Time
		interval    time.Duration
	}

	type atRange struct {
		fingerprint string
		from        time.Time
		through     time.Time
	}

	type in struct {
		atTimes     []atTime
		atIntervals []atInterval
		atRanges    []atRange
	}

	type out []struct {
		fingerprint string
		operations  ops
	}

	var scenarios = []struct {
		in  in
		out out
	}{
		// // Ensure that the fingerprint is sorted in proper order.
		{
			in: in{
				atTimes: []atTime{
					{
						fingerprint: "0000000000000001111-a-4-a",
						time:        time.Unix(100, 0),
					},
					{
						fingerprint: "0000000000000000000-a-4-a",
						time:        time.Unix(100, 0),
					},
				},
			},
			out: out{
				{
					fingerprint: "00000000000000000000-a-4-a",
				},
				{
					fingerprint: "00000000000000001111-a-4-a",
				},
			},
		},
		// // Ensure that the fingerprint-timestamp pairs are sorted in proper order.
		{
			in: in{
				atTimes: []atTime{
					{
						fingerprint: "1111-a-4-a",
						time:        time.Unix(100, 0),
					},
					{
						fingerprint: "1111-a-4-a",
						time:        time.Unix(200, 0),
					},
					{
						fingerprint: "0-a-4-a",
						time:        time.Unix(100, 0),
					},
					{
						fingerprint: "0-a-4-a",
						time:        time.Unix(0, 0),
					},
				},
			},
			out: out{
				{
					fingerprint: "00000000000000000000-a-4-a",
				},
				{
					fingerprint: "00000000000000001111-a-4-a",
				},
			},
		},
		// Ensure grouping of operations
		{
			in: in{
				atTimes: []atTime{
					{
						fingerprint: "1111-a-4-a",
						time:        time.Unix(100, 0),
					},
				},
				atRanges: []atRange{
					{
						fingerprint: "1111-a-4-a",
						from:        time.Unix(100, 0),
						through:     time.Unix(1000, 0),
					},
					{
						fingerprint: "1111-a-4-a",
						from:        time.Unix(100, 0),
						through:     time.Unix(9000, 0),
					},
				},
			},
			out: out{
				{
					fingerprint: "00000000000000001111-a-4-a",
				},
			},
		},
	}

	for i, scenario := range scenarios {
		builder := viewRequestBuilder{
			operations: map[model.Fingerprint]ops{},
		}

		for _, atTime := range scenario.in.atTimes {
			fingerprint := model.NewFingerprintFromRowKey(atTime.fingerprint)
			builder.GetMetricAtTime(fingerprint, atTime.time)
		}

		for _, atInterval := range scenario.in.atIntervals {
			fingerprint := model.NewFingerprintFromRowKey(atInterval.fingerprint)
			builder.GetMetricAtInterval(fingerprint, atInterval.from, atInterval.through, atInterval.interval)
		}

		for _, atRange := range scenario.in.atRanges {
			fingerprint := model.NewFingerprintFromRowKey(atRange.fingerprint)
			builder.GetMetricRange(fingerprint, atRange.from, atRange.through)
		}

		jobs := builder.ScanJobs()

		if len(scenario.out) != len(jobs) {
			t.Fatalf("%d. expected job length of %d, got %d\n", i, len(scenario.out), len(jobs))
		}

		for j, job := range scenario.out {
			if jobs[j].fingerprint.ToRowKey() != job.fingerprint {
				t.Fatalf("%d.%d. expected fingerprint %s, got %s\n", i, j, job.fingerprint, jobs[j].fingerprint.ToRowKey())
			}
		}
	}
}

func TestBuilder(t *testing.T) {
	testBuilder(t)
}

func BenchmarkBuilder(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testBuilder(b)
	}
}

@@ -56,3 +56,7 @@ func NewLevelDBMembershipIndex(storageRoot string, cacheCapacity, bitsPerBloomFi

	return
}

func (l *LevelDBMembershipIndex) Commit(batch leveldb.Batch) error {
	return l.persistence.Commit(batch)
}

@@ -28,6 +28,7 @@ var (
	leveldbUseParanoidChecks = flag.Bool("leveldbUseParanoidChecks", true, "Whether LevelDB uses expensive checks (bool).")
)

// LevelDBPersistence is a disk-backed sorted key-value store.
type LevelDBPersistence struct {
	cache        *levigo.Cache
	filterPolicy *levigo.FilterPolicy

@@ -37,6 +38,8 @@ type LevelDBPersistence struct {
	writeOptions *levigo.WriteOptions
}

// LevelDB iterators have a number of resources that need to be closed.
// iteratorCloser encapsulates the various ones.
type iteratorCloser struct {
	iterator    *levigo.Iterator
	readOptions *levigo.ReadOptions

@@ -169,6 +172,10 @@ func (l *LevelDBPersistence) Put(key, value coding.Encoder) (err error) {
	return
}

func (l *LevelDBPersistence) Commit(b Batch) (err error) {
	return l.storage.Write(l.writeOptions, b.(batch).batch)
}

func (l *LevelDBPersistence) GetAll() (pairs []raw.Pair, err error) {
	snapshot := l.storage.NewSnapshot()
	defer l.storage.ReleaseSnapshot(snapshot)

@@ -272,3 +279,47 @@ func (l *LevelDBPersistence) ForEach(decoder storage.RecordDecoder, filter stora
	scannedEntireCorpus = true
	return
}

// Batch encapsulates a list of mutations to occur to the datastore. It must
// be closed once done.
type Batch interface {
	Delete(coding.Encoder)
	Put(coding.Encoder, coding.Encoder)
	Close()
}

func NewBatch() Batch {
	return batch{
		batch: levigo.NewWriteBatch(),
	}
}

type batch struct {
	batch *levigo.WriteBatch
}

func (b batch) Delete(key coding.Encoder) {
	keyEncoded, err := key.Encode()
	if err != nil {
		panic(err)
	}

	b.batch.Delete(keyEncoded)
}

func (b batch) Put(key, value coding.Encoder) {
	keyEncoded, err := key.Encode()
	if err != nil {
		panic(err)
	}
	valueEncoded, err := value.Encode()
	if err != nil {
		panic(err)
	}

	b.batch.Put(keyEncoded, valueEncoded)
}

func (b batch) Close() {
	b.batch.Close()
}

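For orientation, a short sketch of how the new Batch type is meant to compose with Commit: mutations are staged in the batch and applied in one write, which wraps levigo's WriteBatch underneath. The import path and the rawEncoder helper are assumptions, with coding.Encoder taken to be the single-method Encode() ([]byte, error) contract implied by the calls above.

package example

import (
	"github.com/prometheus/prometheus/storage/raw/leveldb"
)

// rawEncoder is a hypothetical stand-in for a coding.Encoder: it simply hands
// back its own bytes from Encode.
type rawEncoder []byte

func (r rawEncoder) Encode() ([]byte, error) {
	return r, nil
}

// writeBatched stages several mutations and applies them atomically via Commit.
func writeBatched(p *leveldb.LevelDBPersistence) error {
	b := leveldb.NewBatch()
	defer b.Close()

	b.Put(rawEncoder("alpha"), rawEncoder("1"))
	b.Put(rawEncoder("beta"), rawEncoder("2"))
	b.Delete(rawEncoder("gamma"))

	return p.Commit(b)
}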