Major reorganization of the storage.

Most importantly, the heads file will now persist all the chunk descs
as well. Implicitly, it will serve as the persisted form of the
fp-to-series map.

Change-Id: Ic867e78f2714d54c3b5733939cc5aef43f7bd08d
This commit is contained in:
Bjoern Rabenstein 2014-09-10 18:41:52 +02:00
parent e7cb9ddb9f
commit 5a128a04a9
11 changed files with 919 additions and 815 deletions

View file

@ -0,0 +1,296 @@
package codec
import (
	"bytes"
	"encoding"
	"encoding/binary"
	"fmt"
	"io"
	"sync"

	clientmodel "github.com/prometheus/client_golang/model"

	"github.com/prometheus/prometheus/storage/metric"
)
// codable combines the two standard binary (un)marshaling interfaces.
// Every type persisted by this package implements it.
type codable interface {
encoding.BinaryMarshaler
encoding.BinaryUnmarshaler
}
// byteReader combines io.Reader and io.ByteReader: binary.ReadVarint
// needs byte-at-a-time reads, while io.ReadFull needs bulk reads.
type byteReader interface {
io.Reader
io.ByteReader
}
// bufPool recycles scratch byte slices across encode/decode calls to
// reduce garbage.
var bufPool sync.Pool

// getBuf returns a []byte of length l. A pooled slice is reused if one
// with sufficient capacity is available; otherwise a new one is
// allocated (and any undersized pooled slice is dropped).
func getBuf(l int) []byte {
	if x := bufPool.Get(); x != nil {
		if buf := x.([]byte); cap(buf) >= l {
			return buf[:l]
		}
	}
	return make([]byte, l)
}

// putBuf returns buf to the pool for later reuse by getBuf. The caller
// must not use buf afterwards.
func putBuf(buf []byte) {
	bufPool.Put(buf)
}
// EncodeVarint encodes i as a varint and writes it to w. It returns
// the error of the underlying Write, if any.
func EncodeVarint(w io.Writer, i int64) error {
	buf := getBuf(binary.MaxVarintLen64)
	_, err := w.Write(buf[:binary.PutVarint(buf, i)])
	putBuf(buf)
	return err
}
// EncodeUint64 writes u to w as 8 big-endian bytes.
func EncodeUint64(w io.Writer, u uint64) error {
	b := getBuf(8)
	binary.BigEndian.PutUint64(b, u)
	_, err := w.Write(b)
	putBuf(b)
	return err
}
// DecodeUint64 reads exactly 8 big-endian bytes from r and returns the
// decoded uint64. The inverse of EncodeUint64.
func DecodeUint64(r io.Reader) (uint64, error) {
	b := getBuf(8)
	defer putBuf(b)

	if _, err := io.ReadFull(r, b); err != nil {
		return 0, err
	}
	return binary.BigEndian.Uint64(b), nil
}
// encodeString writes s to b as a varint length prefix followed by the
// raw string bytes.
func encodeString(b *bytes.Buffer, s string) error {
	if err := EncodeVarint(b, int64(len(s))); err != nil {
		return err
	}
	_, err := b.WriteString(s)
	return err
}
// decodeString reads a varint length followed by that many bytes from
// b and returns them as a string. The inverse of encodeString.
func decodeString(b byteReader) (string, error) {
	length, err := binary.ReadVarint(b)
	if err != nil {
		return "", err
	}
	// A negative length means corrupt input. Report it as an error
	// instead of panicking in the slice allocation inside getBuf.
	if length < 0 {
		return "", fmt.Errorf("invalid string length %d", length)
	}
	buf := getBuf(int(length))
	defer putBuf(buf)

	if _, err := io.ReadFull(b, buf); err != nil {
		return "", err
	}
	return string(buf), nil
}
// CodableMetric is a clientmodel.Metric that implements codable.
type CodableMetric clientmodel.Metric

// MarshalBinary implements encoding.BinaryMarshaler: a varint number of
// label pairs followed by name/value string pairs. Pairs are emitted in
// map iteration order, so the encoding of a given metric is not
// guaranteed to be byte-identical across calls.
func (m CodableMetric) MarshalBinary() ([]byte, error) {
	var b bytes.Buffer
	if err := EncodeVarint(&b, int64(len(m))); err != nil {
		return nil, err
	}
	for name, value := range m {
		if err := encodeString(&b, string(name)); err != nil {
			return nil, err
		}
		if err := encodeString(&b, string(value)); err != nil {
			return nil, err
		}
	}
	return b.Bytes(), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (m *CodableMetric) UnmarshalBinary(buf []byte) error {
	return m.UnmarshalFromReader(bytes.NewReader(buf))
}

// UnmarshalFromReader decodes a metric from r. It first replaces *m
// with a freshly allocated map, then fills it pair by pair, so on a
// decoding error *m may hold a partially populated metric.
func (m *CodableMetric) UnmarshalFromReader(r byteReader) error {
	numLabelPairs, err := binary.ReadVarint(r)
	if err != nil {
		return err
	}
	*m = make(CodableMetric, numLabelPairs)
	for ; numLabelPairs > 0; numLabelPairs-- {
		name, err := decodeString(r)
		if err != nil {
			return err
		}
		value, err := decodeString(r)
		if err != nil {
			return err
		}
		(*m)[clientmodel.LabelName(name)] = clientmodel.LabelValue(value)
	}
	return nil
}
// CodableFingerprint is a clientmodel.Fingerprint that implements
// codable.
type CodableFingerprint clientmodel.Fingerprint

// MarshalBinary implements encoding.BinaryMarshaler, encoding the
// fingerprint as 8 big-endian bytes.
func (fp CodableFingerprint) MarshalBinary() ([]byte, error) {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(fp))
	return buf, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (fp *CodableFingerprint) UnmarshalBinary(buf []byte) error {
	*fp = CodableFingerprint(binary.BigEndian.Uint64(buf))
	return nil
}
// CodableFingerprints is a clientmodel.Fingerprints that implements
// codable.
type CodableFingerprints clientmodel.Fingerprints

// MarshalBinary implements encoding.BinaryMarshaler: a varint count
// followed by 8 big-endian bytes per fingerprint.
func (fps CodableFingerprints) MarshalBinary() ([]byte, error) {
	b := bytes.NewBuffer(make([]byte, 0, binary.MaxVarintLen64+len(fps)*8))
	if err := EncodeVarint(b, int64(len(fps))); err != nil {
		return nil, err
	}

	scratch := getBuf(8)
	defer putBuf(scratch)

	for _, fp := range fps {
		binary.BigEndian.PutUint64(scratch, uint64(fp))
		if _, err := b.Write(scratch[:8]); err != nil {
			return nil, err
		}
	}
	return b.Bytes(), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler. Corrupt input
// (a negative count, or fewer bytes than the count requires) is
// reported as an error rather than panicking in make or in the slice
// indexing below.
func (fps *CodableFingerprints) UnmarshalBinary(buf []byte) error {
	r := bytes.NewReader(buf)
	numFPs, err := binary.ReadVarint(r)
	if err != nil {
		return err
	}
	offset := len(buf) - r.Len()
	if numFPs < 0 || int64(len(buf)-offset) < numFPs*8 {
		return fmt.Errorf("invalid fingerprint count %d for %d remaining bytes", numFPs, len(buf)-offset)
	}
	*fps = make(CodableFingerprints, numFPs)
	for i := range *fps {
		(*fps)[i] = clientmodel.Fingerprint(binary.BigEndian.Uint64(buf[offset+i*8:]))
	}
	return nil
}
// CodableLabelPair is a metric.LabelPair that implements codable.
type CodableLabelPair metric.LabelPair

// MarshalBinary implements encoding.BinaryMarshaler: the name followed
// by the value, each as a length-prefixed string.
func (lp CodableLabelPair) MarshalBinary() ([]byte, error) {
	var b bytes.Buffer
	if err := encodeString(&b, string(lp.Name)); err != nil {
		return nil, err
	}
	if err := encodeString(&b, string(lp.Value)); err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (lp *CodableLabelPair) UnmarshalBinary(buf []byte) error {
	r := bytes.NewReader(buf)
	name, err := decodeString(r)
	if err != nil {
		return err
	}
	value, err := decodeString(r)
	if err != nil {
		return err
	}
	lp.Name = clientmodel.LabelName(name)
	lp.Value = clientmodel.LabelValue(value)
	return nil
}
// CodableLabelName is a clientmodel.LabelName that implements codable.
type CodableLabelName clientmodel.LabelName

// MarshalBinary implements encoding.BinaryMarshaler, encoding the name
// as a single length-prefixed string.
func (l CodableLabelName) MarshalBinary() ([]byte, error) {
	var b bytes.Buffer
	if err := encodeString(&b, string(l)); err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (l *CodableLabelName) UnmarshalBinary(buf []byte) error {
	n, err := decodeString(bytes.NewReader(buf))
	if err != nil {
		return err
	}
	*l = CodableLabelName(n)
	return nil
}
// CodableLabelValues is a clientmodel.LabelValues that implements
// codable.
type CodableLabelValues clientmodel.LabelValues

// MarshalBinary implements encoding.BinaryMarshaler: a varint count
// followed by each value as a length-prefixed string.
func (vs CodableLabelValues) MarshalBinary() ([]byte, error) {
	buf := &bytes.Buffer{}
	if err := EncodeVarint(buf, int64(len(vs))); err != nil {
		return nil, err
	}
	for _, v := range vs {
		if err := encodeString(buf, string(v)); err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler. A negative
// value count indicates corrupt input and is reported as an error
// rather than panicking in make below.
func (vs *CodableLabelValues) UnmarshalBinary(buf []byte) error {
	r := bytes.NewReader(buf)
	numValues, err := binary.ReadVarint(r)
	if err != nil {
		return err
	}
	if numValues < 0 {
		return fmt.Errorf("invalid label value count %d", numValues)
	}
	*vs = make(CodableLabelValues, numValues)
	for i := range *vs {
		v, err := decodeString(r)
		if err != nil {
			return err
		}
		(*vs)[i] = clientmodel.LabelValue(v)
	}
	return nil
}
// CodableTimeRange is a codable pair of timestamps, used to persist
// the first/last sample time of an archived series.
type CodableTimeRange struct {
	first, last clientmodel.Timestamp
}

// MarshalBinary implements encoding.BinaryMarshaler: first and last as
// two consecutive varints.
func (tr CodableTimeRange) MarshalBinary() ([]byte, error) {
	var b bytes.Buffer
	if err := EncodeVarint(&b, int64(tr.first)); err != nil {
		return nil, err
	}
	if err := EncodeVarint(&b, int64(tr.last)); err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (tr *CodableTimeRange) UnmarshalBinary(buf []byte) error {
	r := bytes.NewReader(buf)
	first, err := binary.ReadVarint(r)
	if err != nil {
		return err
	}
	last, err := binary.ReadVarint(r)
	if err != nil {
		return err
	}
	tr.first = clientmodel.Timestamp(first)
	tr.last = clientmodel.Timestamp(last)
	return nil
}

View file

@ -1,4 +1,4 @@
package index
package codec
import (
"testing"
@ -6,13 +6,13 @@ import (
clientmodel "github.com/prometheus/client_golang/model"
)
func newCodableFingerprint(fp int64) *codableFingerprint {
cfp := codableFingerprint(fp)
func newCodableFingerprint(fp int64) *CodableFingerprint {
cfp := CodableFingerprint(fp)
return &cfp
}
func newCodableLabelName(ln string) *codableLabelName {
cln := codableLabelName(ln)
func newCodableLabelName(ln string) *CodableLabelName {
cln := CodableLabelName(ln)
return &cln
}
@ -23,29 +23,29 @@ func TestCodec(t *testing.T) {
equal func(in, out codable) bool
}{
{
in: codableMetric{
in: CodableMetric{
"label_1": "value_2",
"label_2": "value_2",
"label_3": "value_3",
},
out: codableMetric{},
out: CodableMetric{},
equal: func(in, out codable) bool {
m1 := clientmodel.Metric(in.(codableMetric))
m2 := clientmodel.Metric(out.(codableMetric))
m1 := clientmodel.Metric(in.(CodableMetric))
m2 := clientmodel.Metric(out.(CodableMetric))
return m1.Equal(m2)
},
}, {
in: newCodableFingerprint(12345),
out: newCodableFingerprint(0),
equal: func(in, out codable) bool {
return *in.(*codableFingerprint) == *out.(*codableFingerprint)
return *in.(*CodableFingerprint) == *out.(*CodableFingerprint)
},
}, {
in: &codableFingerprints{1, 2, 56, 1234},
out: &codableFingerprints{},
in: &CodableFingerprints{1, 2, 56, 1234},
out: &CodableFingerprints{},
equal: func(in, out codable) bool {
fps1 := *in.(*codableFingerprints)
fps2 := *out.(*codableFingerprints)
fps1 := *in.(*CodableFingerprints)
fps2 := *out.(*CodableFingerprints)
if len(fps1) != len(fps2) {
return false
}
@ -57,30 +57,30 @@ func TestCodec(t *testing.T) {
return true
},
}, {
in: &codableLabelPair{
in: &CodableLabelPair{
Name: "label_name",
Value: "label_value",
},
out: &codableLabelPair{},
out: &CodableLabelPair{},
equal: func(in, out codable) bool {
lp1 := *in.(*codableLabelPair)
lp2 := *out.(*codableLabelPair)
lp1 := *in.(*CodableLabelPair)
lp2 := *out.(*CodableLabelPair)
return lp1 == lp2
},
}, {
in: newCodableLabelName("label_name"),
out: newCodableLabelName(""),
equal: func(in, out codable) bool {
ln1 := *in.(*codableLabelName)
ln2 := *out.(*codableLabelName)
ln1 := *in.(*CodableLabelName)
ln2 := *out.(*CodableLabelName)
return ln1 == ln2
},
}, {
in: &codableLabelValues{"value_1", "value_2", "value_3"},
out: &codableLabelValues{},
in: &CodableLabelValues{"value_1", "value_2", "value_3"},
out: &CodableLabelValues{},
equal: func(in, out codable) bool {
lvs1 := *in.(*codableLabelValues)
lvs2 := *out.(*codableLabelValues)
lvs1 := *in.(*CodableLabelValues)
lvs2 := *out.(*CodableLabelValues)
if len(lvs1) != len(lvs2) {
return false
}
@ -92,12 +92,12 @@ func TestCodec(t *testing.T) {
return true
},
}, {
in: &codableMembership{},
out: &codableMembership{},
in: &CodableTimeRange{42, 2001},
out: &CodableTimeRange{},
equal: func(in, out codable) bool {
// We don't care about the membership value. Just test if the
// encoding/decoding works at all.
return true
ln1 := *in.(*CodableTimeRange)
ln2 := *out.(*CodableTimeRange)
return ln1 == ln2
},
},
}

View file

@ -1,37 +0,0 @@
package index
import (
"encoding"
"github.com/syndtr/goleveldb/leveldb"
)
type batch struct {
batch *leveldb.Batch
}
func (b *batch) Put(key, value encoding.BinaryMarshaler) error {
k, err := key.MarshalBinary()
if err != nil {
return err
}
v, err := value.MarshalBinary()
if err != nil {
return err
}
b.batch.Put(k, v)
return nil
}
func (b *batch) Delete(key encoding.BinaryMarshaler) error {
k, err := key.MarshalBinary()
if err != nil {
return err
}
b.batch.Delete(k)
return nil
}
func (b *batch) Reset() {
b.batch.Reset()
}

View file

@ -1,227 +0,0 @@
package index
import (
"bytes"
"encoding"
"encoding/binary"
"io"
"sync"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
)
type codable interface {
encoding.BinaryMarshaler
encoding.BinaryUnmarshaler
}
var bufPool sync.Pool
func getBuf(l int) []byte {
x := bufPool.Get()
if x == nil {
return make([]byte, l)
}
buf := x.([]byte)
if cap(buf) < l {
return make([]byte, l)
}
return buf[:l]
}
func putBuf(buf []byte) {
bufPool.Put(buf)
}
func encodeVarint(b *bytes.Buffer, i int) error {
buf := getBuf(binary.MaxVarintLen64)
defer putBuf(buf)
bytesWritten := binary.PutVarint(buf, int64(i))
if _, err := b.Write(buf[:bytesWritten]); err != nil {
return err
}
return nil
}
func encodeString(b *bytes.Buffer, s string) error {
encodeVarint(b, len(s))
if _, err := b.WriteString(s); err != nil {
return err
}
return nil
}
func decodeString(b *bytes.Reader) (string, error) {
length, err := binary.ReadVarint(b)
if err != nil {
return "", err
}
buf := getBuf(int(length))
defer putBuf(buf)
if _, err := io.ReadFull(b, buf); err != nil {
return "", err
}
return string(buf), nil
}
type codableMetric clientmodel.Metric
func (m codableMetric) MarshalBinary() ([]byte, error) {
buf := &bytes.Buffer{}
encodeVarint(buf, len(m))
for l, v := range m {
encodeString(buf, string(l))
encodeString(buf, string(v))
}
return buf.Bytes(), nil
}
func (m codableMetric) UnmarshalBinary(buf []byte) error {
r := bytes.NewReader(buf)
numLabelPairs, err := binary.ReadVarint(r)
if err != nil {
return err
}
for ; numLabelPairs > 0; numLabelPairs-- {
ln, err := decodeString(r)
if err != nil {
return err
}
lv, err := decodeString(r)
if err != nil {
return err
}
m[clientmodel.LabelName(ln)] = clientmodel.LabelValue(lv)
}
return nil
}
type codableFingerprint clientmodel.Fingerprint
func (fp codableFingerprint) MarshalBinary() ([]byte, error) {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(fp))
return b, nil
}
func (fp *codableFingerprint) UnmarshalBinary(buf []byte) error {
*fp = codableFingerprint(binary.BigEndian.Uint64(buf))
return nil
}
type codableFingerprints clientmodel.Fingerprints
func (fps codableFingerprints) MarshalBinary() ([]byte, error) {
b := bytes.NewBuffer(make([]byte, 0, binary.MaxVarintLen64+len(fps)*8))
encodeVarint(b, len(fps))
buf := getBuf(8)
defer putBuf(buf)
for _, fp := range fps {
binary.BigEndian.PutUint64(buf, uint64(fp))
if _, err := b.Write(buf[:8]); err != nil {
return nil, err
}
}
return b.Bytes(), nil
}
func (fps *codableFingerprints) UnmarshalBinary(buf []byte) error {
r := bytes.NewReader(buf)
numFPs, err := binary.ReadVarint(r)
if err != nil {
return err
}
*fps = make(codableFingerprints, numFPs)
offset := len(buf) - r.Len()
for i, _ := range *fps {
(*fps)[i] = clientmodel.Fingerprint(binary.BigEndian.Uint64(buf[offset+i*8:]))
}
return nil
}
type codableLabelPair metric.LabelPair
func (lp codableLabelPair) MarshalBinary() ([]byte, error) {
buf := &bytes.Buffer{}
encodeString(buf, string(lp.Name))
encodeString(buf, string(lp.Value))
return buf.Bytes(), nil
}
func (lp *codableLabelPair) UnmarshalBinary(buf []byte) error {
r := bytes.NewReader(buf)
n, err := decodeString(r)
if err != nil {
return err
}
v, err := decodeString(r)
if err != nil {
return err
}
lp.Name = clientmodel.LabelName(n)
lp.Value = clientmodel.LabelValue(v)
return nil
}
type codableLabelName clientmodel.LabelName
func (l codableLabelName) MarshalBinary() ([]byte, error) {
buf := &bytes.Buffer{}
encodeString(buf, string(l))
return buf.Bytes(), nil
}
func (l *codableLabelName) UnmarshalBinary(buf []byte) error {
r := bytes.NewReader(buf)
n, err := decodeString(r)
if err != nil {
return err
}
*l = codableLabelName(n)
return nil
}
type codableLabelValues clientmodel.LabelValues
func (vs codableLabelValues) MarshalBinary() ([]byte, error) {
buf := &bytes.Buffer{}
encodeVarint(buf, len(vs))
for _, v := range vs {
encodeString(buf, string(v))
}
return buf.Bytes(), nil
}
func (vs *codableLabelValues) UnmarshalBinary(buf []byte) error {
r := bytes.NewReader(buf)
numValues, err := binary.ReadVarint(r)
if err != nil {
return err
}
*vs = make(codableLabelValues, numValues)
for i, _ := range *vs {
v, err := decodeString(r)
if err != nil {
return err
}
(*vs)[i] = clientmodel.LabelValue(v)
}
return nil
}
type codableMembership struct{}
func (m codableMembership) MarshalBinary() ([]byte, error) {
return []byte{}, nil
}
func (m codableMembership) UnmarshalBinary(buf []byte) error { return nil }

View file

@ -2,29 +2,27 @@ package index
import (
"flag"
"os"
"path"
"sync"
"github.com/golang/glog"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/local/codec"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/utility"
)
const (
fingerprintToMetricDir = "fingerprint_to_metric"
fingerprintToMetricDir = "archived_fingerprint_to_metric"
fingerprintTimeRangeDir = "archived_fingerprint_to_timerange"
labelNameToLabelValuesDir = "labelname_to_labelvalues"
labelPairToFingerprintsDir = "labelpair_to_fingerprints"
fingerprintMembershipDir = "fingerprint_membership"
)
var (
fingerprintToMetricCacheSize = flag.Int("storage.fingerprintToMetricCacheSizeBytes", 25*1024*1024, "The size in bytes for the fingerprint to metric index cache.")
labelNameToLabelValuesCacheSize = flag.Int("storage.labelNameToLabelValuesCacheSizeBytes", 25*1024*1024, "The size in bytes for the label name to label values index cache.")
labelPairToFingerprintsCacheSize = flag.Int("storage.labelPairToFingerprintsCacheSizeBytes", 25*1024*1024, "The size in bytes for the label pair to fingerprints index cache.")
fingerprintMembershipCacheSize = flag.Int("storage.fingerprintMembershipCacheSizeBytes", 5*1024*1024, "The size in bytes for the metric membership index cache.")
fingerprintTimeRangeCacheSize = flag.Int("storage.fingerprintTimeRangeCacheSizeBytes", 5*1024*1024, "The size in bytes for the metric time range index cache.")
)
// FingerprintMetricMapping is an in-memory map of fingerprints to metrics.
@ -40,7 +38,7 @@ func (i *FingerprintMetricIndex) IndexBatch(mapping FingerprintMetricMapping) er
b := i.NewBatch()
for fp, m := range mapping {
b.Put(codableFingerprint(fp), codableMetric(m))
b.Put(codec.CodableFingerprint(fp), codec.CodableMetric(m))
}
return i.Commit(b)
@ -51,30 +49,37 @@ func (i *FingerprintMetricIndex) UnindexBatch(mapping FingerprintMetricMapping)
b := i.NewBatch()
for fp, _ := range mapping {
b.Delete(codableFingerprint(fp))
b.Delete(codec.CodableFingerprint(fp))
}
return i.Commit(b)
}
// Lookup looks up a metric by fingerprint.
func (i *FingerprintMetricIndex) Lookup(fp clientmodel.Fingerprint) (m clientmodel.Metric, ok bool, err error) {
m = clientmodel.Metric{}
if ok, err := i.Get(codableFingerprint(fp), codableMetric(m)); !ok {
func (i *FingerprintMetricIndex) Lookup(fp clientmodel.Fingerprint) (clientmodel.Metric, bool, error) {
m := codec.CodableMetric{}
if ok, err := i.Get(codec.CodableFingerprint(fp), &m); !ok {
return nil, false, nil
} else if err != nil {
return nil, false, err
}
return m, true, nil
return clientmodel.Metric(m), true, nil
}
// NewFingerprintMetricIndex returns a FingerprintMetricIndex
// object ready to use.
func NewFingerprintMetricIndex(db KeyValueStore) *FingerprintMetricIndex {
return &FingerprintMetricIndex{
KeyValueStore: db,
func NewFingerprintMetricIndex(basePath string) (*FingerprintMetricIndex, error) {
fingerprintToMetricDB, err := NewLevelDB(LevelDBOptions{
Path: path.Join(basePath, fingerprintToMetricDir),
CacheSizeBytes: *fingerprintToMetricCacheSize,
})
if err != nil {
return nil, err
}
return &FingerprintMetricIndex{
KeyValueStore: fingerprintToMetricDB,
}, nil
}
// LabelNameLabelValuesMapping is an in-memory map of label names to
@ -93,9 +98,9 @@ func (i *LabelNameLabelValuesIndex) IndexBatch(b LabelNameLabelValuesMapping) er
for name, values := range b {
if len(values) == 0 {
batch.Delete(codableLabelName(name))
batch.Delete(codec.CodableLabelName(name))
} else {
batch.Put(codableLabelName(name), codableLabelValues(values))
batch.Put(codec.CodableLabelName(name), codec.CodableLabelValues(values))
}
}
@ -104,7 +109,7 @@ func (i *LabelNameLabelValuesIndex) IndexBatch(b LabelNameLabelValuesMapping) er
// Lookup looks up all label values for a given label name.
func (i *LabelNameLabelValuesIndex) Lookup(l clientmodel.LabelName) (values clientmodel.LabelValues, ok bool, err error) {
ok, err = i.Get(codableLabelName(l), (*codableLabelValues)(&values))
ok, err = i.Get(codec.CodableLabelName(l), (*codec.CodableLabelValues)(&values))
if err != nil {
return nil, false, err
}
@ -117,10 +122,17 @@ func (i *LabelNameLabelValuesIndex) Lookup(l clientmodel.LabelName) (values clie
// NewLabelNameLabelValuesIndex returns a LabelNameLabelValuesIndex
// ready to use.
func NewLabelNameLabelValuesIndex(db KeyValueStore) *LabelNameLabelValuesIndex {
return &LabelNameLabelValuesIndex{
KeyValueStore: db,
func NewLabelNameLabelValuesIndex(basePath string) (*LabelNameLabelValuesIndex, error) {
labelNameToLabelValuesDB, err := NewLevelDB(LevelDBOptions{
Path: path.Join(basePath, labelNameToLabelValuesDir),
CacheSizeBytes: *labelNameToLabelValuesCacheSize,
})
if err != nil {
return nil, err
}
return &LabelNameLabelValuesIndex{
KeyValueStore: labelNameToLabelValuesDB,
}, nil
}
// LabelPairFingerprintsMapping is an in-memory map of label pairs to
@ -139,9 +151,9 @@ func (i *LabelPairFingerprintIndex) IndexBatch(m LabelPairFingerprintsMapping) e
for pair, fps := range m {
if len(fps) == 0 {
batch.Delete(codableLabelPair(pair))
batch.Delete(codec.CodableLabelPair(pair))
} else {
batch.Put(codableLabelPair(pair), codableFingerprints(fps))
batch.Put(codec.CodableLabelPair(pair), codec.CodableFingerprints(fps))
}
}
@ -149,8 +161,8 @@ func (i *LabelPairFingerprintIndex) IndexBatch(m LabelPairFingerprintsMapping) e
}
// Lookup looks up all fingerprints for a given label pair.
func (i *LabelPairFingerprintIndex) Lookup(p *metric.LabelPair) (fps clientmodel.Fingerprints, ok bool, err error) {
ok, err = i.Get((*codableLabelPair)(p), (*codableFingerprints)(&fps))
func (i *LabelPairFingerprintIndex) Lookup(p metric.LabelPair) (fps clientmodel.Fingerprints, ok bool, err error) {
ok, err = i.Get((codec.CodableLabelPair)(p), (*codec.CodableFingerprints)(&fps))
if !ok {
return nil, false, nil
}
@ -163,102 +175,7 @@ func (i *LabelPairFingerprintIndex) Lookup(p *metric.LabelPair) (fps clientmodel
// NewLabelPairFingerprintIndex returns a LabelPairFingerprintIndex
// object ready to use.
func NewLabelPairFingerprintIndex(db KeyValueStore) *LabelPairFingerprintIndex {
return &LabelPairFingerprintIndex{
KeyValueStore: db,
}
}
// FingerprintMembershipIndex models a database tracking the existence
// of metrics by their fingerprints.
type FingerprintMembershipIndex struct {
KeyValueStore
}
// IndexBatch indexes a batch of fingerprints.
func (i *FingerprintMembershipIndex) IndexBatch(b FingerprintMetricMapping) error {
batch := i.NewBatch()
for fp, _ := range b {
batch.Put(codableFingerprint(fp), codableMembership{})
}
return i.Commit(batch)
}
// UnindexBatch unindexes a batch of fingerprints.
func (i *FingerprintMembershipIndex) UnindexBatch(b FingerprintMetricMapping) error {
batch := i.NewBatch()
for fp, _ := range b {
batch.Delete(codableFingerprint(fp))
}
return i.Commit(batch)
}
// Has returns true if the given fingerprint is present.
func (i *FingerprintMembershipIndex) Has(fp clientmodel.Fingerprint) (ok bool, err error) {
return i.KeyValueStore.Has(codableFingerprint(fp))
}
// NewFingerprintMembershipIndex returns a FingerprintMembershipIndex object
// ready to use.
func NewFingerprintMembershipIndex(db KeyValueStore) *FingerprintMembershipIndex {
return &FingerprintMembershipIndex{
KeyValueStore: db,
}
}
// TODO(julius): Currently unused, is it needed?
// SynchronizedIndexer provides naive locking for any MetricIndexer.
type SynchronizedIndexer struct {
mu sync.Mutex
i MetricIndexer
}
// IndexMetrics calls IndexMetrics of the wrapped MetricIndexer after acquiring
// a lock.
func (i *SynchronizedIndexer) IndexMetrics(b FingerprintMetricMapping) error {
i.mu.Lock()
defer i.mu.Unlock()
return i.i.IndexMetrics(b)
}
// diskIndexer is a MetricIndexer that keeps all indexes in levelDBs except the
// fingerprint-to-metric index for non-archived metrics (which is kept in a
// normal in-memory map, but serialized to disk at shutdown and deserialized at
// startup).
//
// TODO: Talk about concurrency!
type diskIndexer struct {
FingerprintToMetric *FingerprintMetricIndex
LabelNameToLabelValues *LabelNameLabelValuesIndex
LabelPairToFingerprints *LabelPairFingerprintIndex
FingerprintMembership *FingerprintMembershipIndex
}
func NewDiskIndexer(basePath string) (MetricIndexer, error) {
err := os.MkdirAll(basePath, 0700)
if err != nil {
return nil, err
}
fingerprintToMetricDB, err := NewLevelDB(LevelDBOptions{
Path: path.Join(basePath, fingerprintToMetricDir),
CacheSizeBytes: *fingerprintToMetricCacheSize,
})
if err != nil {
return nil, err
}
labelNameToLabelValuesDB, err := NewLevelDB(LevelDBOptions{
Path: path.Join(basePath, labelNameToLabelValuesDir),
CacheSizeBytes: *labelNameToLabelValuesCacheSize,
})
if err != nil {
return nil, err
}
func NewLabelPairFingerprintIndex(basePath string) (*LabelPairFingerprintIndex, error) {
labelPairToFingerprintsDB, err := NewLevelDB(LevelDBOptions{
Path: path.Join(basePath, labelPairToFingerprintsDir),
CacheSizeBytes: *labelPairToFingerprintsCacheSize,
@ -266,22 +183,50 @@ func NewDiskIndexer(basePath string) (MetricIndexer, error) {
if err != nil {
return nil, err
}
fingerprintMembershipDB, err := NewLevelDB(LevelDBOptions{
Path: path.Join(basePath, fingerprintMembershipDir),
CacheSizeBytes: *fingerprintMembershipCacheSize,
return &LabelPairFingerprintIndex{
KeyValueStore: labelPairToFingerprintsDB,
}, nil
}
// FingerprintTimeRangeIndex models a database tracking the time ranges
// of metrics by their fingerprints.
type FingerprintTimeRangeIndex struct {
KeyValueStore
}
// UnindexBatch unindexes a batch of fingerprints.
func (i *FingerprintTimeRangeIndex) UnindexBatch(b FingerprintMetricMapping) error {
batch := i.NewBatch()
for fp, _ := range b {
batch.Delete(codec.CodableFingerprint(fp))
}
return i.Commit(batch)
}
// Has returns true if the given fingerprint is present.
func (i *FingerprintTimeRangeIndex) Has(fp clientmodel.Fingerprint) (ok bool, err error) {
return i.KeyValueStore.Has(codec.CodableFingerprint(fp))
}
// NewFingerprintTimeRangeIndex returns a FingerprintTimeRangeIndex object
// ready to use.
func NewFingerprintTimeRangeIndex(basePath string) (*FingerprintTimeRangeIndex, error) {
fingerprintTimeRangeDB, err := NewLevelDB(LevelDBOptions{
Path: path.Join(basePath, fingerprintTimeRangeDir),
CacheSizeBytes: *fingerprintTimeRangeCacheSize,
})
if err != nil {
return nil, err
}
return &diskIndexer{
FingerprintToMetric: NewFingerprintMetricIndex(fingerprintToMetricDB),
LabelNameToLabelValues: NewLabelNameLabelValuesIndex(labelNameToLabelValuesDB),
LabelPairToFingerprints: NewLabelPairFingerprintIndex(labelPairToFingerprintsDB),
FingerprintMembership: NewFingerprintMembershipIndex(fingerprintMembershipDB),
return &FingerprintTimeRangeIndex{
KeyValueStore: fingerprintTimeRangeDB,
}, nil
}
func findUnindexed(i *FingerprintMembershipIndex, b FingerprintMetricMapping) (FingerprintMetricMapping, error) {
func findUnindexed(i *FingerprintTimeRangeIndex, b FingerprintMetricMapping) (FingerprintMetricMapping, error) {
// TODO: Move up? Need to include fp->ts map?
out := FingerprintMetricMapping{}
for fp, m := range b {
@ -297,7 +242,8 @@ func findUnindexed(i *FingerprintMembershipIndex, b FingerprintMetricMapping) (F
return out, nil
}
func findIndexed(i *FingerprintMembershipIndex, b FingerprintMetricMapping) (FingerprintMetricMapping, error) {
func findIndexed(i *FingerprintTimeRangeIndex, b FingerprintMetricMapping) (FingerprintMetricMapping, error) {
// TODO: Move up? Need to include fp->ts map?
out := FingerprintMetricMapping{}
for fp, m := range b {
@ -314,6 +260,7 @@ func findIndexed(i *FingerprintMembershipIndex, b FingerprintMetricMapping) (Fin
}
func extendLabelNameToLabelValuesIndex(i *LabelNameLabelValuesIndex, b FingerprintMetricMapping) (LabelNameLabelValuesMapping, error) {
// TODO: Move up? Need to include fp->ts map?
collection := map[clientmodel.LabelName]utility.Set{}
for _, m := range b {
@ -353,6 +300,7 @@ func extendLabelNameToLabelValuesIndex(i *LabelNameLabelValuesIndex, b Fingerpri
}
func reduceLabelNameToLabelValuesIndex(i *LabelNameLabelValuesIndex, m LabelPairFingerprintsMapping) (LabelNameLabelValuesMapping, error) {
// TODO: Move up? Need to include fp->ts map?
collection := map[clientmodel.LabelName]utility.Set{}
for lp, fps := range m {
@ -393,6 +341,7 @@ func reduceLabelNameToLabelValuesIndex(i *LabelNameLabelValuesIndex, m LabelPair
}
func extendLabelPairIndex(i *LabelPairFingerprintIndex, b FingerprintMetricMapping, remove bool) (LabelPairFingerprintsMapping, error) {
// TODO: Move up? Need to include fp->ts map?
collection := map[metric.LabelPair]utility.Set{}
for fp, m := range b {
@ -403,7 +352,7 @@ func extendLabelPairIndex(i *LabelPairFingerprintIndex, b FingerprintMetricMappi
}
set, ok := collection[pair]
if !ok {
baseFps, _, err := i.Lookup(&pair)
baseFps, _, err := i.Lookup(pair)
if err != nil {
return nil, err
}
@ -438,10 +387,13 @@ func extendLabelPairIndex(i *LabelPairFingerprintIndex, b FingerprintMetricMappi
return batch, nil
}
// TODO: Move IndexMetrics and UndindexMetrics into storage.go.
/*
// IndexMetrics adds the facets of all unindexed metrics found in the given
// FingerprintMetricMapping to the corresponding indices.
func (i *diskIndexer) IndexMetrics(b FingerprintMetricMapping) error {
unindexed, err := findUnindexed(i.FingerprintMembership, b)
unindexed, err := findUnindexed(i.FingerprintTimeRange, b)
if err != nil {
return err
}
@ -466,12 +418,12 @@ func (i *diskIndexer) IndexMetrics(b FingerprintMetricMapping) error {
return err
}
return i.FingerprintMembership.IndexBatch(unindexed)
return i.FingerprintTimeRange.IndexBatch(unindexed)
}
// UnindexMetrics implements MetricIndexer.
func (i *diskIndexer) UnindexMetrics(b FingerprintMetricMapping) error {
indexed, err := findIndexed(i.FingerprintMembership, b)
indexed, err := findIndexed(i.FingerprintTimeRange, b)
if err != nil {
return err
}
@ -493,63 +445,6 @@ func (i *diskIndexer) UnindexMetrics(b FingerprintMetricMapping) error {
return err
}
return i.FingerprintMembership.UnindexBatch(indexed)
}
func (i *diskIndexer) ArchiveMetrics(fp clientmodel.Fingerprint, first, last clientmodel.Timestamp) error {
// TODO: implement.
return nil
}
// GetMetricForFingerprint implements MetricIndexer.
func (i *diskIndexer) GetMetricForFingerprint(fp clientmodel.Fingerprint) (clientmodel.Metric, error) {
m, _, err := i.FingerprintToMetric.Lookup(fp)
return m, err
}
// GetFingerprintsForLabelPair implements MetricIndexer.
func (i *diskIndexer) GetFingerprintsForLabelPair(ln clientmodel.LabelName, lv clientmodel.LabelValue) (clientmodel.Fingerprints, error) {
fps, _, err := i.LabelPairToFingerprints.Lookup(&metric.LabelPair{
Name: ln,
Value: lv,
})
return fps, err
}
// GetLabelValuesForLabelName implements MetricIndexer.
func (i *diskIndexer) GetLabelValuesForLabelName(ln clientmodel.LabelName) (clientmodel.LabelValues, error) {
lvs, _, err := i.LabelNameToLabelValues.Lookup(ln)
return lvs, err
}
// HasFingerprint implements MetricIndexer.
func (i *diskIndexer) HasFingerprint(fp clientmodel.Fingerprint) (bool, error) {
// TODO: modify.
return i.FingerprintMembership.Has(fp)
}
func (i *diskIndexer) HasArchivedFingerprint(clientmodel.Fingerprint) (present bool, first, last clientmodel.Timestamp, err error) {
// TODO: implement.
return false, 0, 0, nil
}
func (i *diskIndexer) Close() error {
var lastError error
if err := i.FingerprintToMetric.Close(); err != nil {
glog.Error("Error closing FingerprintToMetric index DB: ", err)
lastError = err
}
if err := i.LabelNameToLabelValues.Close(); err != nil {
glog.Error("Error closing LabelNameToLabelValues index DB: ", err)
lastError = err
}
if err := i.LabelPairToFingerprints.Close(); err != nil {
glog.Error("Error closing LabelPairToFingerprints index DB: ", err)
lastError = err
}
if err := i.FingerprintMembership.Close(); err != nil {
glog.Error("Error closing FingerprintMembership index DB: ", err)
lastError = err
}
return lastError
return i.FingerprintTimeRange.UnindexBatch(indexed)
}
*/

View file

@ -1,46 +1,6 @@
package index
import (
"encoding"
clientmodel "github.com/prometheus/client_golang/model"
)
// MetricIndexer indexes facets of a clientmodel.Metric. Implementers may or may
// not be concurrency-safe.
type MetricIndexer interface {
// IndexMetrics adds metrics to the index. If the metrics was added
// before and has been archived in the meantime, it is now un-archived.
IndexMetrics(FingerprintMetricMapping) error
// UnindexMetrics removes metrics from the index.
UnindexMetrics(FingerprintMetricMapping) error
// ArchiveMetrics marks the metric with the given fingerprint as
// 'archived', which has to be called if upon eviction of the
// corresponding time series from memory. By calling this method, the
// MetricIndexer learns about the time range of the evicted time series,
// which is used later for the decision if an evicted time series has to
// be moved back into memory. The implementer of MetricIndexer can make
// use of the archived state, e.g. by saving archived metrics in an
// on-disk index and non-archived metrics in an in-memory index.
ArchiveMetrics(fp clientmodel.Fingerprint, first, last clientmodel.Timestamp) error
// GetMetricForFingerprint returns the metric associated with the provided fingerprint.
GetMetricForFingerprint(clientmodel.Fingerprint) (clientmodel.Metric, error)
// GetFingerprintsForLabelPair returns all fingerprints for the provided label pair.
GetFingerprintsForLabelPair(l clientmodel.LabelName, v clientmodel.LabelValue) (clientmodel.Fingerprints, error)
// GetLabelValuesForLabelName returns all label values associated with a given label name.
GetLabelValuesForLabelName(clientmodel.LabelName) (clientmodel.LabelValues, error)
// HasFingerprint returns true if a metric with the given fingerprint
// has been indexed and has NOT been archived yet.
HasFingerprint(clientmodel.Fingerprint) (bool, error)
// HasArchivedFingerprint returns true if a metric with the given
// fingerprint was indexed before and has been archived in the
// meantime. In that case, the time range of the archived metric is also
// returned.
HasArchivedFingerprint(clientmodel.Fingerprint) (present bool, first, last clientmodel.Timestamp, err error)
Close() error
}
import "encoding"
// KeyValueStore persists key/value pairs.
type KeyValueStore interface {

View file

@ -9,7 +9,7 @@ import (
"github.com/syndtr/goleveldb/leveldb/opt"
)
// LevelDB is a LevelDB-backed sorted key-value store.
// LevelDB is a LevelDB-backed sorted KeyValueStore.
type LevelDB struct {
storage *leveldb.DB
readOpts *opt.ReadOptions
@ -21,7 +21,7 @@ type LevelDBOptions struct {
CacheSizeBytes int
}
func NewLevelDB(o LevelDBOptions) (*LevelDB, error) {
func NewLevelDB(o LevelDBOptions) (KeyValueStore, error) {
options := &opt.Options{
Compression: opt.SnappyCompression,
BlockCache: cache.NewLRUCache(o.CacheSizeBytes),
@ -41,7 +41,7 @@ func NewLevelDB(o LevelDBOptions) (*LevelDB, error) {
}
func (l *LevelDB) NewBatch() Batch {
return &batch{
return &LevelDBBatch{
batch: &leveldb.Batch{},
}
}
@ -93,5 +93,36 @@ func (l *LevelDB) Put(key, value encoding.BinaryMarshaler) error {
}
func (l *LevelDB) Commit(b Batch) error {
return l.storage.Write(b.(*batch).batch, l.writeOpts)
return l.storage.Write(b.(*LevelDBBatch).batch, l.writeOpts)
}
// LevelDBBatch is a Batch implementation for LevelDB.
type LevelDBBatch struct {
batch *leveldb.Batch
}
func (b *LevelDBBatch) Put(key, value encoding.BinaryMarshaler) error {
k, err := key.MarshalBinary()
if err != nil {
return err
}
v, err := value.MarshalBinary()
if err != nil {
return err
}
b.batch.Put(k, v)
return nil
}
func (b *LevelDBBatch) Delete(key encoding.BinaryMarshaler) error {
k, err := key.MarshalBinary()
if err != nil {
return err
}
b.batch.Delete(k)
return nil
}
func (b *LevelDBBatch) Reset() {
b.batch.Reset()
}

View file

@ -5,6 +5,9 @@ import (
"github.com/prometheus/prometheus/storage/metric"
)
// SeriesMap maps fingerprints to memory series.
type SeriesMap map[clientmodel.Fingerprint]*memorySeries
type Storage interface {
// AppendSamples stores a group of new samples. Multiple samples for the same
// fingerprint need to be submitted in chronological order, from oldest to
@ -45,12 +48,13 @@ type SeriesIterator interface {
type Persistence interface {
// PersistChunk persists a single chunk of a series.
PersistChunk(clientmodel.Fingerprint, chunk) error
// PersistHeads persists all open (non-full) head chunks.
PersistHeads(map[clientmodel.Fingerprint]*memorySeries) error
// PersistSeriesMapAndHeads persists the fingerprint to memory-series
// mapping and all open (non-full) head chunks.
PersistSeriesMapAndHeads(SeriesMap) error
// DropChunks deletes all chunks from a timeseries whose last sample time is
// before beforeTime.
DropChunks(fp clientmodel.Fingerprint, beforeTime clientmodel.Timestamp) error
DropChunks(fp clientmodel.Fingerprint, beforeTime clientmodel.Timestamp) (allDropped bool, err error)
// LoadChunks loads a group of chunks of a timeseries by their index. The
// chunk with the earliest time will have index 0, the following ones will
@ -58,10 +62,54 @@ type Persistence interface {
LoadChunks(fp clientmodel.Fingerprint, indexes []int) (chunks, error)
// LoadChunkDescs loads chunkDescs for a series up until a given time.
LoadChunkDescs(fp clientmodel.Fingerprint, beforeTime clientmodel.Timestamp) (chunkDescs, error)
// LoadHeads loads all open (non-full) head chunks.
LoadHeads(map[clientmodel.Fingerprint]*memorySeries) error
// LoadSeriesMapAndHeads loads the fingerprint to memory-series mapping
// and all open (non-full) head chunks.
LoadSeriesMapAndHeads() (SeriesMap, error)
// Close releases any held resources.
// GetFingerprintsForLabelPair returns the fingerprints for the given
// label pair.
GetFingerprintsForLabelPair(metric.LabelPair) (clientmodel.Fingerprints, error)
// GetLabelValuesForLabelName returns the label values for the given
// label name.
GetLabelValuesForLabelName(clientmodel.LabelName) (clientmodel.LabelValues, error)
// IndexMetric indexes the given metric for the needs of
// GetFingerprintsForLabelPair and GetLabelValuesForLabelName.
IndexMetric(clientmodel.Metric) error
// UnindexMetric removes references to the given metric from the indexes
// used for GetFingerprintsForLabelPair and
// GetLabelValuesForLabelName. The index of fingerprints to archived
// metrics is not affected by this method. (In fact, never call this
// method for an archived metric. To drop an archived metric, call
// DropArchivedFingerprint.)
UnindexMetric(clientmodel.Metric) error
// ArchiveMetric persists the mapping of the given fingerprint to the
// given metric, together with the first and last timestamp of the
// series belonging to the metric.
ArchiveMetric(
fingerprint clientmodel.Fingerprint, metric clientmodel.Metric,
firstTime, lastTime clientmodel.Timestamp,
) error
// HasArchivedMetric returns whether the archived metric for the given
// fingerprint exists and if yes, what the first and last timestamp in
// the corresponding series is.
HasArchivedMetric(clientmodel.Fingerprint) (
hasMetric bool, firstTime, lastTime clientmodel.Timestamp, err error,
)
// GetArchivedMetric retrieves the archived metric with the given
// fingerprint.
GetArchivedMetric(clientmodel.Fingerprint) (clientmodel.Metric, error)
// DropArchivedMetric deletes an archived fingerprint and its
// corresponding metric entirely. It also un-indexes the metric (no need
// to call UnindexMetric for the deleted metric.)
DropArchivedMetric(clientmodel.Fingerprint) error
// UnarchiveMetric deletes an archived fingerprint and its metric, but
// (in contrast to DropArchivedMetric) does not un-index the metric.
// The method returns true if a metric was actually deleted.
UnarchiveMetric(clientmodel.Fingerprint) (bool, error)
// Close flushes buffered data and releases any held resources.
Close() error
}

View file

@ -12,91 +12,80 @@ import (
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/local/codec"
"github.com/prometheus/prometheus/storage/local/index"
"github.com/prometheus/prometheus/storage/metric"
)
const (
seriesFileName = "series.db"
seriesTempFileName = "series.db.tmp"
headsFileName = "heads.db"
indexDirName = "index"
headsFormatVersion = 1
headsMagicString = "PrometheusHeads"
fileBufSize = 1 << 16 // 64kiB. TODO: Tweak.
chunkHeaderLen = 17
chunkHeaderTypeOffset = 0
chunkHeaderFirstTimeOffset = 1
chunkHeaderLastTimeOffset = 9
headsHeaderLen = 9
headsHeaderFingerprintOffset = 0
headsHeaderTypeOffset = 8
)
type diskPersistence struct {
index.MetricIndexer
basePath string
chunkLen int
buf []byte // Staging space for persisting indexes.
archivedFingerprintToMetrics *index.FingerprintMetricIndex
archivedFingerprintToTimeRange *index.FingerprintTimeRangeIndex
labelPairToFingerprints *index.LabelPairFingerprintIndex
labelNameToLabelValues *index.LabelNameLabelValuesIndex
}
func NewDiskPersistence(basePath string, chunkLen int) (Persistence, error) {
metricIndexer, err := index.NewDiskIndexer(basePath)
if err := os.MkdirAll(basePath, 0700); err != nil {
return nil, err
}
dp := &diskPersistence{
basePath: basePath,
chunkLen: chunkLen,
}
var err error
dp.archivedFingerprintToMetrics, err = index.NewFingerprintMetricIndex(basePath)
if err != nil {
return nil, err
}
dp.archivedFingerprintToTimeRange, err = index.NewFingerprintTimeRangeIndex(basePath)
if err != nil {
return nil, err
}
dp.labelPairToFingerprints, err = index.NewLabelPairFingerprintIndex(basePath)
if err != nil {
return nil, err
}
dp.labelNameToLabelValues, err = index.NewLabelNameLabelValuesIndex(basePath)
if err != nil {
return nil, err
}
return &diskPersistence{
basePath: basePath,
chunkLen: chunkLen,
buf: make([]byte, binary.MaxVarintLen64), // Also sufficient for uint64.
MetricIndexer: metricIndexer,
}, nil
return dp, nil
}
func (p *diskPersistence) dirForFingerprint(fp clientmodel.Fingerprint) string {
fpStr := fp.String()
return fmt.Sprintf("%s/%c%c/%s", p.basePath, fpStr[0], fpStr[1], fpStr[2:])
}
// exists returns true when the given file or directory exists.
func exists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
func (p *diskPersistence) openChunkFileForWriting(fp clientmodel.Fingerprint) (*os.File, error) {
dirname := p.dirForFingerprint(fp)
ex, err := exists(dirname)
func (p *diskPersistence) GetFingerprintsForLabelPair(lp metric.LabelPair) (clientmodel.Fingerprints, error) {
fps, _, err := p.labelPairToFingerprints.Lookup(lp)
if err != nil {
return nil, err
}
if !ex {
if err := os.MkdirAll(dirname, 0700); err != nil {
return nil, err
}
return fps, nil
}
func (p *diskPersistence) GetLabelValuesForLabelName(ln clientmodel.LabelName) (clientmodel.LabelValues, error) {
lvs, _, err := p.labelNameToLabelValues.Lookup(ln)
if err != nil {
return nil, err
}
return os.OpenFile(path.Join(dirname, seriesFileName), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
}
func (p *diskPersistence) openChunkFileForReading(fp clientmodel.Fingerprint) (*os.File, error) {
dirname := p.dirForFingerprint(fp)
return os.Open(path.Join(dirname, seriesFileName))
}
func writeChunkHeader(w io.Writer, c chunk) error {
header := make([]byte, chunkHeaderLen)
header[chunkHeaderTypeOffset] = chunkType(c)
binary.LittleEndian.PutUint64(header[chunkHeaderFirstTimeOffset:], uint64(c.firstTime()))
binary.LittleEndian.PutUint64(header[chunkHeaderLastTimeOffset:], uint64(c.lastTime()))
_, err := w.Write(header)
return err
return lvs, nil
}
func (p *diskPersistence) PersistChunk(fp clientmodel.Fingerprint, c chunk) error {
@ -120,10 +109,6 @@ func (p *diskPersistence) PersistChunk(fp clientmodel.Fingerprint, c chunk) erro
return c.marshal(b)
}
func (p *diskPersistence) offsetForChunkIndex(i int) int64 {
return int64(i * (chunkHeaderLen + p.chunkLen))
}
func (p *diskPersistence) LoadChunks(fp clientmodel.Fingerprint, indexes []int) (chunks, error) {
// TODO: we need to verify at some point that file length is a multiple of
// the chunk size. When is the best time to do this, and where to remember
@ -222,40 +207,149 @@ func (p *diskPersistence) LoadChunkDescs(fp clientmodel.Fingerprint, beforeTime
return cds, nil
}
func (p *diskPersistence) headsPath() string {
return path.Join(p.basePath, headsFileName)
}
func (p *diskPersistence) PersistHeads(fpToSeries map[clientmodel.Fingerprint]*memorySeries) error {
func (p *diskPersistence) PersistSeriesMapAndHeads(fingerprintToSeries SeriesMap) error {
f, err := os.OpenFile(p.headsPath(), os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640)
if err != nil {
return err
}
header := make([]byte, 9)
for fp, series := range fpToSeries {
head := series.head().chunk
defer f.Close()
w := bufio.NewWriterSize(f, fileBufSize)
binary.LittleEndian.PutUint64(header[headsHeaderFingerprintOffset:], uint64(fp))
header[headsHeaderTypeOffset] = chunkType(head)
_, err := f.Write(header)
if _, err := w.WriteString(headsMagicString); err != nil {
return err
}
if err := codec.EncodeVarint(w, headsFormatVersion); err != nil {
return err
}
if err := codec.EncodeVarint(w, int64(len(fingerprintToSeries))); err != nil {
return err
}
for fp, series := range fingerprintToSeries {
if err := codec.EncodeUint64(w, uint64(fp)); err != nil {
return err
}
buf, err := codec.CodableMetric(series.metric).MarshalBinary()
if err != nil {
return err
}
err = head.marshal(f)
if err != nil {
w.Write(buf)
if err := codec.EncodeVarint(w, int64(len(series.chunkDescs))); err != nil {
return err
}
for i, chunkDesc := range series.chunkDescs {
if i < len(series.chunkDescs)-1 {
if err := codec.EncodeVarint(w, int64(chunkDesc.firstTime())); err != nil {
return err
}
if err := codec.EncodeVarint(w, int64(chunkDesc.lastTime())); err != nil {
return err
}
} else {
// This is the head chunk. Fully marshal it.
if err := w.WriteByte(chunkType(chunkDesc.chunk)); err != nil {
return err
}
if err := chunkDesc.chunk.marshal(w); err != nil {
return err
}
}
}
}
return nil
return w.Flush()
}
func (p *diskPersistence) DropChunks(fp clientmodel.Fingerprint, beforeTime clientmodel.Timestamp) error {
f, err := p.openChunkFileForReading(fp)
func (p *diskPersistence) LoadSeriesMapAndHeads() (SeriesMap, error) {
f, err := os.Open(p.headsPath())
if os.IsNotExist(err) {
return nil
return SeriesMap{}, nil
}
if err != nil {
return err
return nil, err
}
defer f.Close()
r := bufio.NewReaderSize(f, fileBufSize)
buf := make([]byte, len(headsMagicString))
if _, err := io.ReadFull(r, buf); err != nil {
return nil, err
}
magic := string(buf)
if magic != headsMagicString {
return nil, fmt.Errorf(
"unexpected magic string, want %q, got %q",
headsMagicString, magic,
)
}
if version, err := binary.ReadVarint(r); version != headsFormatVersion || err != nil {
return nil, fmt.Errorf("unknown heads format version, want %d", headsFormatVersion)
}
numSeries, err := binary.ReadVarint(r)
if err != nil {
return nil, err
}
fingerprintToSeries := make(SeriesMap, numSeries)
for ; numSeries > 0; numSeries-- {
fp, err := codec.DecodeUint64(r)
if err != nil {
return nil, err
}
var metric codec.CodableMetric
if err := metric.UnmarshalFromReader(r); err != nil {
return nil, err
}
numChunkDescs, err := binary.ReadVarint(r)
if err != nil {
return nil, err
}
chunkDescs := make(chunkDescs, numChunkDescs)
for i := int64(0); i < numChunkDescs-1; i++ {
firstTime, err := binary.ReadVarint(r)
if err != nil {
return nil, err
}
lastTime, err := binary.ReadVarint(r)
if err != nil {
return nil, err
}
chunkDescs[i] = &chunkDesc{
firstTimeField: clientmodel.Timestamp(firstTime),
lastTimeField: clientmodel.Timestamp(lastTime),
}
}
// Head chunk.
chunkType, err := r.ReadByte()
if err != nil {
return nil, err
}
chunk := chunkForType(chunkType)
if err := chunk.unmarshal(r); err != nil {
return nil, err
}
chunkDescs[numChunkDescs-1] = &chunkDesc{
chunk: chunk,
refCount: 1,
}
fingerprintToSeries[clientmodel.Fingerprint(fp)] = &memorySeries{
metric: clientmodel.Metric(metric),
chunkDescs: chunkDescs,
chunkDescsLoaded: true,
}
}
return fingerprintToSeries, nil
}
func (p *diskPersistence) DropChunks(fp clientmodel.Fingerprint, beforeTime clientmodel.Timestamp) (bool, error) {
f, err := p.openChunkFileForReading(fp)
if os.IsNotExist(err) {
return true, nil
}
if err != nil {
return false, err
}
defer f.Close()
@ -263,7 +357,7 @@ func (p *diskPersistence) DropChunks(fp clientmodel.Fingerprint, beforeTime clie
for i := 0; ; i++ {
_, err := f.Seek(p.offsetForChunkIndex(i)+chunkHeaderLastTimeOffset, os.SEEK_SET)
if err != nil {
return err
return false, err
}
lastTimeBuf := make([]byte, 8)
_, err = io.ReadAtLeast(f, lastTimeBuf, 8)
@ -271,12 +365,12 @@ func (p *diskPersistence) DropChunks(fp clientmodel.Fingerprint, beforeTime clie
// We ran into the end of the file without finding any chunks that should
// be kept. Remove the whole file.
if err := os.Remove(f.Name()); err != nil {
return err
return true, err
}
return nil
return true, nil
}
if err != nil {
return err
return false, err
}
lastTime := clientmodel.Timestamp(binary.LittleEndian.Uint64(lastTimeBuf))
if !lastTime.Before(beforeTime) {
@ -289,57 +383,135 @@ func (p *diskPersistence) DropChunks(fp clientmodel.Fingerprint, beforeTime clie
// file.
_, err = f.Seek(-(chunkHeaderLastTimeOffset + 8), os.SEEK_CUR)
if err != nil {
return err
return false, err
}
dirname := p.dirForFingerprint(fp)
temp, err := os.OpenFile(path.Join(dirname, seriesTempFileName), os.O_WRONLY|os.O_CREATE, 0640)
if err != nil {
return err
return false, err
}
defer temp.Close()
if _, err := io.Copy(temp, f); err != nil {
return err
return false, err
}
os.Rename(path.Join(dirname, seriesTempFileName), path.Join(dirname, seriesFileName))
return false, nil
}
func (d *diskPersistence) IndexMetric(m clientmodel.Metric) error {
// TODO: Implement. Possibly in a queue (which needs to be drained before shutdown).
return nil
}
func (p *diskPersistence) LoadHeads(fpToSeries map[clientmodel.Fingerprint]*memorySeries) error {
f, err := os.Open(p.headsPath())
if os.IsNotExist(err) {
// TODO: this should only happen if there never was a shutdown before. In
// that case, all heads should be in order already, since the series got
// created during this process' runtime.
// Still, make this more robust.
return nil
}
header := make([]byte, headsHeaderLen)
for {
_, err := io.ReadAtLeast(f, header, headsHeaderLen)
if err == io.ErrUnexpectedEOF {
// TODO: this should only be ok if n is 0.
break
}
if err != nil {
return nil
}
// TODO: this relies on the implementation (uint64) of Fingerprint.
fp := clientmodel.Fingerprint(binary.LittleEndian.Uint64(header[headsHeaderFingerprintOffset:]))
chunk := chunkForType(header[headsHeaderTypeOffset])
chunk.unmarshal(f)
fpToSeries[fp].chunkDescs = append(fpToSeries[fp].chunkDescs, &chunkDesc{
chunk: chunk,
refCount: 1,
})
}
func (d *diskPersistence) UnindexMetric(m clientmodel.Metric) error {
// TODO: Implement. Possibly in a queue (which needs to be drained before shutdown).
return nil
}
func (d *diskPersistence) ArchiveMetric(
fingerprint clientmodel.Fingerprint, metric clientmodel.Metric,
firstTime, lastTime clientmodel.Timestamp,
) error {
// TODO: Implement.
return nil
}
func (d *diskPersistence) HasArchivedMetric(clientmodel.Fingerprint) (
hasMetric bool, firstTime, lastTime clientmodel.Timestamp, err error,
) {
// TODO: Implement.
return
}
func (d *diskPersistence) GetArchivedMetric(clientmodel.Fingerprint) (clientmodel.Metric, error) {
// TODO: Implement.
return nil, nil
}
func (d *diskPersistence) DropArchivedMetric(clientmodel.Fingerprint) error {
// TODO: Implement. Unindex after drop!
return nil
}
func (d *diskPersistence) UnarchiveMetric(clientmodel.Fingerprint) (bool, error) {
// TODO: Implement.
return false, nil
}
func (d *diskPersistence) Close() error {
// TODO: Move persistHeads here once fingerprintToSeries map is here.
return d.MetricIndexer.Close()
var lastError error
if err := d.archivedFingerprintToMetrics.Close(); err != nil {
lastError = err
glog.Error("Error closing archivedFingerprintToMetric index DB: ", err)
}
if err := d.archivedFingerprintToTimeRange.Close(); err != nil {
lastError = err
glog.Error("Error closing archivedFingerprintToTimeRange index DB: ", err)
}
if err := d.labelPairToFingerprints.Close(); err != nil {
lastError = err
glog.Error("Error closing labelPairToFingerprints index DB: ", err)
}
if err := d.labelNameToLabelValues.Close(); err != nil {
lastError = err
glog.Error("Error closing labelNameToLabelValues index DB: ", err)
}
return lastError
}
func (p *diskPersistence) dirForFingerprint(fp clientmodel.Fingerprint) string {
fpStr := fp.String()
return fmt.Sprintf("%s/%c%c/%s", p.basePath, fpStr[0], fpStr[1], fpStr[2:])
}
func (p *diskPersistence) openChunkFileForWriting(fp clientmodel.Fingerprint) (*os.File, error) {
dirname := p.dirForFingerprint(fp)
ex, err := exists(dirname)
if err != nil {
return nil, err
}
if !ex {
if err := os.MkdirAll(dirname, 0700); err != nil {
return nil, err
}
}
return os.OpenFile(path.Join(dirname, seriesFileName), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
}
func (p *diskPersistence) openChunkFileForReading(fp clientmodel.Fingerprint) (*os.File, error) {
dirname := p.dirForFingerprint(fp)
return os.Open(path.Join(dirname, seriesFileName))
}
func writeChunkHeader(w io.Writer, c chunk) error {
header := make([]byte, chunkHeaderLen)
header[chunkHeaderTypeOffset] = chunkType(c)
binary.LittleEndian.PutUint64(header[chunkHeaderFirstTimeOffset:], uint64(c.firstTime()))
binary.LittleEndian.PutUint64(header[chunkHeaderLastTimeOffset:], uint64(c.lastTime()))
_, err := w.Write(header)
return err
}
func (p *diskPersistence) offsetForChunkIndex(i int) int64 {
return int64(i * (chunkHeaderLen + p.chunkLen))
}
func (p *diskPersistence) headsPath() string {
return path.Join(p.basePath, headsFileName)
}
// exists returns true when the given file or directory exists.
func exists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}

View file

@ -100,8 +100,11 @@ type memorySeries struct {
mtx sync.Mutex
metric clientmodel.Metric
// Sorted by start time, no overlapping chunk ranges allowed.
chunkDescs chunkDescs
// Sorted by start time, overlapping chunk ranges are forbidden.
chunkDescs chunkDescs
// Whether chunkDescs for chunks on disk are loaded. Even if false, a head
// chunk could be present. In that case, its chunkDesc will be the
// only one in chunkDescs.
chunkDescsLoaded bool
}
@ -171,14 +174,11 @@ func (s *memorySeries) evictOlderThan(t clientmodel.Timestamp) {
}
}
func (s *memorySeries) purgeOlderThan(t clientmodel.Timestamp, p Persistence) (dropSeries bool, err error) {
// purgeOlderThan returns true if all chunks have been purged.
func (s *memorySeries) purgeOlderThan(t clientmodel.Timestamp) bool {
s.mtx.Lock()
defer s.mtx.Unlock()
if err := p.DropChunks(s.metric.Fingerprint(), t); err != nil {
return false, err
}
keepIdx := len(s.chunkDescs)
for i, cd := range s.chunkDescs {
if !cd.lastTime().Before(t) {
@ -193,17 +193,7 @@ func (s *memorySeries) purgeOlderThan(t clientmodel.Timestamp, p Persistence) (d
}
}
s.chunkDescs = s.chunkDescs[keepIdx:]
return len(s.chunkDescs) == 0, nil
}
func (s *memorySeries) close() {
for _, cd := range s.chunkDescs {
if cd.chunk != nil {
cd.evictNow()
}
// TODO: need to handle unwritten heads here.
}
return len(s.chunkDescs) == 0
}
// TODO: in this method (and other places), we just fudge around with chunkDesc

View file

@ -7,9 +7,7 @@ import (
"github.com/golang/glog"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/utility"
)
const persistQueueCap = 1024
@ -29,10 +27,7 @@ type memorySeriesStorage struct {
persistDone chan bool
stopServing chan chan<- bool
// TODO: These have to go to persistence.
fingerprintToSeries map[clientmodel.Fingerprint]*memorySeries
labelPairToFingerprints map[metric.LabelPair]utility.Set
labelNameToLabelValues map[clientmodel.LabelName]utility.Set
fingerprintToSeries SeriesMap
memoryEvictionInterval time.Duration
memoryRetentionPeriod time.Duration
@ -52,21 +47,18 @@ type MemorySeriesStorageOptions struct {
PersistenceRetentionPeriod time.Duration
}
func NewMemorySeriesStorage(o *MemorySeriesStorageOptions) (*memorySeriesStorage, error) { // TODO: change to return Storage?
glog.Info("Loading series head chunks...")
/*
if err := o.Persistence.LoadHeads(i.FingerprintToSeries); err != nil {
return nil, err
}
numSeries.Set(float64(len(i.FingerprintToSeries)))
*/
return &memorySeriesStorage{
fingerprintToSeries: map[clientmodel.Fingerprint]*memorySeries{},
labelPairToFingerprints: map[metric.LabelPair]utility.Set{},
labelNameToLabelValues: map[clientmodel.LabelName]utility.Set{},
func NewMemorySeriesStorage(o *MemorySeriesStorageOptions) (Storage, error) {
glog.Info("Loading series map and head chunks...")
fingerprintToSeries, err := o.Persistence.LoadSeriesMapAndHeads()
if err != nil {
return nil, err
}
numSeries.Set(float64(len(fingerprintToSeries)))
persistDone: make(chan bool),
stopServing: make(chan chan<- bool),
return &memorySeriesStorage{
fingerprintToSeries: fingerprintToSeries,
persistDone: make(chan bool),
stopServing: make(chan chan<- bool),
memoryEvictionInterval: o.MemoryEvictionInterval,
memoryRetentionPeriod: o.MemoryRetentionPeriod,
@ -121,25 +113,22 @@ func (s *memorySeriesStorage) getOrCreateSeries(m clientmodel.Metric) *memorySer
s.fingerprintToSeries[fp] = series
numSeries.Set(float64(len(s.fingerprintToSeries)))
for k, v := range m {
labelPair := metric.LabelPair{
Name: k,
Value: v,
}
unarchived, err := s.persistence.UnarchiveMetric(fp)
if err != nil {
glog.Errorf("Error unarchiving fingerprint %v: %v", fp, err)
}
fps, ok := s.labelPairToFingerprints[labelPair]
if !ok {
fps = utility.Set{}
s.labelPairToFingerprints[labelPair] = fps
if unarchived {
// The series existed before, had been archived at some
// point, and has now been unarchived, i.e. it has
// chunks on disk. Set chunkDescsLoaded accordingly so
// that they will be looked at later.
series.chunkDescsLoaded = false
} else {
// This was a genuinely new series, so index the metric.
if err := s.persistence.IndexMetric(m); err != nil {
glog.Errorf("Error indexing metric %v: %v", m, err)
}
fps.Add(fp)
values, ok := s.labelNameToLabelValues[k]
if !ok {
values = utility.Set{}
s.labelNameToLabelValues[k] = values
}
values.Add(v)
}
}
return series
@ -195,6 +184,7 @@ func recordPersist(start time.Time, err error) {
}
func (s *memorySeriesStorage) handlePersistQueue() {
// TODO: Perhaps move this into Persistence?
for req := range s.persistQueue {
// TODO: Make this thread-safe?
persistQueueLength.Set(float64(len(s.persistQueue)))
@ -234,25 +224,20 @@ func (s *memorySeriesStorage) Close() error {
glog.Info("Persist loop stopped.")
glog.Info("Persisting head chunks...")
if err := s.persistHeads(); err != nil {
if err := s.persistence.PersistSeriesMapAndHeads(s.fingerprintToSeries); err != nil {
return err
}
glog.Info("Done persisting head chunks.")
for _, series := range s.fingerprintToSeries {
series.close()
}
s.fingerprintToSeries = nil
if err := s.persistence.Close(); err != nil {
return err
}
s.state = storageStopping
return nil
}
func (s *memorySeriesStorage) persistHeads() error {
return s.persistence.PersistHeads(s.fingerprintToSeries)
}
func (s *memorySeriesStorage) purgePeriodically(stop <-chan bool) {
purgeTicker := time.NewTicker(s.persistencePurgeInterval)
defer purgeTicker.Stop()
@ -276,7 +261,7 @@ func (s *memorySeriesStorage) purgePeriodically(stop <-chan bool) {
glog.Info("Interrupted running series purge.")
return
default:
s.purgeSeries(&fp)
s.purgeSeries(fp)
}
}
glog.Info("Done purging old series data.")
@ -284,57 +269,51 @@ func (s *memorySeriesStorage) purgePeriodically(stop <-chan bool) {
}
}
func (s *memorySeriesStorage) purgeSeries(fp *clientmodel.Fingerprint) {
s.mtx.RLock()
series, ok := s.fingerprintToSeries[*fp]
if !ok {
return
}
s.mtx.RUnlock()
// purgeSeries purges chunks older than persistenceRetentionPeriod from a
// series. If the series contains no chunks after the purge, it is dropped
// entirely.
func (s *memorySeriesStorage) purgeSeries(fp clientmodel.Fingerprint) {
ts := clientmodel.TimestampFromTime(time.Now()).Add(-1 * s.persistenceRetentionPeriod)
drop, err := series.purgeOlderThan(clientmodel.TimestampFromTime(time.Now()).Add(-1*s.persistenceRetentionPeriod), s.persistence)
if err != nil {
glog.Error("Error purging series data: ", err)
}
if drop {
s.dropSeries(fp)
}
}
// Drop a label value from the label names to label values index.
func (s *memorySeriesStorage) dropLabelValue(l clientmodel.LabelName, v clientmodel.LabelValue) {
if set, ok := s.labelNameToLabelValues[l]; ok {
set.Remove(v)
if len(set) == 0 {
delete(s.labelNameToLabelValues, l)
}
}
}
// Drop all references to a series, including any samples.
func (s *memorySeriesStorage) dropSeries(fp *clientmodel.Fingerprint) {
s.mtx.Lock()
// TODO: This is a lock FAR to coarse! However, we cannot lock using the
// memorySeries since we might have none (for series that are on disk
// only). And we really don't want to un-archive a series from disk
// while we are at the same time purging it. A locking per fingerprint
// would be nice. Or something... Have to think about it... Careful,
// more race conditions lurk below. Also unsolved: If there are chunks
// in the persist queue. persistence.DropChunks and
// persistence.PersistChunck needs to be locked on fp level, or
// something. And even then, what happens if everything is dropped, but
// there are still chunks hung in the persist queue? They would later
// re-create a file for a series that doesn't exist anymore...
defer s.mtx.Unlock()
series, ok := s.fingerprintToSeries[*fp]
if !ok {
// First purge persisted chunks. We need to do that anyway.
allDropped, err := s.persistence.DropChunks(fp, ts)
if err != nil {
glog.Error("Error purging persisted chunks: ", err)
}
// Purge chunks from memory accordingly.
if series, ok := s.fingerprintToSeries[fp]; ok {
if series.purgeOlderThan(ts) {
delete(s.fingerprintToSeries, fp)
if err := s.persistence.UnindexMetric(series.metric); err != nil {
glog.Errorf("Error unindexing metric %v: %v", series.metric, err)
}
}
return
}
for k, v := range series.metric {
labelPair := metric.LabelPair{
Name: k,
Value: v,
}
if set, ok := s.labelPairToFingerprints[labelPair]; ok {
set.Remove(*fp)
if len(set) == 0 {
delete(s.labelPairToFingerprints, labelPair)
s.dropLabelValue(k, v)
}
}
// If nothing was in memory, the metric must have been archived. Drop
// the archived metric if there are no persisted chunks left.
if !allDropped {
return
}
if err := s.persistence.DropArchivedMetric(fp); err != nil {
glog.Errorf("Error dropping archived metric for fingerprint %v: %v", fp, err)
}
delete(s.fingerprintToSeries, *fp)
}
func (s *memorySeriesStorage) Serve(started chan<- bool) {
@ -376,78 +355,76 @@ func (s *memorySeriesStorage) GetFingerprintsForLabelMatchers(labelMatchers metr
s.mtx.RLock()
defer s.mtx.RUnlock()
sets := []utility.Set{}
var result map[clientmodel.Fingerprint]struct{}
for _, matcher := range labelMatchers {
intersection := map[clientmodel.Fingerprint]struct{}{}
switch matcher.Type {
case metric.Equal:
set, ok := s.labelPairToFingerprints[metric.LabelPair{
Name: matcher.Name,
Value: matcher.Value,
}]
if !ok {
fps, err := s.persistence.GetFingerprintsForLabelPair(
metric.LabelPair{
Name: matcher.Name,
Value: matcher.Value,
},
)
if err != nil {
glog.Error("Error getting fingerprints for label pair: ", err)
}
if len(fps) == 0 {
return nil
}
sets = append(sets, set)
for _, fp := range fps {
if _, ok := result[fp]; ok || result == nil {
intersection[fp] = struct{}{}
}
}
default:
values := s.getLabelValuesForLabelName(matcher.Name)
values, err := s.persistence.GetLabelValuesForLabelName(matcher.Name)
if err != nil {
glog.Errorf("Error getting label values for label name %q: %v", matcher.Name, err)
}
matches := matcher.Filter(values)
if len(matches) == 0 {
return nil
}
set := utility.Set{}
for _, v := range matches {
subset, ok := s.labelPairToFingerprints[metric.LabelPair{
Name: matcher.Name,
Value: v,
}]
if !ok {
return nil
fps, err := s.persistence.GetFingerprintsForLabelPair(
metric.LabelPair{
Name: matcher.Name,
Value: v,
},
)
if err != nil {
glog.Error("Error getting fingerprints for label pair: ", err)
}
for fp := range subset {
set.Add(fp)
for _, fp := range fps {
if _, ok := result[fp]; ok || result == nil {
intersection[fp] = struct{}{}
}
}
}
sets = append(sets, set)
}
if len(intersection) == 0 {
return nil
}
result = intersection
}
setCount := len(sets)
if setCount == 0 {
return nil
fps := make(clientmodel.Fingerprints, 0, len(result))
for fp := range result {
fps = append(fps, fp)
}
base := sets[0]
for i := 1; i < setCount; i++ {
base = base.Intersection(sets[i])
}
fingerprints := clientmodel.Fingerprints{}
for _, e := range base.Elements() {
fingerprints = append(fingerprints, e.(clientmodel.Fingerprint))
}
return fingerprints
return fps
}
func (s *memorySeriesStorage) GetLabelValuesForLabelName(labelName clientmodel.LabelName) clientmodel.LabelValues {
s.mtx.RLock()
defer s.mtx.RUnlock()
return s.getLabelValuesForLabelName(labelName)
}
func (s *memorySeriesStorage) getLabelValuesForLabelName(labelName clientmodel.LabelName) clientmodel.LabelValues {
set, ok := s.labelNameToLabelValues[labelName]
if !ok {
return nil
lvs, err := s.persistence.GetLabelValuesForLabelName(labelName)
if err != nil {
glog.Errorf("Error getting label values for label name %q: %v", labelName, err)
}
values := make(clientmodel.LabelValues, 0, len(set))
for e := range set {
val := e.(clientmodel.LabelValue)
values = append(values, val)
}
return values
return lvs
}
func (s *memorySeriesStorage) GetMetricForFingerprint(fp clientmodel.Fingerprint) clientmodel.Metric {
@ -455,15 +432,14 @@ func (s *memorySeriesStorage) GetMetricForFingerprint(fp clientmodel.Fingerprint
defer s.mtx.RUnlock()
series, ok := s.fingerprintToSeries[fp]
if !ok {
return nil
if ok {
// TODO: Does this have to be a copy? Ask Julius!
return series.metric
}
metric := clientmodel.Metric{}
for label, value := range series.metric {
metric[label] = value
metric, err := s.persistence.GetArchivedMetric(fp)
if err != nil {
glog.Errorf("Error retrieving archived metric for fingerprint %v: %v", fp, err)
}
return metric
}