// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package record contains the various record types used for encoding various Head block data in the WAL and in-memory snapshot.
package record

import (
	"math"
	"sort"

	"github.com/pkg/errors"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/encoding"
	"github.com/prometheus/prometheus/tsdb/tombstones"
)

// Type represents the data type of a record.
type Type uint8

const (
	// Unknown is returned for unrecognised WAL record types.
	Unknown Type = 255
	// Series is used to match WAL records of type Series.
	Series Type = 1
	// Samples is used to match WAL records of type Samples.
	Samples Type = 2
	// Tombstones is used to match WAL records of type Tombstones.
	Tombstones Type = 3
	// Exemplars is used to match WAL records of type Exemplars.
	Exemplars Type = 4
)

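// The type value is stored as the first byte of every record; see
// (*Decoder).Type and the Encoder methods below.
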
// ErrNotFound is returned if a looked up resource was not found. It duplicates
// ErrNotFound from head.go.
var ErrNotFound = errors.New("not found")

// RefSeries is the series labels with the series ID.
type RefSeries struct {
	Ref    chunks.HeadSeriesRef
	Labels labels.Labels
}

// RefSample is a timestamp/value pair associated with a reference to a series.
type RefSample struct {
	Ref chunks.HeadSeriesRef
	T   int64
	V   float64
}

// RefExemplar is an exemplar with the labels, timestamp, and value it was
// collected/observed with, plus a reference to a series.
type RefExemplar struct {
	Ref    chunks.HeadSeriesRef
	T      int64
	V      float64
	Labels labels.Labels
}

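// Wire layout sketch, read off the Encoder methods below (the code, not this
// comment, is the authoritative definition of the format):
//
//	Series:     type byte, then per series: BE64 ref, uvarint label count,
//	            then per label a uvarint-length name and value string.
//	Samples:    type byte, BE64 ref and BE64 timestamp of the first sample,
//	            then per sample: varint ref delta, varint timestamp delta,
//	            BE64 float64 bits of the value.
//	Tombstones: type byte, then per interval: BE64 ref, varint mint, varint maxt.
//	Exemplars:  as Samples, with the exemplar's labels appended to each entry.
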
// Decoder decodes series, sample, tombstone, and exemplar records.
// The zero value is ready to use.
type Decoder struct{}

// Type returns the type of the record.
// Returns Unknown if no valid record type is found.
func (d *Decoder) Type(rec []byte) Type {
	if len(rec) < 1 {
		return Unknown
	}
	switch t := Type(rec[0]); t {
	case Series, Samples, Tombstones, Exemplars:
		return t
	}
	return Unknown
}

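// A typical caller dispatches on the record type before decoding. This is a
// sketch only; the variable names are illustrative and not part of this package:
//
//	var dec Decoder
//	switch dec.Type(rec) {
//	case Series:
//		series, err = dec.Series(rec, series)
//	case Samples:
//		samples, err = dec.Samples(rec, samples)
//	case Tombstones:
//		tstones, err = dec.Tombstones(rec, tstones)
//	case Exemplars:
//		exemplars, err = dec.Exemplars(rec, exemplars)
//	}
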
// Series appends series in rec to the given slice.
func (d *Decoder) Series(rec []byte, series []RefSeries) ([]RefSeries, error) {
	dec := encoding.Decbuf{B: rec}

	if Type(dec.Byte()) != Series {
		return nil, errors.New("invalid record type")
	}
	for len(dec.B) > 0 && dec.Err() == nil {
		ref := storage.SeriesRef(dec.Be64())

		lset := make(labels.Labels, dec.Uvarint())

		for i := range lset {
			lset[i].Name = dec.UvarintStr()
			lset[i].Value = dec.UvarintStr()
		}
		sort.Sort(lset)

		series = append(series, RefSeries{
			Ref:    chunks.HeadSeriesRef(ref),
			Labels: lset,
		})
	}
	if dec.Err() != nil {
		return nil, dec.Err()
	}
	if len(dec.B) > 0 {
		return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
	}
	return series, nil
}

// Samples appends samples in rec to the given slice.
func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) {
	dec := encoding.Decbuf{B: rec}

	if Type(dec.Byte()) != Samples {
		return nil, errors.New("invalid record type")
	}
	if dec.Len() == 0 {
		return samples, nil
	}
	// Base reference and timestamp of the first sample; every sample stores
	// its own ref and timestamp as a delta to these.
	var (
		baseRef  = dec.Be64()
		baseTime = dec.Be64int64()
	)
	for len(dec.B) > 0 && dec.Err() == nil {
		dref := dec.Varint64()
		dtime := dec.Varint64()
		val := dec.Be64()

		samples = append(samples, RefSample{
			Ref: chunks.HeadSeriesRef(int64(baseRef) + dref),
			T:   baseTime + dtime,
			V:   math.Float64frombits(val),
		})
	}

	if dec.Err() != nil {
		return nil, errors.Wrapf(dec.Err(), "decode error after %d samples", len(samples))
	}
	if len(dec.B) > 0 {
		return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
	}
	return samples, nil
}

// Tombstones appends tombstones in rec to the given slice.
func (d *Decoder) Tombstones(rec []byte, tstones []tombstones.Stone) ([]tombstones.Stone, error) {
	dec := encoding.Decbuf{B: rec}

	if Type(dec.Byte()) != Tombstones {
		return nil, errors.New("invalid record type")
	}
	for dec.Len() > 0 && dec.Err() == nil {
		tstones = append(tstones, tombstones.Stone{
			Ref: storage.SeriesRef(dec.Be64()),
			Intervals: tombstones.Intervals{
				{Mint: dec.Varint64(), Maxt: dec.Varint64()},
			},
		})
	}
	if dec.Err() != nil {
		return nil, dec.Err()
	}
	if len(dec.B) > 0 {
		return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
	}
	return tstones, nil
}

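// Exemplars appends exemplars in rec to the given slice.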
func (d *Decoder) Exemplars(rec []byte, exemplars []RefExemplar) ([]RefExemplar, error) {
	dec := encoding.Decbuf{B: rec}
	t := Type(dec.Byte())
	if t != Exemplars {
		return nil, errors.New("invalid record type")
	}

	return d.ExemplarsFromBuffer(&dec, exemplars)
}

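// ExemplarsFromBuffer appends exemplars decoded from dec to the given slice.
// The record type byte is expected to have been consumed already, as done by
// Exemplars above.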
func (d *Decoder) ExemplarsFromBuffer(dec *encoding.Decbuf, exemplars []RefExemplar) ([]RefExemplar, error) {
	if dec.Len() == 0 {
		return exemplars, nil
	}
	var (
		baseRef  = dec.Be64()
		baseTime = dec.Be64int64()
	)
	for len(dec.B) > 0 && dec.Err() == nil {
		dref := dec.Varint64()
		dtime := dec.Varint64()
		val := dec.Be64()

		lset := make(labels.Labels, dec.Uvarint())
		for i := range lset {
			lset[i].Name = dec.UvarintStr()
			lset[i].Value = dec.UvarintStr()
		}
		sort.Sort(lset)

		exemplars = append(exemplars, RefExemplar{
			Ref:    chunks.HeadSeriesRef(baseRef + uint64(dref)),
			T:      baseTime + dtime,
			V:      math.Float64frombits(val),
			Labels: lset,
		})
	}

	if dec.Err() != nil {
		return nil, errors.Wrapf(dec.Err(), "decode error after %d exemplars", len(exemplars))
	}
	if len(dec.B) > 0 {
		return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
	}
	return exemplars, nil
}

// Encoder encodes series, sample, tombstone, and exemplar records.
// The zero value is ready to use.
type Encoder struct{}

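// A typical caller encodes one record at a time, reusing a destination buffer
// between calls (sketch only; the names here are illustrative):
//
//	var enc Encoder
//	buf = enc.Series(series, buf[:0]) // hand buf to the WAL, then reuse it
//	buf = enc.Samples(samples, buf[:0])
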
// Series appends the encoded series to b and returns the resulting slice.
func (e *Encoder) Series(series []RefSeries, b []byte) []byte {
	buf := encoding.Encbuf{B: b}
	buf.PutByte(byte(Series))

	for _, s := range series {
		buf.PutBE64(uint64(s.Ref))
		buf.PutUvarint(len(s.Labels))

		for _, l := range s.Labels {
			buf.PutUvarintStr(l.Name)
			buf.PutUvarintStr(l.Value)
		}
	}
	return buf.Get()
}

// Samples appends the encoded samples to b and returns the resulting slice.
func (e *Encoder) Samples(samples []RefSample, b []byte) []byte {
	buf := encoding.Encbuf{B: b}
	buf.PutByte(byte(Samples))

	if len(samples) == 0 {
		return buf.Get()
	}

	// Store base timestamp and base reference number of first sample.
	// All samples encode their timestamp and ref as delta to those.
	first := samples[0]

	buf.PutBE64(uint64(first.Ref))
	buf.PutBE64int64(first.T)

	for _, s := range samples {
		buf.PutVarint64(int64(s.Ref) - int64(first.Ref))
		buf.PutVarint64(s.T - first.T)
		buf.PutBE64(math.Float64bits(s.V))
	}
	return buf.Get()
}

// Tombstones appends the encoded tombstones to b and returns the resulting slice.
func (e *Encoder) Tombstones(tstones []tombstones.Stone, b []byte) []byte {
	buf := encoding.Encbuf{B: b}
	buf.PutByte(byte(Tombstones))

	for _, s := range tstones {
		for _, iv := range s.Intervals {
			buf.PutBE64(uint64(s.Ref))
			buf.PutVarint64(iv.Mint)
			buf.PutVarint64(iv.Maxt)
		}
	}
	return buf.Get()
}

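// Exemplars appends the encoded exemplars to b and returns the resulting slice.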
func (e *Encoder) Exemplars(exemplars []RefExemplar, b []byte) []byte {
	buf := encoding.Encbuf{B: b}
	buf.PutByte(byte(Exemplars))

	if len(exemplars) == 0 {
		return buf.Get()
	}

	e.EncodeExemplarsIntoBuffer(exemplars, &buf)

	return buf.Get()
}

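// EncodeExemplarsIntoBuffer encodes the given exemplars into buf. The record
// type byte is expected to have been written already, as done by Exemplars
// above, and exemplars must not be empty.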
func (e *Encoder) EncodeExemplarsIntoBuffer(exemplars []RefExemplar, buf *encoding.Encbuf) {
	// Store base timestamp and base reference number of first exemplar.
	// All exemplars encode their timestamp and ref as delta to those.
	first := exemplars[0]

	buf.PutBE64(uint64(first.Ref))
	buf.PutBE64int64(first.T)

	for _, ex := range exemplars {
		buf.PutVarint64(int64(ex.Ref) - int64(first.Ref))
		buf.PutVarint64(ex.T - first.T)
		buf.PutBE64(math.Float64bits(ex.V))

		buf.PutUvarint(len(ex.Labels))
		for _, l := range ex.Labels {
			buf.PutUvarintStr(l.Name)
			buf.PutUvarintStr(l.Value)
		}
	}
}