// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package local

import (
	"encoding/binary"
	"fmt"
	"io"
	"math"
	"sort"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/storage/metric"
)

// The 37-byte header of a delta-encoded chunk looks like:
//
// - used buf bytes:           2 bytes
// - time double-delta bytes:  1 byte
// - value double-delta bytes: 1 byte
// - is integer:               1 byte
// - base time:                8 bytes
// - base value:               8 bytes
// - base time delta:          8 bytes
// - base value delta:         8 bytes
const (
	doubleDeltaHeaderBytes = 37

	doubleDeltaHeaderBufLenOffset         = 0
	doubleDeltaHeaderTimeBytesOffset      = 2
	doubleDeltaHeaderValueBytesOffset     = 3
	doubleDeltaHeaderIsIntOffset          = 4
	doubleDeltaHeaderBaseTimeOffset       = 5
	doubleDeltaHeaderBaseValueOffset      = 13
	doubleDeltaHeaderBaseTimeDeltaOffset  = 21
	doubleDeltaHeaderBaseValueDeltaOffset = 29
)
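
// Layout check (added note): the offsets above follow directly from the
// field sizes: 2+1+1+1 = 5 bytes of flag fields, then four 8-byte fields at
// offsets 5, 13, 21, and 29, for 2+1+1+1+8+8+8+8 = 37 bytes in total.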

// A doubleDeltaEncodedChunk adaptively stores sample timestamps and values
// with a double-delta encoding of various types (int, float) and bit widths.
// A base value and timestamp and a base delta for each are saved in the
// header. The payload consists of double-deltas, i.e. deviations from the
// values and timestamps calculated by applying the base value and time and
// the base deltas. However, once 8 bytes would be needed to encode a
// double-delta value, a fall-back to the absolute numbers happens (so that
// timestamps are saved directly as int64 and values as float64).
//
// doubleDeltaEncodedChunk implements the chunk interface.
type doubleDeltaEncodedChunk []byte
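
// Worked example (added note): with base time t0, base time delta dt, base
// value v0, and base value delta dv from the header, sample i >= 2 is
// reconstructed from its stored double-deltas ddt_i and ddv_i as
//
//	t_i = t0 + i*dt + ddt_i
//	v_i = v0 + i*dv + ddv_i
//
// Sample 0 is (t0, v0) and sample 1 is (t0+dt, v0+dv); see timestampAtIndex
// and sampleValueAtIndex below.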

// newDoubleDeltaEncodedChunk returns a newly allocated doubleDeltaEncodedChunk.
func newDoubleDeltaEncodedChunk(tb, vb deltaBytes, isInt bool, length int) *doubleDeltaEncodedChunk {
	if tb < 1 {
		panic("need at least 1 time delta byte")
	}
	if length < doubleDeltaHeaderBytes+16 {
		panic(fmt.Errorf(
			"chunk length %d bytes is insufficient, need at least %d",
			length, doubleDeltaHeaderBytes+16,
		))
	}
	c := make(doubleDeltaEncodedChunk, doubleDeltaHeaderIsIntOffset+1, length)

	c[doubleDeltaHeaderTimeBytesOffset] = byte(tb)
	c[doubleDeltaHeaderValueBytesOffset] = byte(vb)
	if vb < d8 && isInt { // Only use int for fewer than 8 value double-delta bytes.
		c[doubleDeltaHeaderIsIntOffset] = 1
	} else {
		c[doubleDeltaHeaderIsIntOffset] = 0
	}
	return &c
}
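
// Usage sketch (added; assumes the deltaBytes constants and chunkLen from
// this package): a chunk for integer samples with 1-byte time double-deltas
// and value deltas folded into the base delta could be created as
//
//	c := newDoubleDeltaEncodedChunk(d1, d0, true, chunkLen)
//
// The length check above demands doubleDeltaHeaderBytes+16 because one
// worst-case sample needs 8 bytes for the timestamp plus 8 bytes for the
// value after the 37-byte header.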

// add implements chunk.
func (c doubleDeltaEncodedChunk) add(s *model.SamplePair) []chunk {
	if c.len() == 0 {
		return c.addFirstSample(s)
	}

	tb := c.timeBytes()
	vb := c.valueBytes()

	if c.len() == 1 {
		return c.addSecondSample(s, tb, vb)
	}

	remainingBytes := cap(c) - len(c)
	sampleSize := c.sampleSize()

	// Do we generally have space for another sample in this chunk? If not,
	// overflow into a new one.
	if remainingBytes < sampleSize {
		overflowChunks := newChunk().add(s)
		return []chunk{&c, overflowChunks[0]}
	}

	projectedTime := c.baseTime() + model.Time(c.len())*c.baseTimeDelta()
	ddt := s.Timestamp - projectedTime

	projectedValue := c.baseValue() + model.SampleValue(c.len())*c.baseValueDelta()
	ddv := s.Value - projectedValue

	ntb, nvb, nInt := tb, vb, c.isInt()
	// If the new sample is incompatible with the current encoding, reencode
	// the existing chunk data into new chunk(s).
	if c.isInt() && !isInt64(ddv) {
		// int->float.
		nvb = d4
		nInt = false
	} else if !c.isInt() && vb == d4 && projectedValue+model.SampleValue(float32(ddv)) != s.Value {
		// float32->float64.
		nvb = d8
	} else {
		if tb < d8 {
			// Maybe more bytes for timestamp.
			ntb = max(tb, bytesNeededForSignedTimestampDelta(ddt))
		}
		if c.isInt() && vb < d8 {
			// Maybe more bytes for sample value.
			nvb = max(vb, bytesNeededForIntegerSampleValueDelta(ddv))
		}
	}
	if tb != ntb || vb != nvb || c.isInt() != nInt {
		if len(c)*2 < cap(c) {
			return transcodeAndAdd(newDoubleDeltaEncodedChunk(ntb, nvb, nInt, cap(c)), &c, s)
		}
		// Chunk is already half full. Better create a new one and save the
		// transcoding effort.
		overflowChunks := newChunk().add(s)
		return []chunk{&c, overflowChunks[0]}
	}

	offset := len(c)
	c = c[:offset+sampleSize]

	switch tb {
	case d1:
		c[offset] = byte(ddt)
	case d2:
		binary.LittleEndian.PutUint16(c[offset:], uint16(ddt))
	case d4:
		binary.LittleEndian.PutUint32(c[offset:], uint32(ddt))
	case d8:
		// Store the absolute value (no delta) in case of d8.
		binary.LittleEndian.PutUint64(c[offset:], uint64(s.Timestamp))
	default:
		panic("invalid number of bytes for time delta")
	}

	offset += int(tb)

	if c.isInt() {
		switch vb {
		case d0:
			// No-op. Constant delta is stored as base value.
		case d1:
			c[offset] = byte(int8(ddv))
		case d2:
			binary.LittleEndian.PutUint16(c[offset:], uint16(int16(ddv)))
		case d4:
			binary.LittleEndian.PutUint32(c[offset:], uint32(int32(ddv)))
		// d8 must not happen. Those samples are encoded as float64.
		default:
			panic("invalid number of bytes for integer delta")
		}
	} else {
		switch vb {
		case d4:
			binary.LittleEndian.PutUint32(c[offset:], math.Float32bits(float32(ddv)))
		case d8:
			// Store the absolute value (no delta) in case of d8.
			binary.LittleEndian.PutUint64(c[offset:], math.Float64bits(float64(s.Value)))
		default:
			panic("invalid number of bytes for floating point delta")
		}
	}
	return []chunk{&c}
}
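
// Payload layout (added note): after the two samples encoded in the header,
// each sample occupies sampleSize() = timeBytes()+valueBytes() bytes, the
// time double-delta first, then the value double-delta, both little-endian
// (or the absolute timestamp/value where d8 applies, as handled above).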

// clone implements chunk.
func (c doubleDeltaEncodedChunk) clone() chunk {
	clone := make(doubleDeltaEncodedChunk, len(c), cap(c))
	copy(clone, c)
	return &clone
}

// firstTime implements chunk.
func (c doubleDeltaEncodedChunk) firstTime() model.Time {
	return c.baseTime()
}

// newIterator implements chunk.
func (c *doubleDeltaEncodedChunk) newIterator() chunkIterator {
	return &doubleDeltaEncodedChunkIterator{
		c:      *c,
		len:    c.len(),
		baseT:  c.baseTime(),
		baseΔT: c.baseTimeDelta(),
		baseV:  c.baseValue(),
		baseΔV: c.baseValueDelta(),
		tBytes: c.timeBytes(),
		vBytes: c.valueBytes(),
		isInt:  c.isInt(),
	}
}

// marshal implements chunk.
func (c doubleDeltaEncodedChunk) marshal(w io.Writer) error {
	if len(c) > math.MaxUint16 {
		panic("chunk buffer length would overflow a 16 bit uint")
	}
	binary.LittleEndian.PutUint16(c[doubleDeltaHeaderBufLenOffset:], uint16(len(c)))

	n, err := w.Write(c[:cap(c)])
	if err != nil {
		return err
	}
	if n != cap(c) {
		return fmt.Errorf("wanted to write %d bytes, wrote %d", cap(c), n)
	}
	return nil
}

// marshalToBuf implements chunk.
func (c doubleDeltaEncodedChunk) marshalToBuf(buf []byte) error {
	if len(c) > math.MaxUint16 {
		panic("chunk buffer length would overflow a 16 bit uint")
	}
	binary.LittleEndian.PutUint16(c[doubleDeltaHeaderBufLenOffset:], uint16(len(c)))

	n := copy(buf, c)
	if n != len(c) {
		return fmt.Errorf("wanted to copy %d bytes to buffer, copied %d", len(c), n)
	}
	return nil
}

// unmarshal implements chunk.
func (c *doubleDeltaEncodedChunk) unmarshal(r io.Reader) error {
	*c = (*c)[:cap(*c)]
	if _, err := io.ReadFull(r, *c); err != nil {
		return err
	}
	*c = (*c)[:binary.LittleEndian.Uint16((*c)[doubleDeltaHeaderBufLenOffset:])]
	return nil
}

// unmarshalFromBuf implements chunk.
func (c *doubleDeltaEncodedChunk) unmarshalFromBuf(buf []byte) {
	*c = (*c)[:cap(*c)]
	copy(*c, buf)
	*c = (*c)[:binary.LittleEndian.Uint16((*c)[doubleDeltaHeaderBufLenOffset:])]
}
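
// Round-trip sketch (added; assumes a bytes.Buffer): marshal writes the full
// cap(c) bytes with the used length recorded in the header, and unmarshal
// reads them back and re-truncates to that length:
//
//	var buf bytes.Buffer
//	if err := c.marshal(&buf); err != nil { /* handle */ }
//	c2 := make(doubleDeltaEncodedChunk, chunkLen)
//	if err := (&c2).unmarshal(&buf); err != nil { /* handle */ }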

// encoding implements chunk.
func (c doubleDeltaEncodedChunk) encoding() chunkEncoding { return doubleDelta }

func (c doubleDeltaEncodedChunk) baseTime() model.Time {
	return model.Time(
		binary.LittleEndian.Uint64(
			c[doubleDeltaHeaderBaseTimeOffset:],
		),
	)
}

func (c doubleDeltaEncodedChunk) baseValue() model.SampleValue {
	return model.SampleValue(
		math.Float64frombits(
			binary.LittleEndian.Uint64(
				c[doubleDeltaHeaderBaseValueOffset:],
			),
		),
	)
}

func (c doubleDeltaEncodedChunk) baseTimeDelta() model.Time {
	if len(c) < doubleDeltaHeaderBaseTimeDeltaOffset+8 {
		return 0
	}
	return model.Time(
		binary.LittleEndian.Uint64(
			c[doubleDeltaHeaderBaseTimeDeltaOffset:],
		),
	)
}

func (c doubleDeltaEncodedChunk) baseValueDelta() model.SampleValue {
	if len(c) < doubleDeltaHeaderBaseValueDeltaOffset+8 {
		return 0
	}
	return model.SampleValue(
		math.Float64frombits(
			binary.LittleEndian.Uint64(
				c[doubleDeltaHeaderBaseValueDeltaOffset:],
			),
		),
	)
}
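
// Note (added): the length guards in baseTimeDelta and baseValueDelta return
// 0 while the chunk holds fewer than two samples; before addSecondSample
// grows the slice to the full 37-byte header, the delta fields do not exist
// yet.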

func (c doubleDeltaEncodedChunk) timeBytes() deltaBytes {
	return deltaBytes(c[doubleDeltaHeaderTimeBytesOffset])
}

func (c doubleDeltaEncodedChunk) valueBytes() deltaBytes {
	return deltaBytes(c[doubleDeltaHeaderValueBytesOffset])
}

func (c doubleDeltaEncodedChunk) sampleSize() int {
	return int(c.timeBytes() + c.valueBytes())
}

func (c doubleDeltaEncodedChunk) len() int {
	if len(c) <= doubleDeltaHeaderIsIntOffset+1 {
		return 0
	}
	if len(c) <= doubleDeltaHeaderBaseValueOffset+8 {
		return 1
	}
	return (len(c)-doubleDeltaHeaderBytes)/c.sampleSize() + 2
}

func (c doubleDeltaEncodedChunk) isInt() bool {
	return c[doubleDeltaHeaderIsIntOffset] == 1
}
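
// Note (added): len derives the sample count from how far the slice has
// grown. Up to doubleDeltaHeaderIsIntOffset+1 bytes, only the fixed flag
// fields are populated (0 samples); up to doubleDeltaHeaderBaseValueOffset+8
// bytes, the base time and value are set (1 sample); beyond the full header,
// every sampleSize() payload bytes add one sample on top of the two encoded
// in the header itself.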

// addFirstSample is a helper method only used by c.add(). It adds timestamp
// and value as base time and value.
func (c doubleDeltaEncodedChunk) addFirstSample(s *model.SamplePair) []chunk {
	c = c[:doubleDeltaHeaderBaseValueOffset+8]
	binary.LittleEndian.PutUint64(
		c[doubleDeltaHeaderBaseTimeOffset:],
		uint64(s.Timestamp),
	)
	binary.LittleEndian.PutUint64(
		c[doubleDeltaHeaderBaseValueOffset:],
		math.Float64bits(float64(s.Value)),
	)
	return []chunk{&c}
}

// addSecondSample is a helper method only used by c.add(). It calculates the
// base delta from the provided sample and adds it to the chunk.
func (c doubleDeltaEncodedChunk) addSecondSample(s *model.SamplePair, tb, vb deltaBytes) []chunk {
	baseTimeDelta := s.Timestamp - c.baseTime()
	if baseTimeDelta < 0 {
		panic("base time delta is less than zero")
	}
	c = c[:doubleDeltaHeaderBytes]
	if tb >= d8 || bytesNeededForUnsignedTimestampDelta(baseTimeDelta) >= d8 {
		// If the base delta already needs d8 (or we are at d8
		// anyway), we better encode this timestamp directly
		// rather than as a delta and switch everything to d8.
		c[doubleDeltaHeaderTimeBytesOffset] = byte(d8)
		binary.LittleEndian.PutUint64(
			c[doubleDeltaHeaderBaseTimeDeltaOffset:],
			uint64(s.Timestamp),
		)
	} else {
		binary.LittleEndian.PutUint64(
			c[doubleDeltaHeaderBaseTimeDeltaOffset:],
			uint64(baseTimeDelta),
		)
	}
	baseValue := c.baseValue()
	baseValueDelta := s.Value - baseValue
	if vb >= d8 || baseValue+baseValueDelta != s.Value {
		// If we can't reproduce the original sample value (or
		// if we are at d8 already, anyway), we better encode
		// this value directly rather than as a delta and switch
		// everything to d8.
		c[doubleDeltaHeaderValueBytesOffset] = byte(d8)
		c[doubleDeltaHeaderIsIntOffset] = 0
		binary.LittleEndian.PutUint64(
			c[doubleDeltaHeaderBaseValueDeltaOffset:],
			math.Float64bits(float64(s.Value)),
		)
	} else {
		binary.LittleEndian.PutUint64(
			c[doubleDeltaHeaderBaseValueDeltaOffset:],
			math.Float64bits(float64(baseValueDelta)),
		)
	}
	return []chunk{&c}
}

// doubleDeltaEncodedChunkIterator implements chunkIterator.
type doubleDeltaEncodedChunkIterator struct {
	c              doubleDeltaEncodedChunk
	len            int
	baseT, baseΔT  model.Time
	baseV, baseΔV  model.SampleValue
	tBytes, vBytes deltaBytes
	isInt          bool
}

// length implements chunkIterator.
func (it *doubleDeltaEncodedChunkIterator) length() int { return it.len }

// valueAtOrBeforeTime implements chunkIterator.
func (it *doubleDeltaEncodedChunkIterator) valueAtOrBeforeTime(t model.Time) model.SamplePair {
	i := sort.Search(it.len, func(i int) bool {
		return it.timestampAtIndex(i).After(t)
	})
	if i == 0 {
		return ZeroSamplePair
	}
	return model.SamplePair{
		Timestamp: it.timestampAtIndex(i - 1),
		Value:     it.sampleValueAtIndex(i - 1),
	}
}
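
// Note (added): sort.Search finds the first index whose timestamp is after
// t, so i-1 is the last sample at or before t; i == 0 means all samples are
// newer than t, in which case ZeroSamplePair is returned.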

// rangeValues implements chunkIterator.
func (it *doubleDeltaEncodedChunkIterator) rangeValues(in metric.Interval) []model.SamplePair {
	oldest := sort.Search(it.len, func(i int) bool {
		return !it.timestampAtIndex(i).Before(in.OldestInclusive)
	})

	newest := sort.Search(it.len, func(i int) bool {
		return it.timestampAtIndex(i).After(in.NewestInclusive)
	})

	if oldest == it.len {
		return nil
	}

	result := make([]model.SamplePair, 0, newest-oldest)
	for i := oldest; i < newest; i++ {
		result = append(result, model.SamplePair{
			Timestamp: it.timestampAtIndex(i),
			Value:     it.sampleValueAtIndex(i),
		})
	}
	return result
}

// contains implements chunkIterator.
func (it *doubleDeltaEncodedChunkIterator) contains(t model.Time) bool {
	return !t.Before(it.baseT) && !t.After(it.timestampAtIndex(it.len-1))
}

// values implements chunkIterator.
func (it *doubleDeltaEncodedChunkIterator) values() <-chan *model.SamplePair {
	valuesChan := make(chan *model.SamplePair)
	go func() {
		for i := 0; i < it.len; i++ {
			valuesChan <- &model.SamplePair{
				Timestamp: it.timestampAtIndex(i),
				Value:     it.sampleValueAtIndex(i),
			}
		}
		close(valuesChan)
	}()
	return valuesChan
}

// timestampAtIndex implements chunkIterator.
func (it *doubleDeltaEncodedChunkIterator) timestampAtIndex(idx int) model.Time {
	if idx == 0 {
		return it.baseT
	}
	if idx == 1 {
		// If time bytes are at d8, the time is saved directly rather
		// than as a difference.
		if it.tBytes == d8 {
			return it.baseΔT
		}
		return it.baseT + it.baseΔT
	}

	offset := doubleDeltaHeaderBytes + (idx-2)*int(it.tBytes+it.vBytes)

	switch it.tBytes {
	case d1:
		return it.baseT +
			model.Time(idx)*it.baseΔT +
			model.Time(int8(it.c[offset]))
	case d2:
		return it.baseT +
			model.Time(idx)*it.baseΔT +
			model.Time(int16(binary.LittleEndian.Uint16(it.c[offset:])))
	case d4:
		return it.baseT +
			model.Time(idx)*it.baseΔT +
			model.Time(int32(binary.LittleEndian.Uint32(it.c[offset:])))
	case d8:
		// Take absolute value for d8.
		return model.Time(binary.LittleEndian.Uint64(it.c[offset:]))
	default:
		panic("invalid number of bytes for time delta")
	}
}

// lastTimestamp implements chunkIterator.
func (it *doubleDeltaEncodedChunkIterator) lastTimestamp() model.Time {
	return it.timestampAtIndex(it.len - 1)
}

// sampleValueAtIndex implements chunkIterator.
func (it *doubleDeltaEncodedChunkIterator) sampleValueAtIndex(idx int) model.SampleValue {
	if idx == 0 {
		return it.baseV
	}
	if idx == 1 {
		// If value bytes are at d8, the value is saved directly rather
		// than as a difference.
		if it.vBytes == d8 {
			return it.baseΔV
		}
		return it.baseV + it.baseΔV
	}

	offset := doubleDeltaHeaderBytes + (idx-2)*int(it.tBytes+it.vBytes) + int(it.tBytes)

	if it.isInt {
		switch it.vBytes {
		case d0:
			return it.baseV +
				model.SampleValue(idx)*it.baseΔV
		case d1:
			return it.baseV +
				model.SampleValue(idx)*it.baseΔV +
				model.SampleValue(int8(it.c[offset]))
		case d2:
			return it.baseV +
				model.SampleValue(idx)*it.baseΔV +
				model.SampleValue(int16(binary.LittleEndian.Uint16(it.c[offset:])))
		case d4:
			return it.baseV +
				model.SampleValue(idx)*it.baseΔV +
				model.SampleValue(int32(binary.LittleEndian.Uint32(it.c[offset:])))
		// No d8 for ints.
		default:
			panic("invalid number of bytes for integer delta")
		}
	} else {
		switch it.vBytes {
		case d4:
			return it.baseV +
				model.SampleValue(idx)*it.baseΔV +
				model.SampleValue(math.Float32frombits(binary.LittleEndian.Uint32(it.c[offset:])))
		case d8:
			// Take absolute value for d8.
			return model.SampleValue(math.Float64frombits(binary.LittleEndian.Uint64(it.c[offset:])))
		default:
			panic("invalid number of bytes for floating point delta")
		}
	}
}

// lastSampleValue implements chunkIterator.
func (it *doubleDeltaEncodedChunkIterator) lastSampleValue() model.SampleValue {
	return it.sampleValueAtIndex(it.len - 1)
}
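
// The sketch below is an added illustration, not part of the original file.
// It assumes it compiles inside this package next to the chunk interface,
// the deltaBytes constants (d0, d1), and chunkLen. It appends a handful of
// samples, handling possible overflow chunks, and reads everything back
// through the iterator.
func exampleDoubleDeltaRoundTrip() []model.SamplePair {
	chunks := []chunk{newDoubleDeltaEncodedChunk(d1, d0, true, chunkLen)}
	for i := 0; i < 10; i++ {
		s := &model.SamplePair{
			Timestamp: model.Time(i * 15000), // One sample every 15s.
			Value:     model.SampleValue(i),
		}
		// add returns the (possibly transcoded) chunk plus any overflow chunk.
		newChunks := chunks[len(chunks)-1].add(s)
		chunks = append(chunks[:len(chunks)-1], newChunks...)
	}
	var pairs []model.SamplePair
	for _, c := range chunks {
		for p := range c.newIterator().values() {
			pairs = append(pairs, *p)
		}
	}
	return pairs
}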