Append to chunks cannot error

Author: Fabian Reinartz
Date:   2016-12-31 10:10:27 +01:00
commit 675f0886f0 (parent 7280533c42)
5 changed files with 40 additions and 89 deletions
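After this change, Appender.Append no longer returns an error; only obtaining the Appender itself can fail. Below is a minimal usage sketch, not part of the commit; the import path and the sample timestamps/values are assumptions for illustration.

	package main

	import (
		"fmt"

		"github.com/fabxc/tsdb/chunks" // assumed import path for this repository
	)

	func main() {
		c := chunks.NewXORChunk() // size argument removed by this commit

		// Creating the appender can still fail ...
		app, err := c.Appender()
		if err != nil {
			panic(err)
		}

		// ... but appending samples no longer returns an error.
		app.Append(1000, 1.0)
		app.Append(2000, 2.0)

		// Iterate over the appended samples.
		it := c.Iterator()
		for it.Next() {
			t, v := it.Values()
			fmt.Println(t, v)
		}
		if err := it.Err(); err != nil {
			panic(err)
		}
	}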


@@ -2,7 +2,6 @@ package chunks
 import (
 	"encoding/binary"
-	"errors"
 	"fmt"
 )
@@ -25,12 +24,6 @@ const (
 	EncXOR
 )
-var (
-	// ErrChunkFull is returned if the remaining size of a chunk cannot
-	// fit the appended data.
-	ErrChunkFull = errors.New("chunk full")
-)
-
 // Chunk holds a sequence of sample pairs that can be iterated over and appended to.
 type Chunk interface {
 	Bytes() []byte
@@ -51,31 +44,14 @@ func FromData(e Encoding, d []byte) (Chunk, error) {
 	return nil, fmt.Errorf("unknown chunk encoding: %d", e)
 }
-// Iterator provides iterating access over sample pairs in chunks.
-type Iterator interface {
-	StreamingIterator
-
-	// Seek(t int64) bool
-	// SeekBefore(t int64) bool
-	// Next() bool
-	// Values() (int64, float64)
-	// Err() error
-}
-
 // Appender adds sample pairs to a chunk.
 type Appender interface {
-	Append(int64, float64) error
+	Append(int64, float64)
 }
-// StreamingIterator is a simple iterator that can only get the next value.
-type StreamingIterator interface {
+// Iterator is a simple iterator that can only get the next value.
+type Iterator interface {
 	Values() (int64, float64)
 	Err() error
 	Next() bool
 }
-
-// fancyIterator wraps a StreamingIterator and implements a regular
-// Iterator with it.
-type fancyIterator struct {
-	StreamingIterator
-}


@@ -16,12 +16,12 @@ type pair struct {
 }
 func TestChunk(t *testing.T) {
-	for enc, nc := range map[Encoding]func(sz int) Chunk{
-		EncXOR: func(sz int) Chunk { return NewXORChunk(sz) },
+	for enc, nc := range map[Encoding]func() Chunk{
+		EncXOR: func() Chunk { return NewXORChunk() },
 	} {
 		t.Run(fmt.Sprintf("%s", enc), func(t *testing.T) {
 			for range make([]struct{}, 1) {
-				c := nc(rand.Intn(1024))
+				c := nc()
 				if err := testChunk(c); err != nil {
 					t.Fatal(err)
 				}
@@ -59,13 +59,7 @@ func testChunk(c Chunk) error {
 			}
 		}
-		err = app.Append(ts, v)
-		if err != nil {
-			if err == ErrChunkFull {
-				break
-			}
-			return err
-		}
+		app.Append(ts, v)
 		exp = append(exp, pair{t: ts, v: v})
 		// fmt.Println("appended", len(c.Bytes()), c.Bytes())
 	}
@@ -85,7 +79,7 @@ func testChunk(c Chunk) error {
 	return nil
 }
-func benchmarkIterator(b *testing.B, newChunk func(int) Chunk) {
+func benchmarkIterator(b *testing.B, newChunk func() Chunk) {
 	var (
 		t = int64(1234123324)
 		v = 1243535.123
@@ -101,19 +95,20 @@ func benchmarkIterator(b *testing.B, newChunk func(int) Chunk) {
 	var chunks []Chunk
 	for i := 0; i < b.N; {
-		c := newChunk(1024)
+		c := newChunk()
 		a, err := c.Appender()
 		if err != nil {
 			b.Fatalf("get appender: %s", err)
 		}
+		j := 0
 		for _, p := range exp {
-			if err := a.Append(p.t, p.v); err == ErrChunkFull {
+			if j > 250 {
 				break
-			} else if err != nil {
-				b.Fatal(err)
 			}
+			a.Append(p.t, p.v)
 			i++
+			j++
 		}
 		chunks = append(chunks, c)
 	}
@@ -141,18 +136,18 @@ func benchmarkIterator(b *testing.B, newChunk func(int) Chunk) {
 }
 func BenchmarkXORIterator(b *testing.B) {
-	benchmarkIterator(b, func(sz int) Chunk {
-		return NewXORChunk(sz)
+	benchmarkIterator(b, func() Chunk {
+		return NewXORChunk()
 	})
 }
 func BenchmarkXORAppender(b *testing.B) {
-	benchmarkAppender(b, func(sz int) Chunk {
-		return NewXORChunk(sz)
+	benchmarkAppender(b, func() Chunk {
+		return NewXORChunk()
 	})
 }
-func benchmarkAppender(b *testing.B, newChunk func(int) Chunk) {
+func benchmarkAppender(b *testing.B, newChunk func() Chunk) {
 	var (
 		t = int64(1234123324)
 		v = 1243535.123
@@ -171,19 +166,20 @@ func benchmarkAppender(b *testing.B, newChunk func(int) Chunk) {
 	var chunks []Chunk
 	for i := 0; i < b.N; {
-		c := newChunk(1024)
+		c := newChunk()
 		a, err := c.Appender()
 		if err != nil {
 			b.Fatalf("get appender: %s", err)
 		}
+		j := 0
 		for _, p := range exp {
-			if err := a.Append(p.t, p.v); err == ErrChunkFull {
+			if j > 250 {
 				break
-			} else if err != nil {
-				b.Fatal(err)
 			}
+			a.Append(p.t, p.v)
 			i++
+			j++
 		}
 		chunks = append(chunks, c)
 	}


@@ -11,16 +11,14 @@ import (
 type XORChunk struct {
 	b   *bstream
 	num uint16
-	sz  int
 }
 // NewXORChunk returns a new chunk with XOR encoding of the given size.
-func NewXORChunk(size int) *XORChunk {
+func NewXORChunk() *XORChunk {
 	b := make([]byte, 2, 128)
 	return &XORChunk{
 		b:   &bstream{stream: b, count: 0},
-		sz:  size,
 		num: 0,
 	}
 }
@@ -78,7 +76,7 @@ func (c *XORChunk) iterator() *xorIterator {
 // Iterator implements the Chunk interface.
 func (c *XORChunk) Iterator() Iterator {
-	return fancyIterator{c.iterator()}
+	return c.iterator()
 }
 type xorAppender struct {
@@ -93,9 +91,8 @@ type xorAppender struct {
 	trailing uint8
 }
-func (a *xorAppender) Append(t int64, v float64) error {
+func (a *xorAppender) Append(t int64, v float64) {
 	var tDelta uint64
-	l := len(a.b.bytes())
 	if a.c.num == 0 {
 		buf := make([]byte, binary.MaxVarintLen64)
@@ -140,19 +137,10 @@ func (a *xorAppender) Append(t int64, v float64) error {
 		a.writeVDelta(v)
 	}
-	if len(a.b.bytes()) > a.c.sz {
-		// If the appended data exceeded the size limit, we truncate
-		// the underlying data slice back to the length we started with.
-		a.b.stream = a.b.stream[:l]
-		return ErrChunkFull
-	}
 	a.t = t
 	a.v = v
 	a.c.num++
 	a.tDelta = tDelta
-
-	return nil
 }
 func bitRange(x int64, nbits uint8) bool {

db.go (14 changed lines)

@@ -413,23 +413,15 @@ type chunkDesc struct {
 	app chunks.Appender // Current appender for the chunks.
 }
-func (cd *chunkDesc) append(ts int64, v float64) (err error) {
-	if cd.app == nil {
-		cd.app, err = cd.chunk.Appender()
-		if err != nil {
-			return err
-		}
+func (cd *chunkDesc) append(ts int64, v float64) {
+	if cd.numSamples == 0 {
 		cd.firsTimestamp = ts
 	}
-	if err := cd.app.Append(ts, v); err != nil {
-		return err
-	}
+	cd.app.Append(ts, v)
 	cd.lastTimestamp = ts
 	cd.lastValue = v
 	cd.numSamples++
-
-	return nil
 }
 // The MultiError type implements the error interface, and contains the

head.go (23 changed lines)

@@ -1,7 +1,6 @@
 package tsdb
 import (
-	"math"
 	"os"
 	"sort"
 	"sync"
@@ -51,9 +50,7 @@ func OpenHeadBlock(dir string, baseTime int64) (*HeadBlock, error) {
 			b.create(lset.Hash(), lset)
 		},
 		sample: func(s hashedSample) {
-			if err := b.descs[s.ref].append(s.t, s.v); err != nil {
-				panic(err) // TODO(fabxc): cannot actually error
-			}
+			b.descs[s.ref].append(s.t, s.v)
 			b.stats.SampleCount++
 		},
 	})
@@ -151,9 +148,16 @@ func (h *HeadBlock) get(hash uint64, lset labels.Labels) (*chunkDesc, uint32) {
 }
 func (h *HeadBlock) create(hash uint64, lset labels.Labels) *chunkDesc {
+	var err error
+
 	cd := &chunkDesc{
 		lset:  lset,
-		chunk: chunks.NewXORChunk(int(math.MaxInt64)),
+		chunk: chunks.NewXORChunk(),
+	}
+	cd.app, err = cd.chunk.Appender()
+	if err != nil {
+		// Getting an Appender for a new chunk must not panic.
+		panic(err)
 	}
 	// Index the new chunk.
 	ref := len(h.descs)
@@ -226,13 +230,8 @@ func (h *HeadBlock) appendBatch(samples []hashedSample, appended prometheus.Coun
 		h.create(newHashes[i], s)
 	}
-	var merr MultiError
 	for _, s := range samples {
-		// TODO(fabxc): ensure that this won't be able to actually error in practice.
-		if err := h.descs[s.ref].append(s.t, s.v); err != nil {
-			merr.Add(err)
-			continue
-		}
+		h.descs[s.ref].append(s.t, s.v)
 		appended.Inc()
 		h.stats.SampleCount++
@@ -242,7 +241,7 @@ func (h *HeadBlock) appendBatch(samples []hashedSample, appended prometheus.Coun
 		}
 	}
-	return merr.Err()
+	return nil
 }
 func (h *HeadBlock) persist(p string) (int64, error) {