Merge branch 'dev-2.0-compact' into dev-2.0
Commit 3abf54c660 in prometheus/prometheus (mirror of https://github.com/prometheus/prometheus.git)
@@ -7,7 +7,6 @@ package textparse
 import (
     "errors"
     "io"
-    "reflect"
     "sort"
     "unsafe"
 
@@ -106,11 +105,5 @@ func (p *Parser) Metric(l *labels.Labels) {
 }
 
 func yoloString(b []byte) string {
-    sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-
-    h := reflect.StringHeader{
-        Data: sh.Data,
-        Len:  sh.Len,
-    }
-    return *((*string)(unsafe.Pointer(&h)))
+    return *((*string)(unsafe.Pointer(&b)))
 }
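The simplified yoloString relies on the layout guarantee that a Go string header is a prefix of a slice header (pointer, then length), so the byte slice can be reinterpreted in place instead of assembling a reflect.StringHeader. A minimal standalone sketch of the same zero-copy pattern, not part of the commit:

package main

import (
    "fmt"
    "unsafe"
)

// yoloString reinterprets a byte slice as a string without copying.
// The result is only safe to use while the backing bytes are not mutated.
func yoloString(b []byte) string {
    return *(*string)(unsafe.Pointer(&b))
}

func main() {
    b := []byte(`up{job="prometheus"}`)
    fmt.Println(yoloString(b))
}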
@@ -20,7 +20,6 @@ import (
     "fmt"
     "io"
     "net/http"
-    "reflect"
     "sync"
     "time"
     "unsafe"
@@ -586,13 +585,7 @@ loop:
 }
 
 func yoloString(b []byte) string {
-    sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-
-    h := reflect.StringHeader{
-        Data: sh.Data,
-        Len:  sh.Len,
-    }
-    return *((*string)(unsafe.Pointer(&h)))
+    return *((*string)(unsafe.Pointer(&b)))
 }
 
 func (sl *scrapeLoop) report(start time.Time, duration time.Duration, scraped, appended int, err error) error {
@@ -141,7 +141,8 @@ func TestPopulateLabels(t *testing.T) {
         },
     }
     for i, c := range cases {
-        in := c.in.Clone()
+        in := c.in.Copy()
+
         res, orig, err := populateLabels(c.in, c.cfg)
         if err != nil {
             t.Fatalf("case %d: %s", i, err)
@@ -1,8 +1,4 @@
-<<<<<<< HEAD
-// Copyright 2016 The Prometheus Authors
-=======
 // Copyright 2017 The Prometheus Authors
->>>>>>> master
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -23,29 +19,12 @@ import (
     "github.com/prometheus/common/model"
 
     "github.com/prometheus/prometheus/config"
-<<<<<<< HEAD
-    "github.com/prometheus/prometheus/relabel"
-=======
->>>>>>> master
 )
 
 // Storage allows queueing samples for remote writes.
 type Storage struct {
-<<<<<<< HEAD
-    mtx            sync.RWMutex
-    externalLabels model.LabelSet
-    conf           config.RemoteWriteConfig
-
-    queue *StorageQueueManager
-}
-
-// New returns a new remote Storage.
-func New() *Storage {
-    return &Storage{}
-=======
     mtx    sync.RWMutex
     queues []*QueueManager
->>>>>>> master
 }
 
 // ApplyConfig updates the state as the new config requires.
@@ -53,28 +32,6 @@ func (s *Storage) ApplyConfig(conf *config.Config) error {
     s.mtx.Lock()
     defer s.mtx.Unlock()
 
-<<<<<<< HEAD
-    // TODO: we should only stop & recreate queues which have changes,
-    // as this can be quite disruptive.
-    var newQueue *StorageQueueManager
-
-    if conf.RemoteWriteConfig.URL != nil {
-        c, err := NewClient(conf.RemoteWriteConfig)
-        if err != nil {
-            return err
-        }
-        newQueue = NewStorageQueueManager(c, nil)
-    }
-
-    if s.queue != nil {
-        s.queue.Stop()
-    }
-    s.queue = newQueue
-    s.conf = conf.RemoteWriteConfig
-    s.externalLabels = conf.GlobalConfig.ExternalLabels
-    if s.queue != nil {
-        s.queue.Start()
-=======
     newQueues := []*QueueManager{}
     // TODO: we should only stop & recreate queues which have changes,
     // as this can be quite disruptive.
@@ -97,15 +54,14 @@ func (s *Storage) ApplyConfig(conf *config.Config) error {
     s.queues = newQueues
     for _, q := range s.queues {
         q.Start()
->>>>>>> master
     }
     return nil
 }
 
 // Stop the background processing of the storage queues.
 func (s *Storage) Stop() {
-    if s.queue != nil {
-        s.queue.Stop()
+    for _, q := range s.queues {
+        q.Stop()
     }
 }
 
@@ -114,32 +70,9 @@ func (s *Storage) Append(smpl *model.Sample) error {
     s.mtx.RLock()
     defer s.mtx.RUnlock()
 
-<<<<<<< HEAD
-    if s.queue == nil {
-        return nil
-    }
-
-    var snew model.Sample
-    snew = *smpl
-    snew.Metric = smpl.Metric.Clone()
-
-    for ln, lv := range s.externalLabels {
-        if _, ok := smpl.Metric[ln]; !ok {
-            snew.Metric[ln] = lv
-        }
-    }
-    snew.Metric = model.Metric(
-        relabel.Process(model.LabelSet(snew.Metric), s.conf.WriteRelabelConfigs...))
-
-    if snew.Metric == nil {
-        return nil
-    }
-    s.queue.Append(&snew)
-=======
     for _, q := range s.queues {
         q.Append(smpl)
     }
->>>>>>> master
     return nil
 }
 
@@ -13,7 +13,7 @@ import (
 
 // adapter implements a storage.Storage around TSDB.
 type adapter struct {
-    db *tsdb.PartitionedDB
+    db *tsdb.DB
 }
 
 // Options of the DB storage.
@@ -38,7 +38,7 @@ type Options struct {
 
 // Open returns a new storage backed by a tsdb database.
 func Open(path string, r prometheus.Registerer, opts *Options) (storage.Storage, error) {
-    db, err := tsdb.OpenPartitioned(path, 1, nil, r, &tsdb.Options{
+    db, err := tsdb.Open(path, nil, r, &tsdb.Options{
         WALFlushInterval: 10 * time.Second,
         MinBlockDuration: uint64(opts.MinBlockDuration.Seconds() * 1000),
         MaxBlockDuration: uint64(opts.MaxBlockDuration.Seconds() * 1000),
vendor/github.com/fabxc/tsdb/chunks.go (new file, 299 lines, generated, vendored)
@ -0,0 +1,299 @@
|
||||||
|
package tsdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"hash/crc32"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/pkg/fileutil"
|
||||||
|
"github.com/fabxc/tsdb/chunks"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// MagicSeries 4 bytes at the head of series file.
|
||||||
|
MagicChunks = 0x85BD40DD
|
||||||
|
)
|
||||||
|
|
||||||
|
// ChunkMeta holds information about a chunk of data.
|
||||||
|
type ChunkMeta struct {
|
||||||
|
// Ref and Chunk hold either a reference that can be used to retrieve
|
||||||
|
// chunk data or the data itself.
|
||||||
|
// Generally, only one of them is set.
|
||||||
|
Ref uint64
|
||||||
|
Chunk chunks.Chunk
|
||||||
|
|
||||||
|
MinTime, MaxTime int64 // time range the data covers
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChunkWriter serializes a time block of chunked series data.
|
||||||
|
type ChunkWriter interface {
|
||||||
|
// WriteChunks writes several chunks. The data field of the ChunkMetas
|
||||||
|
// must be populated.
|
||||||
|
// After returning successfully, the Ref fields in the ChunkMetas
|
||||||
|
// is set and can be used to retrieve the chunks from the written data.
|
||||||
|
WriteChunks(chunks ...ChunkMeta) error
|
||||||
|
|
||||||
|
// Close writes any required finalization and closes the resources
|
||||||
|
// associated with the underlying writer.
|
||||||
|
Close() error
|
||||||
|
}
|
||||||
|
|
||||||
|
// chunkWriter implements the ChunkWriter interface for the standard
|
||||||
|
// serialization format.
|
||||||
|
type chunkWriter struct {
|
||||||
|
dirFile *os.File
|
||||||
|
files []*os.File
|
||||||
|
wbuf *bufio.Writer
|
||||||
|
n int64
|
||||||
|
crc32 hash.Hash
|
||||||
|
|
||||||
|
segmentSize int64
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
defaultChunkSegmentSize = 512 * 1024 * 1024
|
||||||
|
|
||||||
|
chunksFormatV1 = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
func newChunkWriter(dir string) (*chunkWriter, error) {
|
||||||
|
if err := os.MkdirAll(dir, 0777); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
dirFile, err := fileutil.OpenDir(dir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
cw := &chunkWriter{
|
||||||
|
dirFile: dirFile,
|
||||||
|
n: 0,
|
||||||
|
crc32: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
|
||||||
|
segmentSize: defaultChunkSegmentSize,
|
||||||
|
}
|
||||||
|
return cw, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *chunkWriter) tail() *os.File {
|
||||||
|
if len(w.files) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return w.files[len(w.files)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// finalizeTail writes all pending data to the current tail file,
|
||||||
|
// truncates its size, and closes it.
|
||||||
|
func (w *chunkWriter) finalizeTail() error {
|
||||||
|
tf := w.tail()
|
||||||
|
if tf == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := w.wbuf.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := fileutil.Fsync(tf); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// As the file was pre-allocated, we truncate any superfluous zero bytes.
|
||||||
|
off, err := tf.Seek(0, os.SEEK_CUR)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := tf.Truncate(off); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return tf.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *chunkWriter) cut() error {
|
||||||
|
// Sync current tail to disk and close.
|
||||||
|
w.finalizeTail()
|
||||||
|
|
||||||
|
p, _, err := nextSequenceFile(w.dirFile.Name(), "")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE, 0666)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = fileutil.Preallocate(f, w.segmentSize, true); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = w.dirFile.Sync(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write header metadata for new file.
|
||||||
|
|
||||||
|
metab := make([]byte, 8)
|
||||||
|
binary.BigEndian.PutUint32(metab[:4], MagicChunks)
|
||||||
|
metab[4] = chunksFormatV1
|
||||||
|
|
||||||
|
if _, err := f.Write(metab); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
w.files = append(w.files, f)
|
||||||
|
if w.wbuf != nil {
|
||||||
|
w.wbuf.Reset(f)
|
||||||
|
} else {
|
||||||
|
w.wbuf = bufio.NewWriterSize(f, 8*1024*1024)
|
||||||
|
}
|
||||||
|
w.n = 8
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *chunkWriter) write(wr io.Writer, b []byte) error {
|
||||||
|
n, err := wr.Write(b)
|
||||||
|
w.n += int64(n)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *chunkWriter) WriteChunks(chks ...ChunkMeta) error {
|
||||||
|
// Calculate maximum space we need and cut a new segment in case
|
||||||
|
// we don't fit into the current one.
|
||||||
|
maxLen := int64(binary.MaxVarintLen32)
|
||||||
|
for _, c := range chks {
|
||||||
|
maxLen += binary.MaxVarintLen32 + 1
|
||||||
|
maxLen += int64(len(c.Chunk.Bytes()))
|
||||||
|
}
|
||||||
|
newsz := w.n + maxLen
|
||||||
|
|
||||||
|
if w.wbuf == nil || w.n > w.segmentSize || newsz > w.segmentSize && maxLen <= w.segmentSize {
|
||||||
|
if err := w.cut(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write chunks sequentially and set the reference field in the ChunkMeta.
|
||||||
|
w.crc32.Reset()
|
||||||
|
wr := io.MultiWriter(w.crc32, w.wbuf)
|
||||||
|
|
||||||
|
b := make([]byte, binary.MaxVarintLen32)
|
||||||
|
n := binary.PutUvarint(b, uint64(len(chks)))
|
||||||
|
|
||||||
|
if err := w.write(wr, b[:n]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
seq := uint64(w.seq()) << 32
|
||||||
|
|
||||||
|
for i := range chks {
|
||||||
|
chk := &chks[i]
|
||||||
|
|
||||||
|
chk.Ref = seq | uint64(w.n)
|
||||||
|
|
||||||
|
n = binary.PutUvarint(b, uint64(len(chk.Chunk.Bytes())))
|
||||||
|
|
||||||
|
if err := w.write(wr, b[:n]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := w.write(wr, []byte{byte(chk.Chunk.Encoding())}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := w.write(wr, chk.Chunk.Bytes()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
chk.Chunk = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := w.write(w.wbuf, w.crc32.Sum(nil)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *chunkWriter) seq() int {
|
||||||
|
return len(w.files) - 1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *chunkWriter) Close() error {
|
||||||
|
return w.finalizeTail()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChunkReader provides reading access of serialized time series data.
|
||||||
|
type ChunkReader interface {
|
||||||
|
// Chunk returns the series data chunk with the given reference.
|
||||||
|
Chunk(ref uint64) (chunks.Chunk, error)
|
||||||
|
|
||||||
|
// Close releases all underlying resources of the reader.
|
||||||
|
Close() error
|
||||||
|
}
|
||||||
|
|
||||||
|
// chunkReader implements a SeriesReader for a serialized byte stream
|
||||||
|
// of series data.
|
||||||
|
type chunkReader struct {
|
||||||
|
// The underlying bytes holding the encoded series data.
|
||||||
|
bs [][]byte
|
||||||
|
|
||||||
|
// Closers for resources behind the byte slices.
|
||||||
|
cs []io.Closer
|
||||||
|
}
|
||||||
|
|
||||||
|
// newChunkReader returns a new chunkReader based on mmaped files found in dir.
|
||||||
|
func newChunkReader(dir string) (*chunkReader, error) {
|
||||||
|
files, err := sequenceFiles(dir, "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var cr chunkReader
|
||||||
|
|
||||||
|
for _, fn := range files {
|
||||||
|
f, err := openMmapFile(fn)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "mmap files")
|
||||||
|
}
|
||||||
|
cr.cs = append(cr.cs, f)
|
||||||
|
cr.bs = append(cr.bs, f.b)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, b := range cr.bs {
|
||||||
|
if len(b) < 4 {
|
||||||
|
return nil, errors.Wrapf(errInvalidSize, "validate magic in segment %d", i)
|
||||||
|
}
|
||||||
|
// Verify magic number.
|
||||||
|
if m := binary.BigEndian.Uint32(b[:4]); m != MagicChunks {
|
||||||
|
return nil, fmt.Errorf("invalid magic number %x", m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &cr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *chunkReader) Close() error {
|
||||||
|
return closeAll(s.cs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *chunkReader) Chunk(ref uint64) (chunks.Chunk, error) {
|
||||||
|
var (
|
||||||
|
seq = int(ref >> 32)
|
||||||
|
off = int((ref << 32) >> 32)
|
||||||
|
)
|
||||||
|
if seq >= len(s.bs) {
|
||||||
|
return nil, errors.Errorf("reference sequence %d out of range", seq)
|
||||||
|
}
|
||||||
|
b := s.bs[seq]
|
||||||
|
|
||||||
|
if int(off) >= len(b) {
|
||||||
|
return nil, errors.Errorf("offset %d beyond data size %d", off, len(b))
|
||||||
|
}
|
||||||
|
b = b[off:]
|
||||||
|
|
||||||
|
l, n := binary.Uvarint(b)
|
||||||
|
if n < 0 {
|
||||||
|
return nil, fmt.Errorf("reading chunk length failed")
|
||||||
|
}
|
||||||
|
b = b[n:]
|
||||||
|
enc := chunks.Encoding(b[0])
|
||||||
|
|
||||||
|
c, err := chunks.FromData(enc, b[1:1+l])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return c, nil
|
||||||
|
}
|
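In the chunk writer above, a chunk reference packs the segment sequence number into the upper 32 bits (seq := uint64(w.seq()) << 32; chk.Ref = seq | uint64(w.n)) and the byte offset within that segment into the lower 32 bits, which chunkReader.Chunk splits apart again. A short illustrative sketch of that encoding; the helper names are not from the vendored file:

package main

import "fmt"

// packChunkRef mirrors the seq<<32 | offset encoding used by chunkWriter.
func packChunkRef(seq int, offset int64) uint64 {
    return uint64(seq)<<32 | uint64(offset)
}

// splitChunkRef mirrors the decoding done by chunkReader.Chunk.
func splitChunkRef(ref uint64) (seq, offset int) {
    return int(ref >> 32), int((ref << 32) >> 32)
}

func main() {
    ref := packChunkRef(2, 8)
    seq, off := splitChunkRef(ref)
    fmt.Println(seq, off) // 2 8
}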
vendor/github.com/fabxc/tsdb/compact.go (2 lines changed, generated, vendored)
@@ -163,6 +163,8 @@ func (c *compactor) Compact(dirs ...string) (err error) {
         if err != nil {
             return err
         }
+        defer b.Close()
+
         blocks = append(blocks, b)
     }
 
vendor/github.com/fabxc/tsdb/db.go (293 lines changed, generated, vendored)
@@ -6,10 +6,8 @@ import (
     "fmt"
     "io"
     "io/ioutil"
-    "math"
     "os"
     "path/filepath"
-    "reflect"
     "strconv"
     "strings"
     "sync"
@@ -87,17 +85,24 @@ const sep = '\xff'
 // DB handles reads and writes of time series falling into
 // a hashed partition of a seriedb.
 type DB struct {
     dir   string
     lockf lockfile.Lockfile
 
     logger  log.Logger
     metrics *dbMetrics
     opts    *Options
 
+    // Mutex for that must be held when modifying the general
+    // block layout.
     mtx       sync.RWMutex
     persisted []*persistedBlock
-    heads     []*headBlock
     seqBlocks map[int]Block
-    headGen   uint8
+
+    // Mutex that must be held when modifying just the head blocks
+    // or the general layout.
+    headmtx sync.RWMutex
+    heads   []*headBlock
+    headGen uint8
+
     compactor Compactor
 
@@ -200,7 +205,15 @@ func (db *DB) run() {
         case <-db.compactc:
             db.metrics.compactionsTriggered.Inc()
 
-            if err := db.compact(); err != nil {
+            var merr MultiError
+
+            changes, err := db.compact()
+            merr.Add(err)
+
+            if changes {
+                merr.Add(db.reloadBlocks())
+            }
+            if err := merr.Err(); err != nil {
                 db.logger.Log("msg", "compaction failed", "err", err)
             }
 
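The run loop now collects the compaction error and the block-reload error through the package's MultiError type (defined further down in db.go) instead of returning on the first failure. A small sketch of how such an accumulator behaves; this is illustrative and not the vendored implementation:

package main

import (
    "errors"
    "fmt"
    "strings"
)

// multiError collects several errors and reports them as one.
type multiError []error

// Add appends a non-nil error to the collection.
func (es *multiError) Add(err error) {
    if err != nil {
        *es = append(*es, err)
    }
}

// Err returns nil if nothing was collected, or a combined error otherwise.
func (es multiError) Err() error {
    if len(es) == 0 {
        return nil
    }
    msgs := make([]string, 0, len(es))
    for _, e := range es {
        msgs = append(msgs, e.Error())
    }
    return errors.New(strings.Join(msgs, "; "))
}

func main() {
    var merr multiError
    merr.Add(nil)
    merr.Add(errors.New("compact head"))
    merr.Add(errors.New("reload blocks"))
    fmt.Println(merr.Err()) // compact head; reload blocks
}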
@@ -210,12 +223,17 @@ func (db *DB) run() {
     }
 }
 
-func (db *DB) compact() error {
-    changes := false
+func (db *DB) compact() (changes bool, err error) {
     // Check whether we have pending head blocks that are ready to be persisted.
     // They have the highest priority.
-    db.mtx.RLock()
+    db.headmtx.RLock()
 
+    var singles []*headBlock
+
+    // Collect head blocks that are ready for compaction. Write them after
+    // returning the lock to not block Appenders.
+    // Selected blocks are semantically ensured to not be written to afterwards
+    // by appendable().
     if len(db.heads) > db.opts.AppendableBlocks {
         for _, h := range db.heads[:len(db.heads)-db.opts.AppendableBlocks] {
             // Blocks that won't be appendable when instantiating a new appender
@@ -224,36 +242,38 @@ func (db *DB) compact() error {
             if atomic.LoadUint64(&h.activeWriters) > 0 {
                 break
             }
-            db.logger.Log("msg", "write head", "seq", h.Meta().Sequence)
-
-            select {
-            case <-db.stopc:
-                db.mtx.RUnlock()
-                return nil
-            default:
-            }
-
-            if err := db.compactor.Write(h.Dir(), h); err != nil {
-                db.mtx.RUnlock()
-                return errors.Wrap(err, "persist head block")
-            }
-            changes = true
+            singles = append(singles, h)
         }
     }
 
-    db.mtx.RUnlock()
+    db.headmtx.RUnlock()
+
+Loop:
+    for _, h := range singles {
+        db.logger.Log("msg", "write head", "seq", h.Meta().Sequence)
+
+        select {
+        case <-db.stopc:
+            break Loop
+        default:
+        }
+
+        if err = db.compactor.Write(h.Dir(), h); err != nil {
+            return changes, errors.Wrap(err, "persist head block")
+        }
+        changes = true
+    }
 
     // Check for compactions of multiple blocks.
     for {
         plans, err := db.compactor.Plan(db.dir)
         if err != nil {
-            return errors.Wrap(err, "plan compaction")
+            return changes, errors.Wrap(err, "plan compaction")
         }
 
         select {
         case <-db.stopc:
-            return nil
+            return false, nil
         default:
         }
         // We just execute compactions sequentially to not cause too extreme
@@ -264,7 +284,7 @@ func (db *DB) compact() error {
         db.logger.Log("msg", "compact blocks", "seq", fmt.Sprintf("%v", p))
 
         if err := db.compactor.Compact(p...); err != nil {
-            return errors.Wrapf(err, "compact", p)
+            return changes, errors.Wrapf(err, "compact %s", p)
         }
         changes = true
     }
@@ -274,10 +294,7 @@ func (db *DB) compact() error {
         }
     }
 
-    if changes {
-        return errors.Wrap(db.reloadBlocks(), "reload blocks")
-    }
-    return nil
+    return changes, nil
 }
 
 // func (db *DB) retentionCutoff() error {
@@ -315,6 +332,9 @@ func (db *DB) reloadBlocks() error {
     db.mtx.Lock()
     defer db.mtx.Unlock()
 
+    db.headmtx.Lock()
+    defer db.headmtx.Unlock()
+
     dirs, err := blockDirs(db.dir)
     if err != nil {
         return errors.Wrap(err, "find blocks")
@@ -336,17 +356,20 @@ func (db *DB) reloadBlocks() error {
 
     for i, meta := range metas {
         b, ok := db.seqBlocks[meta.Sequence]
-        if !ok {
-            return errors.Errorf("missing block for sequence %d", meta.Sequence)
-        }
 
         if meta.Compaction.Generation == 0 {
+            if !ok {
+                b, err = openHeadBlock(dirs[i], db.logger)
+                if err != nil {
+                    return errors.Wrapf(err, "load head at %s", dirs[i])
+                }
+            }
             if meta.ULID != b.Meta().ULID {
                 return errors.Errorf("head block ULID changed unexpectedly")
             }
             heads = append(heads, b.(*headBlock))
         } else {
-            if meta.ULID != b.Meta().ULID {
+            if ok && meta.ULID != b.Meta().ULID {
                 if err := b.Close(); err != nil {
                     return err
                 }
@@ -381,18 +404,22 @@ func (db *DB) Close() error {
     close(db.stopc)
     <-db.donec
 
-    var merr MultiError
+    // Lock mutex and leave it locked so we panic if there's a bug causing
+    // the block to be used afterwards.
     db.mtx.Lock()
-    defer db.mtx.Unlock()
+
+    var g errgroup.Group
 
     for _, pb := range db.persisted {
-        merr.Add(pb.Close())
+        g.Go(pb.Close)
    }
     for _, hb := range db.heads {
-        merr.Add(hb.Close())
+        g.Go(hb.Close)
     }
 
+    var merr MultiError
+
+    merr.Add(g.Wait())
     merr.Add(db.lockf.Unlock())
 
     return merr.Err()
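Close now fans the per-block Close calls out through golang.org/x/sync/errgroup instead of closing them sequentially into a MultiError. A standalone sketch of the same pattern; the closers here are placeholders:

package main

import (
    "fmt"
    "io"
    "strings"

    "golang.org/x/sync/errgroup"
)

// closeAll closes every closer concurrently and returns the first error.
func closeAll(cs ...io.Closer) error {
    var g errgroup.Group
    for _, c := range cs {
        g.Go(c.Close) // each Close runs in its own goroutine
    }
    return g.Wait()
}

func main() {
    a := io.NopCloser(strings.NewReader("a"))
    b := io.NopCloser(strings.NewReader("b"))
    fmt.Println(closeAll(a, b)) // <nil>
}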
@@ -403,9 +430,14 @@ func (db *DB) Appender() Appender {
     db.mtx.RLock()
     a := &dbAppender{db: db}
 
+    db.headmtx.RLock()
+
     for _, b := range db.appendable() {
         a.heads = append(a.heads, b.Appender().(*headAppender))
     }
 
+    db.headmtx.RUnlock()
+
     return a
 }
 
@@ -428,19 +460,6 @@ func (a *dbAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error)
     return ref | (uint64(h.generation) << 40), nil
 }
 
-func (a *dbAppender) hashedAdd(hash uint64, lset labels.Labels, t int64, v float64) (uint64, error) {
-    h, err := a.appenderFor(t)
-    if err != nil {
-        return 0, err
-    }
-    ref, err := h.hashedAdd(hash, lset, t, v)
-    if err != nil {
-        return 0, err
-    }
-    a.samples++
-    return ref | (uint64(h.generation) << 40), nil
-}
-
 func (a *dbAppender) AddFast(ref uint64, t int64, v float64) error {
     // We store the head generation in the 4th byte and use it to reject
     // stale references.
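The surrounding context notes that the appender encodes the head generation into the returned reference (ref | (uint64(h.generation) << 40)) so that stale references can be rejected in AddFast. A small sketch of that bit packing; the helper names are illustrative only:

package main

import "fmt"

// packRef combines a series reference with a head generation,
// mirroring the `ref | (uint64(gen) << 40)` expression in the diff.
func packRef(ref uint64, gen uint8) uint64 {
    return ref | uint64(gen)<<40
}

// refGeneration extracts the generation byte again.
func refGeneration(packed uint64) uint8 {
    return uint8(packed >> 40)
}

func main() {
    p := packRef(12345, 3)
    fmt.Println(refGeneration(p), p&((1<<40)-1)) // 3 12345
}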
@@ -468,15 +487,12 @@ func (a *dbAppender) AddFast(ref uint64, t int64, v float64) error {
 func (a *dbAppender) appenderFor(t int64) (*headAppender, error) {
     // If there's no fitting head block for t, ensure it gets created.
     if len(a.heads) == 0 || t >= a.heads[len(a.heads)-1].meta.MaxTime {
-        a.db.mtx.RUnlock()
-
+        a.db.headmtx.Lock()
         if err := a.db.ensureHead(t); err != nil {
-            a.db.mtx.RLock()
+            a.db.headmtx.Unlock()
             return nil, err
         }
 
-        a.db.mtx.RLock()
-
         if len(a.heads) == 0 {
             for _, b := range a.db.appendable() {
                 a.heads = append(a.heads, b.Appender().(*headAppender))
@@ -489,6 +505,8 @@ func (a *dbAppender) appenderFor(t int64) (*headAppender, error) {
                 }
             }
         }
+
+        a.db.headmtx.Unlock()
     }
     for i := len(a.heads) - 1; i >= 0; i-- {
         if h := a.heads[i]; t >= h.meta.MinTime {
@@ -499,10 +517,9 @@ func (a *dbAppender) appenderFor(t int64) (*headAppender, error) {
     return nil, ErrNotFound
 }
 
+// ensureHead makes sure that there is a head block for the timestamp t if
+// it is within or after the currently appendable window.
 func (db *DB) ensureHead(t int64) error {
-    db.mtx.Lock()
-    defer db.mtx.Unlock()
-
     // Initial case for a new database: we must create the first
     // AppendableBlocks-1 front padding heads.
     if len(db.heads) == 0 {
@@ -557,31 +574,6 @@ func (db *DB) appendable() []*headBlock {
     return db.heads[len(db.heads)-db.opts.AppendableBlocks:]
 }
 
-func (db *DB) compactable() []Block {
-    db.mtx.RLock()
-    defer db.mtx.RUnlock()
-
-    var blocks []Block
-    for _, pb := range db.persisted {
-        blocks = append(blocks, pb)
-    }
-
-    if len(db.heads) <= db.opts.AppendableBlocks {
-        return blocks
-    }
-
-    for _, h := range db.heads[:len(db.heads)-db.opts.AppendableBlocks] {
-        // Blocks that won't be appendable when instantiating a new appender
-        // might still have active appenders on them.
-        // Abort at the first one we encounter.
-        if atomic.LoadUint64(&h.activeWriters) > 0 {
-            break
-        }
-        blocks = append(blocks, h)
-    }
-    return blocks
-}
-
 func intervalOverlap(amin, amax, bmin, bmax int64) bool {
     if bmin >= amin && bmin <= amax {
         return true
@@ -718,123 +710,6 @@ func nextSequenceFile(dir, prefix string) (string, int, error) {
     return filepath.Join(dir, fmt.Sprintf("%s%0.6d", prefix, i+1)), int(i + 1), nil
 }
 
-// PartitionedDB is a time series storage.
-type PartitionedDB struct {
-    logger log.Logger
-    dir    string
-
-    partitionPow uint
-    Partitions   []*DB
-}
-
-func isPowTwo(x int) bool {
-    return x > 0 && (x&(x-1)) == 0
-}
-
-// OpenPartitioned or create a new DB.
-func OpenPartitioned(dir string, n int, l log.Logger, r prometheus.Registerer, opts *Options) (*PartitionedDB, error) {
-    if !isPowTwo(n) {
-        return nil, errors.Errorf("%d is not a power of two", n)
-    }
-    if opts == nil {
-        opts = DefaultOptions
-    }
-    if l == nil {
-        l = log.NewLogfmtLogger(os.Stdout)
-        l = log.NewContext(l).With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
-    }
-
-    if err := os.MkdirAll(dir, 0777); err != nil {
-        return nil, err
-    }
-    c := &PartitionedDB{
-        logger:       l,
-        dir:          dir,
-        partitionPow: uint(math.Log2(float64(n))),
-    }
-
-    // Initialize vertical partitiondb.
-    // TODO(fabxc): validate partition number to be power of 2, which is required
-    // for the bitshift-modulo when finding the right partition.
-    for i := 0; i < n; i++ {
-        l := log.NewContext(l).With("partition", i)
-        d := partitionDir(dir, i)
-
-        s, err := Open(d, l, r, opts)
-        if err != nil {
-            return nil, fmt.Errorf("initializing partition %q failed: %s", d, err)
-        }
-
-        c.Partitions = append(c.Partitions, s)
-    }
-
-    return c, nil
-}
-
-func partitionDir(base string, i int) string {
-    return filepath.Join(base, fmt.Sprintf("p-%0.4d", i))
-}
-
-// Close the database.
-func (db *PartitionedDB) Close() error {
-    var g errgroup.Group
-
-    for _, partition := range db.Partitions {
-        g.Go(partition.Close)
-    }
-
-    return g.Wait()
-}
-
-// Appender returns a new appender against the database.
-func (db *PartitionedDB) Appender() Appender {
-    app := &partitionedAppender{db: db}
-
-    for _, p := range db.Partitions {
-        app.partitions = append(app.partitions, p.Appender().(*dbAppender))
-    }
-    return app
-}
-
-type partitionedAppender struct {
-    db         *PartitionedDB
-    partitions []*dbAppender
-}
-
-func (a *partitionedAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
-    h := lset.Hash()
-    p := h >> (64 - a.db.partitionPow)
-
-    ref, err := a.partitions[p].hashedAdd(h, lset, t, v)
-    if err != nil {
-        return 0, err
-    }
-    return ref | (p << 48), nil
-}
-
-func (a *partitionedAppender) AddFast(ref uint64, t int64, v float64) error {
-    p := uint8((ref << 8) >> 56)
-    return a.partitions[p].AddFast(ref, t, v)
-}
-
-func (a *partitionedAppender) Commit() error {
-    var merr MultiError
-
-    for _, p := range a.partitions {
-        merr.Add(p.Commit())
-    }
-    return merr.Err()
-}
-
-func (a *partitionedAppender) Rollback() error {
-    var merr MultiError
-
-    for _, p := range a.partitions {
-        merr.Add(p.Rollback())
-    }
-    return merr.Err()
-}
-
 // The MultiError type implements the error interface, and contains the
 // Errors used to construct it.
 type MultiError []error
@@ -878,13 +753,7 @@ func (es MultiError) Err() error {
 }
 
 func yoloString(b []byte) string {
-    sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-
-    h := reflect.StringHeader{
-        Data: sh.Data,
-        Len:  sh.Len,
-    }
-    return *((*string)(unsafe.Pointer(&h)))
+    return *((*string)(unsafe.Pointer(&b)))
 }
 
 func closeAll(cs ...io.Closer) error {
vendor/github.com/fabxc/tsdb/db_amd64.go (deleted, 10 lines, generated, vendored)
@@ -1,10 +0,0 @@
-package tsdb
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
vendor/github.com/fabxc/tsdb/head.go (20 lines changed, generated, vendored)
@@ -44,7 +44,6 @@ type headBlock struct {
 
     activeWriters uint64
 
-    symbols map[string]struct{}
     // descs holds all chunk descs for the head block. Each chunk implicitly
     // is assigned the index as its ID.
     series []*memSeries
@@ -145,8 +144,12 @@ func (h *headBlock) inBounds(t int64) bool {
 
 // Close syncs all data and closes underlying resources of the head block.
 func (h *headBlock) Close() error {
+    // Lock mutex and leave it locked so we panic if there's a bug causing
+    // the block to be used afterwards.
+    h.mtx.Lock()
+
     if err := h.wal.Close(); err != nil {
-        return err
+        return errors.Wrapf(err, "close WAL for head %s", h.dir)
     }
     // Check whether the head block still exists in the underlying dir
     // or has already been replaced with a compacted version or removed.
@@ -219,10 +222,8 @@ type refdSample struct {
 }
 
 func (a *headAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
-    return a.hashedAdd(lset.Hash(), lset, t, v)
-}
-
-func (a *headAppender) hashedAdd(hash uint64, lset labels.Labels, t int64, v float64) (uint64, error) {
+    hash := lset.Hash()
+
     if ms := a.get(hash, lset); ms != nil {
         return uint64(ms.ref), a.AddFast(uint64(ms.ref), t, v)
     }
@@ -526,13 +527,6 @@ func (h *headBlock) create(hash uint64, lset labels.Labels) *memSeries {
     return s
 }
 
-func (h *headBlock) fullness() float64 {
-    h.metamtx.RLock()
-    defer h.metamtx.RUnlock()
-
-    return float64(h.meta.Stats.NumSamples) / float64(h.meta.Stats.NumSeries+1) / 250
-}
-
 func (h *headBlock) updateMapping() {
     h.mtx.RLock()
 
@@ -582,7 +576,7 @@ type memSeries struct {
     lastValue float64
     sampleBuf [4]sample
 
-    app chunks.Appender // Current appender for the chunkdb.
+    app chunks.Appender // Current appender for the chunk.
 }
 
 func (s *memSeries) cut() *memChunk {
vendor/github.com/fabxc/tsdb/index.go (new file, 776 lines, generated, vendored)
@ -0,0 +1,776 @@
|
||||||
|
package tsdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"hash/crc32"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/pkg/fileutil"
|
||||||
|
"github.com/fabxc/tsdb/labels"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// MagicIndex 4 bytes at the head of an index file.
|
||||||
|
MagicIndex = 0xBAAAD700
|
||||||
|
|
||||||
|
indexFormatV1 = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
const compactionPageBytes = minSectorSize * 64
|
||||||
|
|
||||||
|
// IndexWriter serialized the index for a block of series data.
|
||||||
|
// The methods must generally be called in order they are specified.
|
||||||
|
type IndexWriter interface {
|
||||||
|
// AddSeries populates the index writer witha series and its offsets
|
||||||
|
// of chunks that the index can reference.
|
||||||
|
// The reference number is used to resolve a series against the postings
|
||||||
|
// list iterator. It only has to be available during the write processing.
|
||||||
|
AddSeries(ref uint32, l labels.Labels, chunks ...ChunkMeta) error
|
||||||
|
|
||||||
|
// WriteLabelIndex serializes an index from label names to values.
|
||||||
|
// The passed in values chained tuples of strings of the length of names.
|
||||||
|
WriteLabelIndex(names []string, values []string) error
|
||||||
|
|
||||||
|
// WritePostings writes a postings list for a single label pair.
|
||||||
|
WritePostings(name, value string, it Postings) error
|
||||||
|
|
||||||
|
// Close writes any finalization and closes theresources associated with
|
||||||
|
// the underlying writer.
|
||||||
|
Close() error
|
||||||
|
}
|
||||||
|
|
||||||
|
type indexWriterSeries struct {
|
||||||
|
labels labels.Labels
|
||||||
|
chunks []ChunkMeta // series file offset of chunks
|
||||||
|
offset uint32 // index file offset of series reference
|
||||||
|
}
|
||||||
|
|
||||||
|
// indexWriter implements the IndexWriter interface for the standard
|
||||||
|
// serialization format.
|
||||||
|
type indexWriter struct {
|
||||||
|
f *os.File
|
||||||
|
bufw *bufio.Writer
|
||||||
|
n int64
|
||||||
|
started bool
|
||||||
|
|
||||||
|
// Reusable memory.
|
||||||
|
b []byte
|
||||||
|
uint32s []uint32
|
||||||
|
|
||||||
|
series map[uint32]*indexWriterSeries
|
||||||
|
symbols map[string]uint32 // symbol offsets
|
||||||
|
labelIndexes []hashEntry // label index offsets
|
||||||
|
postings []hashEntry // postings lists offsets
|
||||||
|
|
||||||
|
crc32 hash.Hash
|
||||||
|
}
|
||||||
|
|
||||||
|
func newIndexWriter(dir string) (*indexWriter, error) {
|
||||||
|
df, err := fileutil.OpenDir(dir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
f, err := os.OpenFile(filepath.Join(dir, "index"), os.O_CREATE|os.O_WRONLY, 0666)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := fileutil.Fsync(df); err != nil {
|
||||||
|
return nil, errors.Wrap(err, "sync dir")
|
||||||
|
}
|
||||||
|
|
||||||
|
iw := &indexWriter{
|
||||||
|
f: f,
|
||||||
|
bufw: bufio.NewWriterSize(f, 1<<22),
|
||||||
|
n: 0,
|
||||||
|
|
||||||
|
// Reusable memory.
|
||||||
|
b: make([]byte, 0, 1<<23),
|
||||||
|
uint32s: make([]uint32, 0, 1<<15),
|
||||||
|
|
||||||
|
// Caches.
|
||||||
|
symbols: make(map[string]uint32, 1<<13),
|
||||||
|
series: make(map[uint32]*indexWriterSeries, 1<<16),
|
||||||
|
crc32: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
|
||||||
|
}
|
||||||
|
if err := iw.writeMeta(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return iw, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *indexWriter) write(wr io.Writer, b []byte) error {
|
||||||
|
n, err := wr.Write(b)
|
||||||
|
w.n += int64(n)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// section writes a CRC32 checksummed section of length l and guarded by flag.
|
||||||
|
func (w *indexWriter) section(l int, flag byte, f func(w io.Writer) error) error {
|
||||||
|
w.crc32.Reset()
|
||||||
|
wr := io.MultiWriter(w.crc32, w.bufw)
|
||||||
|
|
||||||
|
b := [5]byte{flag, 0, 0, 0, 0}
|
||||||
|
binary.BigEndian.PutUint32(b[1:], uint32(l))
|
||||||
|
|
||||||
|
if err := w.write(wr, b[:]); err != nil {
|
||||||
|
return errors.Wrap(err, "writing header")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := f(wr); err != nil {
|
||||||
|
return errors.Wrap(err, "write contents")
|
||||||
|
}
|
||||||
|
if err := w.write(w.bufw, w.crc32.Sum(nil)); err != nil {
|
||||||
|
return errors.Wrap(err, "writing checksum")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *indexWriter) writeMeta() error {
|
||||||
|
b := [8]byte{}
|
||||||
|
|
||||||
|
binary.BigEndian.PutUint32(b[:4], MagicIndex)
|
||||||
|
b[4] = flagStd
|
||||||
|
|
||||||
|
return w.write(w.bufw, b[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *indexWriter) AddSeries(ref uint32, lset labels.Labels, chunks ...ChunkMeta) error {
|
||||||
|
if _, ok := w.series[ref]; ok {
|
||||||
|
return errors.Errorf("series with reference %d already added", ref)
|
||||||
|
}
|
||||||
|
// Populate the symbol table from all label sets we have to reference.
|
||||||
|
for _, l := range lset {
|
||||||
|
w.symbols[l.Name] = 0
|
||||||
|
w.symbols[l.Value] = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
w.series[ref] = &indexWriterSeries{
|
||||||
|
labels: lset,
|
||||||
|
chunks: chunks,
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *indexWriter) writeSymbols() error {
|
||||||
|
// Generate sorted list of strings we will store as reference table.
|
||||||
|
symbols := make([]string, 0, len(w.symbols))
|
||||||
|
for s := range w.symbols {
|
||||||
|
symbols = append(symbols, s)
|
||||||
|
}
|
||||||
|
sort.Strings(symbols)
|
||||||
|
|
||||||
|
// The start of the section plus a 5 byte section header are our base.
|
||||||
|
// TODO(fabxc): switch to relative offsets and hold sections in a TOC.
|
||||||
|
base := uint32(w.n) + 5
|
||||||
|
|
||||||
|
buf := [binary.MaxVarintLen32]byte{}
|
||||||
|
w.b = append(w.b[:0], flagStd)
|
||||||
|
|
||||||
|
for _, s := range symbols {
|
||||||
|
w.symbols[s] = base + uint32(len(w.b))
|
||||||
|
|
||||||
|
n := binary.PutUvarint(buf[:], uint64(len(s)))
|
||||||
|
w.b = append(w.b, buf[:n]...)
|
||||||
|
w.b = append(w.b, s...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.section(len(w.b), flagStd, func(wr io.Writer) error {
|
||||||
|
return w.write(wr, w.b)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type indexWriterSeriesSlice []*indexWriterSeries
|
||||||
|
|
||||||
|
func (s indexWriterSeriesSlice) Len() int { return len(s) }
|
||||||
|
func (s indexWriterSeriesSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
|
||||||
|
func (s indexWriterSeriesSlice) Less(i, j int) bool {
|
||||||
|
return labels.Compare(s[i].labels, s[j].labels) < 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *indexWriter) writeSeries() error {
|
||||||
|
// Series must be stored sorted along their labels.
|
||||||
|
series := make(indexWriterSeriesSlice, 0, len(w.series))
|
||||||
|
|
||||||
|
for _, s := range w.series {
|
||||||
|
series = append(series, s)
|
||||||
|
}
|
||||||
|
sort.Sort(series)
|
||||||
|
|
||||||
|
// Current end of file plus 5 bytes for section header.
|
||||||
|
// TODO(fabxc): switch to relative offsets.
|
||||||
|
base := uint32(w.n) + 5
|
||||||
|
|
||||||
|
w.b = w.b[:0]
|
||||||
|
buf := make([]byte, binary.MaxVarintLen64)
|
||||||
|
|
||||||
|
for _, s := range series {
|
||||||
|
// Write label set symbol references.
|
||||||
|
s.offset = base + uint32(len(w.b))
|
||||||
|
|
||||||
|
n := binary.PutUvarint(buf, uint64(len(s.labels)))
|
||||||
|
w.b = append(w.b, buf[:n]...)
|
||||||
|
|
||||||
|
for _, l := range s.labels {
|
||||||
|
n = binary.PutUvarint(buf, uint64(w.symbols[l.Name]))
|
||||||
|
w.b = append(w.b, buf[:n]...)
|
||||||
|
n = binary.PutUvarint(buf, uint64(w.symbols[l.Value]))
|
||||||
|
w.b = append(w.b, buf[:n]...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write chunks meta data including reference into chunk file.
|
||||||
|
n = binary.PutUvarint(buf, uint64(len(s.chunks)))
|
||||||
|
w.b = append(w.b, buf[:n]...)
|
||||||
|
|
||||||
|
for _, c := range s.chunks {
|
||||||
|
n = binary.PutVarint(buf, c.MinTime)
|
||||||
|
w.b = append(w.b, buf[:n]...)
|
||||||
|
n = binary.PutVarint(buf, c.MaxTime)
|
||||||
|
w.b = append(w.b, buf[:n]...)
|
||||||
|
|
||||||
|
n = binary.PutUvarint(buf, uint64(c.Ref))
|
||||||
|
w.b = append(w.b, buf[:n]...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.section(len(w.b), flagStd, func(wr io.Writer) error {
|
||||||
|
return w.write(wr, w.b)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *indexWriter) init() error {
|
||||||
|
if err := w.writeSymbols(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := w.writeSeries(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.started = true
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *indexWriter) WriteLabelIndex(names []string, values []string) error {
|
||||||
|
if !w.started {
|
||||||
|
if err := w.init(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
valt, err := newStringTuples(values, len(names))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
sort.Sort(valt)
|
||||||
|
|
||||||
|
w.labelIndexes = append(w.labelIndexes, hashEntry{
|
||||||
|
name: strings.Join(names, string(sep)),
|
||||||
|
offset: uint32(w.n),
|
||||||
|
})
|
||||||
|
|
||||||
|
buf := make([]byte, binary.MaxVarintLen32)
|
||||||
|
n := binary.PutUvarint(buf, uint64(len(names)))
|
||||||
|
|
||||||
|
l := n + len(values)*4
|
||||||
|
|
||||||
|
return w.section(l, flagStd, func(wr io.Writer) error {
|
||||||
|
// First byte indicates tuple size for index.
|
||||||
|
if err := w.write(wr, buf[:n]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range valt.s {
|
||||||
|
binary.BigEndian.PutUint32(buf, w.symbols[v])
|
||||||
|
|
||||||
|
if err := w.write(wr, buf[:4]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *indexWriter) WritePostings(name, value string, it Postings) error {
|
||||||
|
if !w.started {
|
||||||
|
if err := w.init(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
key := name + string(sep) + value
|
||||||
|
|
||||||
|
w.postings = append(w.postings, hashEntry{
|
||||||
|
name: key,
|
||||||
|
offset: uint32(w.n),
|
||||||
|
})
|
||||||
|
|
||||||
|
// Order of the references in the postings list does not imply order
|
||||||
|
// of the series references within the persisted block they are mapped to.
|
||||||
|
// We have to sort the new references again.
|
||||||
|
refs := w.uint32s[:0]
|
||||||
|
|
||||||
|
for it.Next() {
|
||||||
|
s, ok := w.series[it.At()]
|
||||||
|
if !ok {
|
||||||
|
return errors.Errorf("series for reference %d not found", it.At())
|
||||||
|
}
|
||||||
|
refs = append(refs, s.offset)
|
||||||
|
}
|
||||||
|
if err := it.Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Sort(uint32slice(refs))
|
||||||
|
|
||||||
|
w.b = w.b[:0]
|
||||||
|
buf := make([]byte, 4)
|
||||||
|
|
||||||
|
for _, r := range refs {
|
||||||
|
binary.BigEndian.PutUint32(buf, r)
|
||||||
|
w.b = append(w.b, buf...)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.uint32s = refs[:0]
|
||||||
|
|
||||||
|
return w.section(len(w.b), flagStd, func(wr io.Writer) error {
|
||||||
|
return w.write(wr, w.b)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type uint32slice []uint32
|
||||||
|
|
||||||
|
func (s uint32slice) Len() int { return len(s) }
|
||||||
|
func (s uint32slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
func (s uint32slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||||
|
|
||||||
|
type hashEntry struct {
|
||||||
|
name string
|
||||||
|
offset uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *indexWriter) writeHashmap(h []hashEntry) error {
|
	w.b = w.b[:0]
	buf := [binary.MaxVarintLen32]byte{}

	for _, e := range h {
		n := binary.PutUvarint(buf[:], uint64(len(e.name)))
		w.b = append(w.b, buf[:n]...)
		w.b = append(w.b, e.name...)

		n = binary.PutUvarint(buf[:], uint64(e.offset))
		w.b = append(w.b, buf[:n]...)
	}

	return w.section(len(w.b), flagStd, func(wr io.Writer) error {
		return w.write(wr, w.b)
	})
}

func (w *indexWriter) finalize() error {
	// Write out hash maps to jump to correct label index and postings sections.
	lo := uint32(w.n)
	if err := w.writeHashmap(w.labelIndexes); err != nil {
		return err
	}

	po := uint32(w.n)
	if err := w.writeHashmap(w.postings); err != nil {
		return err
	}

	// Terminate index file with offsets to hashmaps. This is the entry Pointer
	// for any index query.
	// TODO(fabxc): also store offset to series section to allow plain
	// iteration over all existing series?
	b := [8]byte{}
	binary.BigEndian.PutUint32(b[:4], lo)
	binary.BigEndian.PutUint32(b[4:], po)

	return w.write(w.bufw, b[:])
}

func (w *indexWriter) Close() error {
	if err := w.finalize(); err != nil {
		return err
	}
	if err := w.bufw.Flush(); err != nil {
		return err
	}
	if err := fileutil.Fsync(w.f); err != nil {
		return err
	}
	return w.f.Close()
}

// IndexReader provides reading access of serialized index data.
type IndexReader interface {
	// LabelValues returns the possible label values
	LabelValues(names ...string) (StringTuples, error)

	// Postings returns the postings list iterator for the label pair.
	Postings(name, value string) (Postings, error)

	// Series returns the series for the given reference.
	Series(ref uint32) (labels.Labels, []ChunkMeta, error)

	// LabelIndices returns the label pairs for which indices exist.
	LabelIndices() ([][]string, error)

	// Close released the underlying resources of the reader.
	Close() error
}

// StringTuples provides access to a sorted list of string tuples.
type StringTuples interface {
	// Total number of tuples in the list.
	Len() int
	// At returns the tuple at position i.
	At(i int) ([]string, error)
}

type indexReader struct {
	// The underlying byte slice holding the encoded series data.
	b []byte

	// Close that releases the underlying resources of the byte slice.
	c io.Closer

	// Cached hashmaps of section offsets.
	labels   map[string]uint32
	postings map[string]uint32
}

var (
	errInvalidSize = fmt.Errorf("invalid size")
	errInvalidFlag = fmt.Errorf("invalid flag")
)

// newIndexReader returns a new indexReader on the given directory.
func newIndexReader(dir string) (*indexReader, error) {
	f, err := openMmapFile(filepath.Join(dir, "index"))
	if err != nil {
		return nil, err
	}
	r := &indexReader{b: f.b, c: f}

	// Verify magic number.
	if len(f.b) < 4 {
		return nil, errors.Wrap(errInvalidSize, "index header")
	}
	if m := binary.BigEndian.Uint32(r.b[:4]); m != MagicIndex {
		return nil, errors.Errorf("invalid magic number %x", m)
	}

	// The last two 4 bytes hold the pointers to the hashmaps.
	loff := binary.BigEndian.Uint32(r.b[len(r.b)-8 : len(r.b)-4])
	poff := binary.BigEndian.Uint32(r.b[len(r.b)-4:])

	flag, b, err := r.section(loff)
	if err != nil {
		return nil, errors.Wrapf(err, "label index hashmap section at %d", loff)
	}
	if r.labels, err = readHashmap(flag, b); err != nil {
		return nil, errors.Wrap(err, "read label index hashmap")
	}
	flag, b, err = r.section(poff)
	if err != nil {
		return nil, errors.Wrapf(err, "postings hashmap section at %d", loff)
	}
	if r.postings, err = readHashmap(flag, b); err != nil {
		return nil, errors.Wrap(err, "read postings hashmap")
	}

	return r, nil
}

func readHashmap(flag byte, b []byte) (map[string]uint32, error) {
	if flag != flagStd {
		return nil, errInvalidFlag
	}
	h := make(map[string]uint32, 512)

	for len(b) > 0 {
		l, n := binary.Uvarint(b)
		if n < 1 {
			return nil, errors.Wrap(errInvalidSize, "read key length")
		}
		b = b[n:]

		if len(b) < int(l) {
			return nil, errors.Wrap(errInvalidSize, "read key")
		}
		s := string(b[:l])
		b = b[l:]

		o, n := binary.Uvarint(b)
		if n < 1 {
			return nil, errors.Wrap(errInvalidSize, "read offset value")
		}
		b = b[n:]

		h[s] = uint32(o)
	}

	return h, nil
}

func (r *indexReader) Close() error {
	return r.c.Close()
}

func (r *indexReader) section(o uint32) (byte, []byte, error) {
	b := r.b[o:]

	if len(b) < 5 {
		return 0, nil, errors.Wrap(errInvalidSize, "read header")
	}

	flag := b[0]
	l := binary.BigEndian.Uint32(b[1:5])

	b = b[5:]

	// b must have the given length plus 4 bytes for the CRC32 checksum.
	if len(b) < int(l)+4 {
		return 0, nil, errors.Wrap(errInvalidSize, "section content")
	}
	return flag, b[:l], nil
}

func (r *indexReader) lookupSymbol(o uint32) (string, error) {
	if int(o) > len(r.b) {
		return "", errors.Errorf("invalid symbol offset %d", o)
	}
	l, n := binary.Uvarint(r.b[o:])
	if n < 0 {
		return "", errors.New("reading symbol length failed")
	}

	end := int(o) + n + int(l)
	if end > len(r.b) {
		return "", errors.New("invalid length")
	}
	b := r.b[int(o)+n : end]

	return yoloString(b), nil
}

func (r *indexReader) LabelValues(names ...string) (StringTuples, error) {
	key := strings.Join(names, string(sep))
	off, ok := r.labels[key]
	if !ok {
		return nil, fmt.Errorf("label index doesn't exist")
	}

	flag, b, err := r.section(off)
	if err != nil {
		return nil, errors.Wrapf(err, "section at %d", off)
	}
	if flag != flagStd {
		return nil, errInvalidFlag
	}
	l, n := binary.Uvarint(b)
	if n < 1 {
		return nil, errors.Wrap(errInvalidSize, "read label index size")
	}

	st := &serializedStringTuples{
		l:      int(l),
		b:      b[n:],
		lookup: r.lookupSymbol,
	}
	return st, nil
}

func (r *indexReader) LabelIndices() ([][]string, error) {
	res := [][]string{}

	for s := range r.labels {
		res = append(res, strings.Split(s, string(sep)))
	}
	return res, nil
}

func (r *indexReader) Series(ref uint32) (labels.Labels, []ChunkMeta, error) {
	k, n := binary.Uvarint(r.b[ref:])
	if n < 1 {
		return nil, nil, errors.Wrap(errInvalidSize, "number of labels")
	}

	b := r.b[int(ref)+n:]
	lbls := make(labels.Labels, 0, k)

	for i := 0; i < 2*int(k); i += 2 {
		o, m := binary.Uvarint(b)
		if m < 1 {
			return nil, nil, errors.Wrap(errInvalidSize, "symbol offset")
		}
		n, err := r.lookupSymbol(uint32(o))
		if err != nil {
			return nil, nil, errors.Wrap(err, "symbol lookup")
		}
		b = b[m:]

		o, m = binary.Uvarint(b)
		if m < 1 {
			return nil, nil, errors.Wrap(errInvalidSize, "symbol offset")
		}
		v, err := r.lookupSymbol(uint32(o))
		if err != nil {
			return nil, nil, errors.Wrap(err, "symbol lookup")
		}
		b = b[m:]

		lbls = append(lbls, labels.Label{
			Name:  n,
			Value: v,
		})
	}

	// Read the chunks meta data.
	k, n = binary.Uvarint(b)
	if n < 1 {
		return nil, nil, errors.Wrap(errInvalidSize, "number of chunks")
	}

	b = b[n:]
	chunks := make([]ChunkMeta, 0, k)

	for i := 0; i < int(k); i++ {
		firstTime, n := binary.Varint(b)
		if n < 1 {
			return nil, nil, errors.Wrap(errInvalidSize, "first time")
		}
		b = b[n:]

		lastTime, n := binary.Varint(b)
		if n < 1 {
			return nil, nil, errors.Wrap(errInvalidSize, "last time")
		}
		b = b[n:]

		o, n := binary.Uvarint(b)
		if n < 1 {
			return nil, nil, errors.Wrap(errInvalidSize, "chunk offset")
		}
		b = b[n:]

		chunks = append(chunks, ChunkMeta{
			Ref:     o,
			MinTime: firstTime,
			MaxTime: lastTime,
		})
	}

	return lbls, chunks, nil
}

func (r *indexReader) Postings(name, value string) (Postings, error) {
	key := name + string(sep) + value

	off, ok := r.postings[key]
	if !ok {
		return nil, ErrNotFound
	}

	flag, b, err := r.section(off)
	if err != nil {
		return nil, errors.Wrapf(err, "section at %d", off)
	}

	if flag != flagStd {
		return nil, errors.Wrapf(errInvalidFlag, "section at %d", off)
	}

	// TODO(fabxc): just read into memory as an intermediate solution.
	// Add iterator over serialized data.
	var l []uint32

	for len(b) > 0 {
		if len(b) < 4 {
			return nil, errors.Wrap(errInvalidSize, "plain postings entry")
		}
		l = append(l, binary.BigEndian.Uint32(b[:4]))

		b = b[4:]
	}

	return &listPostings{list: l, idx: -1}, nil
}

type stringTuples struct {
	l int      // tuple length
	s []string // flattened tuple entries
}

func newStringTuples(s []string, l int) (*stringTuples, error) {
	if len(s)%l != 0 {
		return nil, errors.Wrap(errInvalidSize, "string tuple list")
	}
	return &stringTuples{s: s, l: l}, nil
}

func (t *stringTuples) Len() int                   { return len(t.s) / t.l }
func (t *stringTuples) At(i int) ([]string, error) { return t.s[i : i+t.l], nil }

func (t *stringTuples) Swap(i, j int) {
	c := make([]string, t.l)
	copy(c, t.s[i:i+t.l])

	for k := 0; k < t.l; k++ {
		t.s[i+k] = t.s[j+k]
		t.s[j+k] = c[k]
	}
}

func (t *stringTuples) Less(i, j int) bool {
	for k := 0; k < t.l; k++ {
		d := strings.Compare(t.s[i+k], t.s[j+k])

		if d < 0 {
			return true
		}
		if d > 0 {
			return false
		}
	}
	return false
}

type serializedStringTuples struct {
	l      int
	b      []byte
	lookup func(uint32) (string, error)
}

func (t *serializedStringTuples) Len() int {
	// TODO(fabxc): Cache this?
	return len(t.b) / (4 * t.l)
}

func (t *serializedStringTuples) At(i int) ([]string, error) {
	if len(t.b) < (i+t.l)*4 {
		return nil, errInvalidSize
	}
	res := make([]string, 0, t.l)

	for k := 0; k < t.l; k++ {
		offset := binary.BigEndian.Uint32(t.b[(i+k)*4:])

		s, err := t.lookup(offset)
		if err != nil {
			return nil, errors.Wrap(err, "symbol lookup")
		}
		res = append(res, s)
	}

	return res, nil
}
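The index writer and reader above agree on a single section layout: one flag byte, a 4-byte big-endian payload length, the payload itself, and a trailing CRC32 over header and payload. The following is a small illustrative sketch of that framing, not part of the commit; it assumes the Castagnoli CRC32 table the writer uses, and the names frameSection and parseSection are made up for the example.

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// frameSection lays a section out the way the index writer does:
// flag byte, 4-byte big-endian payload length, the payload, then a
// CRC32 over header and payload.
func frameSection(flag byte, payload []byte) []byte {
	b := make([]byte, 0, 5+len(payload)+4)
	b = append(b, flag, 0, 0, 0, 0)
	binary.BigEndian.PutUint32(b[1:5], uint32(len(payload)))
	b = append(b, payload...)

	var sum [4]byte
	binary.BigEndian.PutUint32(sum[:], crc32.Checksum(b, castagnoli))
	return append(b, sum[:]...)
}

// parseSection mirrors indexReader.section: return the flag and the
// payload after checking that the advertised length plus the 4
// checksum bytes actually fit in the buffer.
func parseSection(b []byte) (byte, []byte, error) {
	if len(b) < 5 {
		return 0, nil, fmt.Errorf("invalid size: header")
	}
	flag := b[0]
	l := binary.BigEndian.Uint32(b[1:5])
	b = b[5:]

	if len(b) < int(l)+4 {
		return 0, nil, fmt.Errorf("invalid size: section content")
	}
	return flag, b[:l], nil
}

func main() {
	sec := frameSection(1, []byte("hello"))
	flag, payload, err := parseSection(sec)
	fmt.Println(flag, string(payload), err) // 1 hello <nil>
}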
243 vendor/github.com/fabxc/tsdb/querier.go generated vendored
@@ -47,7 +47,9 @@ type querier struct {
 func (s *DB) Querier(mint, maxt int64) Querier {
 	s.mtx.RLock()
 
+	s.headmtx.RLock()
 	blocks := s.blocksForInterval(mint, maxt)
+	s.headmtx.RUnlock()
 
 	sq := &querier{
 		blocks: make([]Querier, 0, len(blocks)),
@@ -74,6 +76,9 @@ func (s *DB) Querier(mint, maxt int64) Querier {
 }
 
 func (q *querier) LabelValues(n string) ([]string, error) {
+	if len(q.blocks) == 0 {
+		return nil, nil
+	}
 	res, err := q.blocks[0].LabelValues(n)
 	if err != nil {
 		return nil, err
@@ -161,12 +166,16 @@ func (q *blockQuerier) Select(ms ...labels.Matcher) SeriesSet {
 	}
 
 	return &blockSeriesSet{
-		index:  q.index,
-		chunks: q.chunks,
-		it:     p,
-		absent: absent,
-		mint:   q.mint,
-		maxt:   q.maxt,
+		set: &populatedChunkSeries{
+			set: &baseChunkSeries{
+				p:      p,
+				index:  q.index,
+				absent: absent,
+			},
+			chunks: q.chunks,
+			mint:   q.mint,
+			maxt:   q.maxt,
+		},
 	}
 }
 
@@ -231,69 +240,6 @@ func (q *blockQuerier) Close() error {
 	return nil
 }
 
-// partitionedQuerier merges query results from a set of partition querieres.
-type partitionedQuerier struct {
-	mint, maxt int64
-	partitions []Querier
-}
-
-// Querier returns a new querier over the database for the given
-// time range.
-func (db *PartitionedDB) Querier(mint, maxt int64) Querier {
-	q := &partitionedQuerier{
-		mint: mint,
-		maxt: maxt,
-	}
-	for _, s := range db.Partitions {
-		q.partitions = append(q.partitions, s.Querier(mint, maxt))
-	}
-
-	return q
-}
-
-func (q *partitionedQuerier) Select(ms ...labels.Matcher) SeriesSet {
-	// We gather the non-overlapping series from every partition and simply
-	// return their union.
-	r := &mergedSeriesSet{}
-
-	for _, s := range q.partitions {
-		r.sets = append(r.sets, s.Select(ms...))
-	}
-	if len(r.sets) == 0 {
-		return nopSeriesSet{}
-	}
-	return r
-}
-
-func (q *partitionedQuerier) LabelValues(n string) ([]string, error) {
-	res, err := q.partitions[0].LabelValues(n)
-	if err != nil {
-		return nil, err
-	}
-	for _, sq := range q.partitions[1:] {
-		pr, err := sq.LabelValues(n)
-		if err != nil {
-			return nil, err
-		}
-		// Merge new values into deduplicated result.
-		res = mergeStrings(res, pr)
-	}
-	return res, nil
-}
-
-func (q *partitionedQuerier) LabelValuesFor(string, labels.Label) ([]string, error) {
-	return nil, fmt.Errorf("not implemented")
-}
-
-func (q *partitionedQuerier) Close() error {
-	var merr MultiError
-
-	for _, sq := range q.partitions {
-		merr.Add(sq.Close())
-	}
-	return merr.Err()
-}
-
 func mergeStrings(a, b []string) []string {
 	maxl := len(a)
 	if len(b) > len(a) {
@@ -422,23 +368,31 @@ func (s *partitionSeriesSet) Next() bool {
 	return true
 }
 
-// blockSeriesSet is a set of series from an inverted index query.
-type blockSeriesSet struct {
-	index  IndexReader
-	chunks ChunkReader
-	it     Postings // postings list referencing series
-	absent []string // labels that must not be set for result series
-	mint, maxt int64 // considered time range
-
-	err error
-	cur Series
+type chunkSeriesSet interface {
+	Next() bool
+	At() (labels.Labels, []ChunkMeta)
+	Err() error
 }
 
-func (s *blockSeriesSet) Next() bool {
-	// Step through the postings iterator to find potential series.
-outer:
-	for s.it.Next() {
-		lset, chunks, err := s.index.Series(s.it.At())
+// baseChunkSeries loads the label set and chunk references for a postings
+// list from an index. It filters out series that have labels set that should be unset.
+type baseChunkSeries struct {
+	p      Postings
+	index  IndexReader
+	absent []string // labels that must be unset in results.
+
+	lset labels.Labels
+	chks []ChunkMeta
+	err  error
+}
+
+func (s *baseChunkSeries) At() (labels.Labels, []ChunkMeta) { return s.lset, s.chks }
+func (s *baseChunkSeries) Err() error                       { return s.err }
+
+func (s *baseChunkSeries) Next() bool {
+Outer:
+	for s.p.Next() {
+		lset, chunks, err := s.index.Series(s.p.At())
 		if err != nil {
 			s.err = err
 			return false
@@ -447,35 +401,87 @@ outer:
 		// If a series contains a label that must be absent, it is skipped as well.
 		for _, abs := range s.absent {
 			if lset.Get(abs) != "" {
-				continue outer
+				continue Outer
 			}
 		}
 
-		ser := &chunkSeries{
-			labels: lset,
-			chunks: make([]ChunkMeta, 0, len(chunks)),
-			chunk:  s.chunks.Chunk,
-		}
-		// Only use chunks that fit the time range.
-		for _, c := range chunks {
+		s.lset = lset
+		s.chks = chunks
+
+		return true
+	}
+	if err := s.p.Err(); err != nil {
+		s.err = err
+	}
+	return false
+}
+
+// populatedChunkSeries loads chunk data from a store for a set of series
+// with known chunk references. It filters out chunks that do not fit the
+// given time range.
+type populatedChunkSeries struct {
+	set    chunkSeriesSet
+	chunks ChunkReader
+	mint, maxt int64
+
+	err  error
+	chks []ChunkMeta
+	lset labels.Labels
+}
+
+func (s *populatedChunkSeries) At() (labels.Labels, []ChunkMeta) { return s.lset, s.chks }
+func (s *populatedChunkSeries) Err() error                       { return s.err }
+
+func (s *populatedChunkSeries) Next() bool {
+	for s.set.Next() {
+		lset, chks := s.set.At()
+
+		for i := range chks {
+			c := &chks[i]
+
 			if c.MaxTime < s.mint {
+				chks = chks[1:]
 				continue
 			}
 			if c.MinTime > s.maxt {
+				chks = chks[:i]
 				break
 			}
-			ser.chunks = append(ser.chunks, c)
+			c.Chunk, s.err = s.chunks.Chunk(c.Ref)
+			if s.err != nil {
+				return false
+			}
 		}
-		// If no chunks of the series apply to the time range, skip it.
-		if len(ser.chunks) == 0 {
+		if len(chks) == 0 {
 			continue
 		}
 
-		s.cur = ser
+		s.lset = lset
+		s.chks = chks
+
 		return true
 	}
-	if s.it.Err() != nil {
-		s.err = s.it.Err()
+	if err := s.set.Err(); err != nil {
+		s.err = err
+	}
+	return false
+}
+
+// blockSeriesSet is a set of series from an inverted index query.
+type blockSeriesSet struct {
+	set chunkSeriesSet
+	err error
+	cur Series
+}
+
+func (s *blockSeriesSet) Next() bool {
+	for s.set.Next() {
+		lset, chunks := s.set.At()
+		s.cur = &chunkSeries{labels: lset, chunks: chunks}
+		return true
+	}
+	if s.set.Err() != nil {
+		s.err = s.set.Err()
 	}
 	return false
 }
@@ -488,10 +494,6 @@ func (s *blockSeriesSet) Err() error { return s.err }
 type chunkSeries struct {
 	labels labels.Labels
 	chunks []ChunkMeta // in-order chunk refs
-
-	// chunk is a function that retrieves chunks based on a reference
-	// number contained in the chunk meta information.
-	chunk func(ref uint64) (chunks.Chunk, error)
 }
 
 func (s *chunkSeries) Labels() labels.Labels {
@@ -499,21 +501,7 @@ func (s *chunkSeries) Labels() labels.Labels {
 }
 
 func (s *chunkSeries) Iterator() SeriesIterator {
-	var cs []chunks.Chunk
-	var mints []int64
-
-	for _, co := range s.chunks {
-		c, err := s.chunk(co.Ref)
-		if err != nil {
-			panic(err) // TODO(fabxc): add error series iterator.
-		}
-		cs = append(cs, c)
-		mints = append(mints, co.MinTime)
-	}
-
-	// TODO(fabxc): consider pushing chunk retrieval further down. In practice, we
-	// probably have to touch all chunks anyway and it doesn't matter.
-	return newChunkSeriesIterator(mints, cs)
+	return newChunkSeriesIterator(s.chunks)
 }
 
 // SeriesIterator iterates over the data of a time series.
@@ -599,43 +587,38 @@ func (it *chainedSeriesIterator) Err() error {
 // chunkSeriesIterator implements a series iterator on top
 // of a list of time-sorted, non-overlapping chunks.
 type chunkSeriesIterator struct {
-	mints  []int64 // minimum timestamps for each iterator
-	chunks []chunks.Chunk
+	chunks []ChunkMeta
 
 	i   int
 	cur chunks.Iterator
 }
 
-func newChunkSeriesIterator(mints []int64, cs []chunks.Chunk) *chunkSeriesIterator {
-	if len(mints) != len(cs) {
-		panic("chunk references and chunks length don't match")
-	}
+func newChunkSeriesIterator(cs []ChunkMeta) *chunkSeriesIterator {
 	return &chunkSeriesIterator{
-		mints:  mints,
 		chunks: cs,
 		i:      0,
-		cur:    cs[0].Iterator(),
+		cur:    cs[0].Chunk.Iterator(),
 	}
 }
 
 func (it *chunkSeriesIterator) Seek(t int64) (ok bool) {
 	// Only do binary search forward to stay in line with other iterators
 	// that can only move forward.
-	x := sort.Search(len(it.mints[it.i:]), func(i int) bool { return it.mints[i] >= t })
+	x := sort.Search(len(it.chunks[it.i:]), func(i int) bool { return it.chunks[i].MinTime >= t })
 	x += it.i
 
 	// If the timestamp was not found, it might be in the last chunk.
-	if x == len(it.mints) {
+	if x == len(it.chunks) {
 		x--
 	}
 	// Go to previous chunk if the chunk doesn't exactly start with t.
 	// If we are already at the first chunk, we use it as it's the best we have.
-	if x > 0 && it.mints[x] > t {
+	if x > 0 && it.chunks[x].MinTime > t {
 		x--
 	}
 
 	it.i = x
-	it.cur = it.chunks[x].Iterator()
+	it.cur = it.chunks[x].Chunk.Iterator()
 
 	for it.cur.Next() {
 		t0, _ := it.cur.At()
@@ -662,7 +645,7 @@ func (it *chunkSeriesIterator) Next() bool {
 	}
 
 	it.i++
-	it.cur = it.chunks[it.i].Iterator()
+	it.cur = it.chunks[it.i].Chunk.Iterator()
 
 	return it.Next()
 }
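The reworked chunkSeriesIterator keeps the forward-only Seek semantics: binary-search from the current position for the first chunk whose MinTime is at or after t, then step back one chunk so t still falls inside it. Below is a minimal standalone sketch of that search idea; the meta struct and seekChunk name are illustrative stand-ins, not the tsdb types.

package main

import (
	"fmt"
	"sort"
)

type meta struct{ MinTime, MaxTime int64 }

// seekChunk finds the chunk to resume from for timestamp t, starting at the
// current position i: search forward for the first chunk starting at or
// after t, then step back one chunk so t (if present) falls inside it.
func seekChunk(chunks []meta, i int, t int64) int {
	x := sort.Search(len(chunks[i:]), func(j int) bool { return chunks[i+j].MinTime >= t })
	x += i

	// If no chunk starts at or after t, it can only be in the last chunk.
	if x == len(chunks) {
		x--
	}
	// Go to the previous chunk if this one doesn't start exactly at t.
	if x > 0 && chunks[x].MinTime > t {
		x--
	}
	return x
}

func main() {
	chunks := []meta{{0, 99}, {100, 199}, {200, 299}}
	fmt.Println(seekChunk(chunks, 0, 150)) // 1
	fmt.Println(seekChunk(chunks, 0, 200)) // 2
	fmt.Println(seekChunk(chunks, 0, 500)) // 2
}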
459 vendor/github.com/fabxc/tsdb/reader.go generated vendored
@@ -1,459 +0,0 @@
|
||||||
package tsdb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/fabxc/tsdb/chunks"
|
|
||||||
"github.com/fabxc/tsdb/labels"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ChunkReader provides reading access of serialized time series data.
|
|
||||||
type ChunkReader interface {
|
|
||||||
// Chunk returns the series data chunk with the given reference.
|
|
||||||
Chunk(ref uint64) (chunks.Chunk, error)
|
|
||||||
|
|
||||||
// Close releases all underlying resources of the reader.
|
|
||||||
Close() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// chunkReader implements a SeriesReader for a serialized byte stream
|
|
||||||
// of series data.
|
|
||||||
type chunkReader struct {
|
|
||||||
// The underlying bytes holding the encoded series data.
|
|
||||||
bs [][]byte
|
|
||||||
|
|
||||||
// Closers for resources behind the byte slices.
|
|
||||||
cs []io.Closer
|
|
||||||
}
|
|
||||||
|
|
||||||
// newChunkReader returns a new chunkReader based on mmaped files found in dir.
|
|
||||||
func newChunkReader(dir string) (*chunkReader, error) {
|
|
||||||
files, err := sequenceFiles(dir, "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var cr chunkReader
|
|
||||||
|
|
||||||
for _, fn := range files {
|
|
||||||
f, err := openMmapFile(fn)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "mmap files")
|
|
||||||
}
|
|
||||||
cr.cs = append(cr.cs, f)
|
|
||||||
cr.bs = append(cr.bs, f.b)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, b := range cr.bs {
|
|
||||||
if len(b) < 4 {
|
|
||||||
return nil, errors.Wrapf(errInvalidSize, "validate magic in segment %d", i)
|
|
||||||
}
|
|
||||||
// Verify magic number.
|
|
||||||
if m := binary.BigEndian.Uint32(b[:4]); m != MagicSeries {
|
|
||||||
return nil, fmt.Errorf("invalid magic number %x", m)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &cr, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *chunkReader) Close() error {
|
|
||||||
return closeAll(s.cs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *chunkReader) Chunk(ref uint64) (chunks.Chunk, error) {
|
|
||||||
var (
|
|
||||||
seq = int(ref >> 32)
|
|
||||||
off = int((ref << 32) >> 32)
|
|
||||||
)
|
|
||||||
if seq >= len(s.bs) {
|
|
||||||
return nil, errors.Errorf("reference sequence %d out of range", seq)
|
|
||||||
}
|
|
||||||
b := s.bs[seq]
|
|
||||||
|
|
||||||
if int(off) >= len(b) {
|
|
||||||
return nil, errors.Errorf("offset %d beyond data size %d", off, len(b))
|
|
||||||
}
|
|
||||||
b = b[off:]
|
|
||||||
|
|
||||||
l, n := binary.Uvarint(b)
|
|
||||||
if n < 0 {
|
|
||||||
return nil, fmt.Errorf("reading chunk length failed")
|
|
||||||
}
|
|
||||||
b = b[n:]
|
|
||||||
enc := chunks.Encoding(b[0])
|
|
||||||
|
|
||||||
c, err := chunks.FromData(enc, b[1:1+l])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return c, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IndexReader provides reading access of serialized index data.
|
|
||||||
type IndexReader interface {
|
|
||||||
// LabelValues returns the possible label values
|
|
||||||
LabelValues(names ...string) (StringTuples, error)
|
|
||||||
|
|
||||||
// Postings returns the postings list iterator for the label pair.
|
|
||||||
Postings(name, value string) (Postings, error)
|
|
||||||
|
|
||||||
// Series returns the series for the given reference.
|
|
||||||
Series(ref uint32) (labels.Labels, []ChunkMeta, error)
|
|
||||||
|
|
||||||
// LabelIndices returns the label pairs for which indices exist.
|
|
||||||
LabelIndices() ([][]string, error)
|
|
||||||
|
|
||||||
// Close released the underlying resources of the reader.
|
|
||||||
Close() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// StringTuples provides access to a sorted list of string tuples.
|
|
||||||
type StringTuples interface {
|
|
||||||
// Total number of tuples in the list.
|
|
||||||
Len() int
|
|
||||||
// At returns the tuple at position i.
|
|
||||||
At(i int) ([]string, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type indexReader struct {
|
|
||||||
// The underlying byte slice holding the encoded series data.
|
|
||||||
b []byte
|
|
||||||
|
|
||||||
// Close that releases the underlying resources of the byte slice.
|
|
||||||
c io.Closer
|
|
||||||
|
|
||||||
// Cached hashmaps of section offsets.
|
|
||||||
labels map[string]uint32
|
|
||||||
postings map[string]uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
errInvalidSize = fmt.Errorf("invalid size")
|
|
||||||
errInvalidFlag = fmt.Errorf("invalid flag")
|
|
||||||
)
|
|
||||||
|
|
||||||
// newIndexReader returns a new indexReader on the given directory.
|
|
||||||
func newIndexReader(dir string) (*indexReader, error) {
|
|
||||||
f, err := openMmapFile(filepath.Join(dir, "index"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
r := &indexReader{b: f.b, c: f}
|
|
||||||
|
|
||||||
// Verify magic number.
|
|
||||||
if len(f.b) < 4 {
|
|
||||||
return nil, errors.Wrap(errInvalidSize, "index header")
|
|
||||||
}
|
|
||||||
if m := binary.BigEndian.Uint32(r.b[:4]); m != MagicIndex {
|
|
||||||
return nil, errors.Errorf("invalid magic number %x", m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The last two 4 bytes hold the pointers to the hashmaps.
|
|
||||||
loff := binary.BigEndian.Uint32(r.b[len(r.b)-8 : len(r.b)-4])
|
|
||||||
poff := binary.BigEndian.Uint32(r.b[len(r.b)-4:])
|
|
||||||
|
|
||||||
flag, b, err := r.section(loff)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "label index hashmap section at %d", loff)
|
|
||||||
}
|
|
||||||
if r.labels, err = readHashmap(flag, b); err != nil {
|
|
||||||
return nil, errors.Wrap(err, "read label index hashmap")
|
|
||||||
}
|
|
||||||
flag, b, err = r.section(poff)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "postings hashmap section at %d", loff)
|
|
||||||
}
|
|
||||||
if r.postings, err = readHashmap(flag, b); err != nil {
|
|
||||||
return nil, errors.Wrap(err, "read postings hashmap")
|
|
||||||
}
|
|
||||||
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func readHashmap(flag byte, b []byte) (map[string]uint32, error) {
|
|
||||||
if flag != flagStd {
|
|
||||||
return nil, errInvalidFlag
|
|
||||||
}
|
|
||||||
h := make(map[string]uint32, 512)
|
|
||||||
|
|
||||||
for len(b) > 0 {
|
|
||||||
l, n := binary.Uvarint(b)
|
|
||||||
if n < 1 {
|
|
||||||
return nil, errors.Wrap(errInvalidSize, "read key length")
|
|
||||||
}
|
|
||||||
b = b[n:]
|
|
||||||
|
|
||||||
if len(b) < int(l) {
|
|
||||||
return nil, errors.Wrap(errInvalidSize, "read key")
|
|
||||||
}
|
|
||||||
s := string(b[:l])
|
|
||||||
b = b[l:]
|
|
||||||
|
|
||||||
o, n := binary.Uvarint(b)
|
|
||||||
if n < 1 {
|
|
||||||
return nil, errors.Wrap(errInvalidSize, "read offset value")
|
|
||||||
}
|
|
||||||
b = b[n:]
|
|
||||||
|
|
||||||
h[s] = uint32(o)
|
|
||||||
}
|
|
||||||
|
|
||||||
return h, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *indexReader) Close() error {
|
|
||||||
return r.c.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *indexReader) section(o uint32) (byte, []byte, error) {
|
|
||||||
b := r.b[o:]
|
|
||||||
|
|
||||||
if len(b) < 5 {
|
|
||||||
return 0, nil, errors.Wrap(errInvalidSize, "read header")
|
|
||||||
}
|
|
||||||
|
|
||||||
flag := b[0]
|
|
||||||
l := binary.BigEndian.Uint32(b[1:5])
|
|
||||||
|
|
||||||
b = b[5:]
|
|
||||||
|
|
||||||
// b must have the given length plus 4 bytes for the CRC32 checksum.
|
|
||||||
if len(b) < int(l)+4 {
|
|
||||||
return 0, nil, errors.Wrap(errInvalidSize, "section content")
|
|
||||||
}
|
|
||||||
return flag, b[:l], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *indexReader) lookupSymbol(o uint32) (string, error) {
|
|
||||||
if int(o) > len(r.b) {
|
|
||||||
return "", errors.Errorf("invalid symbol offset %d", o)
|
|
||||||
}
|
|
||||||
l, n := binary.Uvarint(r.b[o:])
|
|
||||||
if n < 0 {
|
|
||||||
return "", errors.New("reading symbol length failed")
|
|
||||||
}
|
|
||||||
|
|
||||||
end := int(o) + n + int(l)
|
|
||||||
if end > len(r.b) {
|
|
||||||
return "", errors.New("invalid length")
|
|
||||||
}
|
|
||||||
b := r.b[int(o)+n : end]
|
|
||||||
|
|
||||||
return yoloString(b), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *indexReader) LabelValues(names ...string) (StringTuples, error) {
|
|
||||||
key := strings.Join(names, string(sep))
|
|
||||||
off, ok := r.labels[key]
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("label index doesn't exist")
|
|
||||||
}
|
|
||||||
|
|
||||||
flag, b, err := r.section(off)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "section at %d", off)
|
|
||||||
}
|
|
||||||
if flag != flagStd {
|
|
||||||
return nil, errInvalidFlag
|
|
||||||
}
|
|
||||||
l, n := binary.Uvarint(b)
|
|
||||||
if n < 1 {
|
|
||||||
return nil, errors.Wrap(errInvalidSize, "read label index size")
|
|
||||||
}
|
|
||||||
|
|
||||||
st := &serializedStringTuples{
|
|
||||||
l: int(l),
|
|
||||||
b: b[n:],
|
|
||||||
lookup: r.lookupSymbol,
|
|
||||||
}
|
|
||||||
return st, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *indexReader) LabelIndices() ([][]string, error) {
|
|
||||||
res := [][]string{}
|
|
||||||
|
|
||||||
for s := range r.labels {
|
|
||||||
res = append(res, strings.Split(s, string(sep)))
|
|
||||||
}
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *indexReader) Series(ref uint32) (labels.Labels, []ChunkMeta, error) {
|
|
||||||
k, n := binary.Uvarint(r.b[ref:])
|
|
||||||
if n < 1 {
|
|
||||||
return nil, nil, errors.Wrap(errInvalidSize, "number of labels")
|
|
||||||
}
|
|
||||||
|
|
||||||
b := r.b[int(ref)+n:]
|
|
||||||
lbls := make(labels.Labels, 0, k)
|
|
||||||
|
|
||||||
for i := 0; i < 2*int(k); i += 2 {
|
|
||||||
o, m := binary.Uvarint(b)
|
|
||||||
if m < 1 {
|
|
||||||
return nil, nil, errors.Wrap(errInvalidSize, "symbol offset")
|
|
||||||
}
|
|
||||||
n, err := r.lookupSymbol(uint32(o))
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, errors.Wrap(err, "symbol lookup")
|
|
||||||
}
|
|
||||||
b = b[m:]
|
|
||||||
|
|
||||||
o, m = binary.Uvarint(b)
|
|
||||||
if m < 1 {
|
|
||||||
return nil, nil, errors.Wrap(errInvalidSize, "symbol offset")
|
|
||||||
}
|
|
||||||
v, err := r.lookupSymbol(uint32(o))
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, errors.Wrap(err, "symbol lookup")
|
|
||||||
}
|
|
||||||
b = b[m:]
|
|
||||||
|
|
||||||
lbls = append(lbls, labels.Label{
|
|
||||||
Name: n,
|
|
||||||
Value: v,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the chunks meta data.
|
|
||||||
k, n = binary.Uvarint(b)
|
|
||||||
if n < 1 {
|
|
||||||
return nil, nil, errors.Wrap(errInvalidSize, "number of chunks")
|
|
||||||
}
|
|
||||||
|
|
||||||
b = b[n:]
|
|
||||||
chunks := make([]ChunkMeta, 0, k)
|
|
||||||
|
|
||||||
for i := 0; i < int(k); i++ {
|
|
||||||
firstTime, n := binary.Varint(b)
|
|
||||||
if n < 1 {
|
|
||||||
return nil, nil, errors.Wrap(errInvalidSize, "first time")
|
|
||||||
}
|
|
||||||
b = b[n:]
|
|
||||||
|
|
||||||
lastTime, n := binary.Varint(b)
|
|
||||||
if n < 1 {
|
|
||||||
return nil, nil, errors.Wrap(errInvalidSize, "last time")
|
|
||||||
}
|
|
||||||
b = b[n:]
|
|
||||||
|
|
||||||
o, n := binary.Uvarint(b)
|
|
||||||
if n < 1 {
|
|
||||||
return nil, nil, errors.Wrap(errInvalidSize, "chunk offset")
|
|
||||||
}
|
|
||||||
b = b[n:]
|
|
||||||
|
|
||||||
chunks = append(chunks, ChunkMeta{
|
|
||||||
Ref: o,
|
|
||||||
MinTime: firstTime,
|
|
||||||
MaxTime: lastTime,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return lbls, chunks, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *indexReader) Postings(name, value string) (Postings, error) {
|
|
||||||
key := name + string(sep) + value
|
|
||||||
|
|
||||||
off, ok := r.postings[key]
|
|
||||||
if !ok {
|
|
||||||
return nil, ErrNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
flag, b, err := r.section(off)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "section at %d", off)
|
|
||||||
}
|
|
||||||
|
|
||||||
if flag != flagStd {
|
|
||||||
return nil, errors.Wrapf(errInvalidFlag, "section at %d", off)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(fabxc): just read into memory as an intermediate solution.
|
|
||||||
// Add iterator over serialized data.
|
|
||||||
var l []uint32
|
|
||||||
|
|
||||||
for len(b) > 0 {
|
|
||||||
if len(b) < 4 {
|
|
||||||
return nil, errors.Wrap(errInvalidSize, "plain postings entry")
|
|
||||||
}
|
|
||||||
l = append(l, binary.BigEndian.Uint32(b[:4]))
|
|
||||||
|
|
||||||
b = b[4:]
|
|
||||||
}
|
|
||||||
|
|
||||||
return &listPostings{list: l, idx: -1}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type stringTuples struct {
|
|
||||||
l int // tuple length
|
|
||||||
s []string // flattened tuple entries
|
|
||||||
}
|
|
||||||
|
|
||||||
func newStringTuples(s []string, l int) (*stringTuples, error) {
|
|
||||||
if len(s)%l != 0 {
|
|
||||||
return nil, errors.Wrap(errInvalidSize, "string tuple list")
|
|
||||||
}
|
|
||||||
return &stringTuples{s: s, l: l}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *stringTuples) Len() int { return len(t.s) / t.l }
|
|
||||||
func (t *stringTuples) At(i int) ([]string, error) { return t.s[i : i+t.l], nil }
|
|
||||||
|
|
||||||
func (t *stringTuples) Swap(i, j int) {
|
|
||||||
c := make([]string, t.l)
|
|
||||||
copy(c, t.s[i:i+t.l])
|
|
||||||
|
|
||||||
for k := 0; k < t.l; k++ {
|
|
||||||
t.s[i+k] = t.s[j+k]
|
|
||||||
t.s[j+k] = c[k]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *stringTuples) Less(i, j int) bool {
|
|
||||||
for k := 0; k < t.l; k++ {
|
|
||||||
d := strings.Compare(t.s[i+k], t.s[j+k])
|
|
||||||
|
|
||||||
if d < 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if d > 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
type serializedStringTuples struct {
|
|
||||||
l int
|
|
||||||
b []byte
|
|
||||||
lookup func(uint32) (string, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *serializedStringTuples) Len() int {
|
|
||||||
// TODO(fabxc): Cache this?
|
|
||||||
return len(t.b) / (4 * t.l)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *serializedStringTuples) At(i int) ([]string, error) {
|
|
||||||
if len(t.b) < (i+t.l)*4 {
|
|
||||||
return nil, errInvalidSize
|
|
||||||
}
|
|
||||||
res := make([]string, 0, t.l)
|
|
||||||
|
|
||||||
for k := 0; k < t.l; k++ {
|
|
||||||
offset := binary.BigEndian.Uint32(t.b[(i+k)*4:])
|
|
||||||
|
|
||||||
s, err := t.lookup(offset)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "symbol lookup")
|
|
||||||
}
|
|
||||||
res = append(res, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
return res, nil
|
|
||||||
}
|
|
9 vendor/github.com/fabxc/tsdb/wal.go generated vendored
@@ -265,8 +265,9 @@ func (w *WAL) Close() error {
 	close(w.stopc)
 	<-w.donec
 
+	// Lock mutex and leave it locked so we panic if there's a bug causing
+	// the block to be used afterwards.
 	w.mtx.Lock()
-	defer w.mtx.Unlock()
 
 	if err := w.sync(); err != nil {
 		return err
@@ -447,7 +448,11 @@ func (r *WALReader) nextEntry() (WALEntryType, byte, []byte, error) {
 	cr := r.rs[r.cur]
 
 	et, flag, b, err := r.entry(cr)
-	if err == io.EOF {
+	// If we reached the end of the reader, advance to the next one
+	// and close.
+	// Do not close on the last one as it will still be appended to.
+	// XXX(fabxc): leaky abstraction.
+	if err == io.EOF && r.cur < len(r.rs)-1 {
 		// Current reader completed, close and move to the next one.
 		if err := cr.Close(); err != nil {
 			return 0, 0, nil, err
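The WAL reader change above makes nextEntry advance across segment readers on io.EOF, closing every completed segment except the last one, which is still being appended to. The following is a rough sketch of that pattern over plain io.ReadClosers; segmentScanner is an invented name and not part of the WAL code.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// segmentScanner reads from the current segment and, on io.EOF, closes it
// and moves to the next one, except for the last segment which stays open.
type segmentScanner struct {
	rs  []io.ReadCloser
	cur int
}

func (s *segmentScanner) next(buf []byte) (int, error) {
	for {
		cr := s.rs[s.cur]

		n, err := cr.Read(buf)
		if err == io.EOF && s.cur < len(s.rs)-1 {
			// Current segment completed, close it and move to the next one.
			if err := cr.Close(); err != nil {
				return 0, err
			}
			s.cur++
			continue
		}
		return n, err
	}
}

func main() {
	segs := []io.ReadCloser{
		io.NopCloser(bytes.NewReader([]byte("abc"))),
		io.NopCloser(bytes.NewReader([]byte("def"))),
	}
	s := &segmentScanner{rs: segs}
	buf := make([]byte, 3)
	for {
		n, err := s.next(buf)
		if err != nil {
			fmt.Println("done:", err)
			return
		}
		fmt.Println(string(buf[:n]))
	}
}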
591 vendor/github.com/fabxc/tsdb/writer.go generated vendored
@@ -1,591 +0,0 @@
|
||||||
package tsdb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"encoding/binary"
|
|
||||||
"hash"
|
|
||||||
"hash/crc32"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/bradfitz/slice"
|
|
||||||
"github.com/coreos/etcd/pkg/fileutil"
|
|
||||||
"github.com/fabxc/tsdb/chunks"
|
|
||||||
"github.com/fabxc/tsdb/labels"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// MagicSeries 4 bytes at the head of series file.
|
|
||||||
MagicSeries = 0x85BD40DD
|
|
||||||
|
|
||||||
// MagicIndex 4 bytes at the head of an index file.
|
|
||||||
MagicIndex = 0xBAAAD700
|
|
||||||
)
|
|
||||||
|
|
||||||
const compactionPageBytes = minSectorSize * 64
|
|
||||||
|
|
||||||
// ChunkWriter serializes a time block of chunked series data.
|
|
||||||
type ChunkWriter interface {
|
|
||||||
// WriteChunks writes several chunks. The data field of the ChunkMetas
|
|
||||||
// must be populated.
|
|
||||||
// After returning successfully, the Ref fields in the ChunkMetas
|
|
||||||
// is set and can be used to retrieve the chunks from the written data.
|
|
||||||
WriteChunks(chunks ...ChunkMeta) error
|
|
||||||
|
|
||||||
// Close writes any required finalization and closes the resources
|
|
||||||
// associated with the underlying writer.
|
|
||||||
Close() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// chunkWriter implements the ChunkWriter interface for the standard
|
|
||||||
// serialization format.
|
|
||||||
type chunkWriter struct {
|
|
||||||
dirFile *os.File
|
|
||||||
files []*os.File
|
|
||||||
wbuf *bufio.Writer
|
|
||||||
n int64
|
|
||||||
crc32 hash.Hash
|
|
||||||
|
|
||||||
segmentSize int64
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
defaultChunkSegmentSize = 512 * 1024 * 1024
|
|
||||||
|
|
||||||
chunksFormatV1 = 1
|
|
||||||
indexFormatV1 = 1
|
|
||||||
)
|
|
||||||
|
|
||||||
func newChunkWriter(dir string) (*chunkWriter, error) {
|
|
||||||
if err := os.MkdirAll(dir, 0777); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
dirFile, err := fileutil.OpenDir(dir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
cw := &chunkWriter{
|
|
||||||
dirFile: dirFile,
|
|
||||||
n: 0,
|
|
||||||
crc32: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
|
|
||||||
segmentSize: defaultChunkSegmentSize,
|
|
||||||
}
|
|
||||||
return cw, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *chunkWriter) tail() *os.File {
|
|
||||||
if len(w.files) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return w.files[len(w.files)-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
// finalizeTail writes all pending data to the current tail file,
|
|
||||||
// truncates its size, and closes it.
|
|
||||||
func (w *chunkWriter) finalizeTail() error {
|
|
||||||
tf := w.tail()
|
|
||||||
if tf == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := w.wbuf.Flush(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := fileutil.Fsync(tf); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// As the file was pre-allocated, we truncate any superfluous zero bytes.
|
|
||||||
off, err := tf.Seek(0, os.SEEK_CUR)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := tf.Truncate(off); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return tf.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *chunkWriter) cut() error {
|
|
||||||
// Sync current tail to disk and close.
|
|
||||||
w.finalizeTail()
|
|
||||||
|
|
||||||
p, _, err := nextSequenceFile(w.dirFile.Name(), "")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
f, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE, 0666)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err = fileutil.Preallocate(f, w.segmentSize, true); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err = w.dirFile.Sync(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write header metadata for new file.
|
|
||||||
|
|
||||||
metab := make([]byte, 8)
|
|
||||||
binary.BigEndian.PutUint32(metab[:4], MagicSeries)
|
|
||||||
metab[4] = chunksFormatV1
|
|
||||||
|
|
||||||
if _, err := f.Write(metab); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
w.files = append(w.files, f)
|
|
||||||
if w.wbuf != nil {
|
|
||||||
w.wbuf.Reset(f)
|
|
||||||
} else {
|
|
||||||
w.wbuf = bufio.NewWriterSize(f, 8*1024*1024)
|
|
||||||
}
|
|
||||||
w.n = 8
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *chunkWriter) write(wr io.Writer, b []byte) error {
|
|
||||||
n, err := wr.Write(b)
|
|
||||||
w.n += int64(n)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *chunkWriter) WriteChunks(chks ...ChunkMeta) error {
|
|
||||||
// Calculate maximum space we need and cut a new segment in case
|
|
||||||
// we don't fit into the current one.
|
|
||||||
maxLen := int64(binary.MaxVarintLen32)
|
|
||||||
for _, c := range chks {
|
|
||||||
maxLen += binary.MaxVarintLen32 + 1
|
|
||||||
maxLen += int64(len(c.Chunk.Bytes()))
|
|
||||||
}
|
|
||||||
newsz := w.n + maxLen
|
|
||||||
|
|
||||||
if w.wbuf == nil || w.n > w.segmentSize || newsz > w.segmentSize && maxLen <= w.segmentSize {
|
|
||||||
if err := w.cut(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write chunks sequentially and set the reference field in the ChunkMeta.
|
|
||||||
w.crc32.Reset()
|
|
||||||
wr := io.MultiWriter(w.crc32, w.wbuf)
|
|
||||||
|
|
||||||
b := make([]byte, binary.MaxVarintLen32)
|
|
||||||
n := binary.PutUvarint(b, uint64(len(chks)))
|
|
||||||
|
|
||||||
if err := w.write(wr, b[:n]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
seq := uint64(w.seq()) << 32
|
|
||||||
|
|
||||||
for i := range chks {
|
|
||||||
chk := &chks[i]
|
|
||||||
|
|
||||||
chk.Ref = seq | uint64(w.n)
|
|
||||||
|
|
||||||
n = binary.PutUvarint(b, uint64(len(chk.Chunk.Bytes())))
|
|
||||||
|
|
||||||
if err := w.write(wr, b[:n]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.write(wr, []byte{byte(chk.Chunk.Encoding())}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.write(wr, chk.Chunk.Bytes()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
chk.Chunk = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := w.write(w.wbuf, w.crc32.Sum(nil)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *chunkWriter) seq() int {
|
|
||||||
return len(w.files) - 1
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *chunkWriter) Close() error {
|
|
||||||
return w.finalizeTail()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChunkMeta holds information about a chunk of data.
|
|
||||||
type ChunkMeta struct {
|
|
||||||
// Ref and Chunk hold either a reference that can be used to retrieve
|
|
||||||
// chunk data or the data itself.
|
|
||||||
// Generally, only one of them is set.
|
|
||||||
Ref uint64
|
|
||||||
Chunk chunks.Chunk
|
|
||||||
|
|
||||||
MinTime, MaxTime int64 // time range the data covers
|
|
||||||
}
|
|
||||||
|
|
||||||
// IndexWriter serialized the index for a block of series data.
|
|
||||||
// The methods must generally be called in order they are specified.
|
|
||||||
type IndexWriter interface {
|
|
||||||
// AddSeries populates the index writer witha series and its offsets
|
|
||||||
// of chunks that the index can reference.
|
|
||||||
// The reference number is used to resolve a series against the postings
|
|
||||||
// list iterator. It only has to be available during the write processing.
|
|
||||||
AddSeries(ref uint32, l labels.Labels, chunks ...ChunkMeta) error
|
|
||||||
|
|
||||||
// WriteLabelIndex serializes an index from label names to values.
|
|
||||||
// The passed in values chained tuples of strings of the length of names.
|
|
||||||
WriteLabelIndex(names []string, values []string) error
|
|
||||||
|
|
||||||
// WritePostings writes a postings list for a single label pair.
|
|
||||||
WritePostings(name, value string, it Postings) error
|
|
||||||
|
|
||||||
// Close writes any finalization and closes theresources associated with
|
|
||||||
// the underlying writer.
|
|
||||||
Close() error
|
|
||||||
}
|
|
||||||
|
|
||||||
type indexWriterSeries struct {
|
|
||||||
labels labels.Labels
|
|
||||||
chunks []ChunkMeta // series file offset of chunks
|
|
||||||
offset uint32 // index file offset of series reference
|
|
||||||
}
|
|
||||||
|
|
||||||
// indexWriter implements the IndexWriter interface for the standard
|
|
||||||
// serialization format.
|
|
||||||
type indexWriter struct {
|
|
||||||
f *os.File
|
|
||||||
bufw *bufio.Writer
|
|
||||||
n int64
|
|
||||||
started bool
|
|
||||||
|
|
||||||
series map[uint32]*indexWriterSeries
|
|
||||||
symbols map[string]uint32 // symbol offsets
|
|
||||||
labelIndexes []hashEntry // label index offsets
|
|
||||||
postings []hashEntry // postings lists offsets
|
|
||||||
|
|
||||||
crc32 hash.Hash
|
|
||||||
}
|
|
||||||
|
|
||||||
func newIndexWriter(dir string) (*indexWriter, error) {
|
|
||||||
df, err := fileutil.OpenDir(dir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
f, err := os.OpenFile(filepath.Join(dir, "index"), os.O_CREATE|os.O_WRONLY, 0666)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := fileutil.Fsync(df); err != nil {
|
|
||||||
return nil, errors.Wrap(err, "sync dir")
|
|
||||||
}
|
|
||||||
|
|
||||||
iw := &indexWriter{
|
|
||||||
f: f,
|
|
||||||
bufw: bufio.NewWriterSize(f, 1*1024*1024),
|
|
||||||
n: 0,
|
|
||||||
symbols: make(map[string]uint32, 4096),
|
|
||||||
series: make(map[uint32]*indexWriterSeries, 4096),
|
|
||||||
crc32: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
|
|
||||||
}
|
|
||||||
if err := iw.writeMeta(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return iw, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *indexWriter) write(wr io.Writer, b []byte) error {
|
|
||||||
n, err := wr.Write(b)
|
|
||||||
w.n += int64(n)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// section writes a CRC32 checksummed section of length l and guarded by flag.
|
|
||||||
func (w *indexWriter) section(l uint32, flag byte, f func(w io.Writer) error) error {
|
|
||||||
w.crc32.Reset()
|
|
||||||
wr := io.MultiWriter(w.crc32, w.bufw)
|
|
||||||
|
|
||||||
b := [5]byte{flag, 0, 0, 0, 0}
|
|
||||||
binary.BigEndian.PutUint32(b[1:], l)
|
|
||||||
|
|
||||||
if err := w.write(wr, b[:]); err != nil {
|
|
||||||
return errors.Wrap(err, "writing header")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := f(wr); err != nil {
|
|
||||||
return errors.Wrap(err, "write contents")
|
|
||||||
}
|
|
||||||
if err := w.write(w.bufw, w.crc32.Sum(nil)); err != nil {
|
|
||||||
return errors.Wrap(err, "writing checksum")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *indexWriter) writeMeta() error {
|
|
||||||
b := [8]byte{}
|
|
||||||
|
|
||||||
binary.BigEndian.PutUint32(b[:4], MagicIndex)
|
|
||||||
b[4] = flagStd
|
|
||||||
|
|
||||||
return w.write(w.bufw, b[:])
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *indexWriter) AddSeries(ref uint32, lset labels.Labels, chunks ...ChunkMeta) error {
|
|
||||||
if _, ok := w.series[ref]; ok {
|
|
||||||
return errors.Errorf("series with reference %d already added", ref)
|
|
||||||
}
|
|
||||||
// Populate the symbol table from all label sets we have to reference.
|
|
||||||
for _, l := range lset {
|
|
||||||
w.symbols[l.Name] = 0
|
|
||||||
w.symbols[l.Value] = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
w.series[ref] = &indexWriterSeries{
|
|
||||||
labels: lset,
|
|
||||||
chunks: chunks,
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *indexWriter) writeSymbols() error {
|
|
||||||
// Generate sorted list of strings we will store as reference table.
|
|
||||||
symbols := make([]string, 0, len(w.symbols))
|
|
||||||
for s := range w.symbols {
|
|
||||||
symbols = append(symbols, s)
|
|
||||||
}
|
|
||||||
sort.Strings(symbols)
|
|
||||||
|
|
||||||
// The start of the section plus a 5 byte section header are our base.
|
|
||||||
// TODO(fabxc): switch to relative offsets and hold sections in a TOC.
|
|
||||||
base := uint32(w.n) + 5
|
|
||||||
|
|
||||||
buf := [binary.MaxVarintLen32]byte{}
|
|
||||||
b := append(make([]byte, 0, 4096), flagStd)
|
|
||||||
|
|
||||||
for _, s := range symbols {
|
|
||||||
w.symbols[s] = base + uint32(len(b))
|
|
||||||
|
|
||||||
n := binary.PutUvarint(buf[:], uint64(len(s)))
|
|
||||||
b = append(b, buf[:n]...)
|
|
||||||
b = append(b, s...)
|
|
||||||
}
|
|
||||||
|
|
||||||
l := uint32(len(b))
|
|
||||||
|
|
||||||
return w.section(l, flagStd, func(wr io.Writer) error {
|
|
||||||
return w.write(wr, b)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *indexWriter) writeSeries() error {
|
|
||||||
// Series must be stored sorted along their labels.
|
|
||||||
series := make([]*indexWriterSeries, 0, len(w.series))
|
|
||||||
|
|
||||||
for _, s := range w.series {
|
|
||||||
series = append(series, s)
|
|
||||||
}
|
|
||||||
slice.Sort(series, func(i, j int) bool {
|
|
||||||
		return labels.Compare(series[i].labels, series[j].labels) < 0
	})

	// Current end of file plus 5 bytes for section header.
	// TODO(fabxc): switch to relative offsets.
	base := uint32(w.n) + 5

	b := make([]byte, 0, 1<<20) // 1MiB
	buf := make([]byte, binary.MaxVarintLen64)

	for _, s := range series {
		// Write label set symbol references.
		s.offset = base + uint32(len(b))

		n := binary.PutUvarint(buf, uint64(len(s.labels)))
		b = append(b, buf[:n]...)

		for _, l := range s.labels {
			n = binary.PutUvarint(buf, uint64(w.symbols[l.Name]))
			b = append(b, buf[:n]...)
			n = binary.PutUvarint(buf, uint64(w.symbols[l.Value]))
			b = append(b, buf[:n]...)
		}

		// Write chunks meta data including reference into chunk file.
		n = binary.PutUvarint(buf, uint64(len(s.chunks)))
		b = append(b, buf[:n]...)

		for _, c := range s.chunks {
			n = binary.PutVarint(buf, c.MinTime)
			b = append(b, buf[:n]...)
			n = binary.PutVarint(buf, c.MaxTime)
			b = append(b, buf[:n]...)

			n = binary.PutUvarint(buf, uint64(c.Ref))
			b = append(b, buf[:n]...)
		}
	}

	l := uint32(len(b))

	return w.section(l, flagStd, func(wr io.Writer) error {
		return w.write(wr, b)
	})
}

func (w *indexWriter) init() error {
	if err := w.writeSymbols(); err != nil {
		return err
	}
	if err := w.writeSeries(); err != nil {
		return err
	}
	w.started = true

	return nil
}

func (w *indexWriter) WriteLabelIndex(names []string, values []string) error {
	if !w.started {
		if err := w.init(); err != nil {
			return err
		}
	}

	valt, err := newStringTuples(values, len(names))
	if err != nil {
		return err
	}
	sort.Sort(valt)

	w.labelIndexes = append(w.labelIndexes, hashEntry{
		name:   strings.Join(names, string(sep)),
		offset: uint32(w.n),
	})

	buf := make([]byte, binary.MaxVarintLen32)
	n := binary.PutUvarint(buf, uint64(len(names)))

	l := uint32(n) + uint32(len(values)*4)

	return w.section(l, flagStd, func(wr io.Writer) error {
		// First byte indicates tuple size for index.
		if err := w.write(wr, buf[:n]); err != nil {
			return err
		}

		for _, v := range valt.s {
			binary.BigEndian.PutUint32(buf, w.symbols[v])

			if err := w.write(wr, buf[:4]); err != nil {
				return err
			}
		}
		return nil
	})
}

func (w *indexWriter) WritePostings(name, value string, it Postings) error {
	if !w.started {
		if err := w.init(); err != nil {
			return err
		}
	}

	key := name + string(sep) + value

	w.postings = append(w.postings, hashEntry{
		name:   key,
		offset: uint32(w.n),
	})

	b := make([]byte, 0, 4096)
	buf := [4]byte{}

	// Order of the references in the postings list does not imply order
	// of the series references within the persisted block they are mapped to.
	// We have to sort the new references again.
	var refs []uint32

	for it.Next() {
		s, ok := w.series[it.At()]
		if !ok {
			return errors.Errorf("series for reference %d not found", it.At())
		}
		refs = append(refs, s.offset)
	}
	if err := it.Err(); err != nil {
		return err
	}

	slice.Sort(refs, func(i, j int) bool { return refs[i] < refs[j] })

	for _, r := range refs {
		binary.BigEndian.PutUint32(buf[:], r)
		b = append(b, buf[:]...)
	}

	return w.section(uint32(len(b)), flagStd, func(wr io.Writer) error {
		return w.write(wr, b)
	})
}

type hashEntry struct {
	name   string
	offset uint32
}

func (w *indexWriter) writeHashmap(h []hashEntry) error {
	b := make([]byte, 0, 4096)
	buf := [binary.MaxVarintLen32]byte{}

	for _, e := range h {
		n := binary.PutUvarint(buf[:], uint64(len(e.name)))
		b = append(b, buf[:n]...)
		b = append(b, e.name...)

		n = binary.PutUvarint(buf[:], uint64(e.offset))
		b = append(b, buf[:n]...)
	}

	return w.section(uint32(len(b)), flagStd, func(wr io.Writer) error {
		return w.write(wr, b)
	})
}

func (w *indexWriter) finalize() error {
	// Write out hash maps to jump to correct label index and postings sections.
	lo := uint32(w.n)
	if err := w.writeHashmap(w.labelIndexes); err != nil {
		return err
	}

	po := uint32(w.n)
	if err := w.writeHashmap(w.postings); err != nil {
		return err
	}

	// Terminate index file with offsets to hashmaps. This is the entry point
	// for any index query.
	// TODO(fabxc): also store offset to series section to allow plain
	// iteration over all existing series?
	b := [8]byte{}
	binary.BigEndian.PutUint32(b[:4], lo)
	binary.BigEndian.PutUint32(b[4:], po)

	return w.write(w.bufw, b[:])
}

func (w *indexWriter) Close() error {
	if err := w.finalize(); err != nil {
		return err
	}
	if err := w.bufw.Flush(); err != nil {
		return err
	}
	if err := fileutil.Fsync(w.f); err != nil {
		return err
	}
	return w.f.Close()
}
20	vendor/github.com/influxdb/influxdb/LICENSE	generated	vendored	Normal file
@@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2013-2015 Errplane Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
180	vendor/github.com/influxdb/influxdb/client/influxdb.go	generated	vendored	Normal file
@@ -0,0 +1,180 @@
package client

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"time"

	"github.com/influxdb/influxdb/tsdb"
)

const (
	// DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance
	DefaultTimeout = 0
)

// Config is used to specify what server to connect to.
// URL: The URL of the server connecting to.
// Username/Password are optional. They will be passed via basic auth if provided.
// UserAgent: If not provided, will default "InfluxDBClient",
// Timeout: If not provided, will default to 0 (no timeout)
type Config struct {
	URL       url.URL
	Username  string
	Password  string
	UserAgent string
	Timeout   time.Duration
	Precision string
}

// NewConfig will create a config to be used in connecting to the client
func NewConfig() Config {
	return Config{
		Timeout: DefaultTimeout,
	}
}

// Client is used to make calls to the server.
type Client struct {
	url        url.URL
	username   string
	password   string
	httpClient *http.Client
	userAgent  string
	precision  string
}

const (
	ConsistencyOne    = "one"
	ConsistencyAll    = "all"
	ConsistencyQuorum = "quorum"
	ConsistencyAny    = "any"
)

// NewClient will instantiate and return a connected client to issue commands to the server.
func NewClient(c Config) (*Client, error) {
	client := Client{
		url:        c.URL,
		username:   c.Username,
		password:   c.Password,
		httpClient: &http.Client{Timeout: c.Timeout},
		userAgent:  c.UserAgent,
		precision:  c.Precision,
	}
	if client.userAgent == "" {
		client.userAgent = "InfluxDBClient"
	}
	return &client, nil
}

// Write takes BatchPoints and allows for writing of multiple points with defaults
// If successful, error is nil and Response is nil
// If an error occurs, Response may contain additional information if populated.
func (c *Client) Write(bp BatchPoints) (*Response, error) {
	u := c.url
	u.Path = "write"

	var b bytes.Buffer
	for _, p := range bp.Points {
		if p.Raw != "" {
			if _, err := b.WriteString(p.Raw); err != nil {
				return nil, err
			}
		} else {
			for k, v := range bp.Tags {
				if p.Tags == nil {
					p.Tags = make(map[string]string, len(bp.Tags))
				}
				p.Tags[k] = v
			}

			if _, err := b.WriteString(p.MarshalString()); err != nil {
				return nil, err
			}
		}

		if err := b.WriteByte('\n'); err != nil {
			return nil, err
		}
	}

	req, err := http.NewRequest("POST", u.String(), &b)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "")
	req.Header.Set("User-Agent", c.userAgent)
	if c.username != "" {
		req.SetBasicAuth(c.username, c.password)
	}
	params := req.URL.Query()
	params.Set("db", bp.Database)
	params.Set("rp", bp.RetentionPolicy)
	params.Set("precision", bp.Precision)
	params.Set("consistency", bp.WriteConsistency)
	req.URL.RawQuery = params.Encode()

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var response Response
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
		var err = fmt.Errorf(string(body))
		response.Err = err
		return &response, err
	}

	return nil, nil
}

// Structs

// Response represents a list of statement results.
type Response struct {
	Err error
}

// Point defines the fields that will be written to the database
// Measurement, Time, and Fields are required
// Precision can be specified if the time is in epoch format (integer).
// Valid values for Precision are n, u, ms, s, m, and h
type Point struct {
	Measurement string
	Tags        map[string]string
	Time        time.Time
	Fields      map[string]interface{}
	Precision   string
	Raw         string
}

func (p *Point) MarshalString() string {
	return tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time).String()
}

// BatchPoints is used to send batched data in a single write.
// Database and Points are required
// If no retention policy is specified, it will use the databases default retention policy.
// If tags are specified, they will be "merged" with all points. If a point already has that tag, it is ignored.
// If time is specified, it will be applied to any point with an empty time.
// Precision can be specified if the time is in epoch format (integer).
// Valid values for Precision are n, u, ms, s, m, and h
type BatchPoints struct {
	Points           []Point           `json:"points,omitempty"`
	Database         string            `json:"database,omitempty"`
	RetentionPolicy  string            `json:"retentionPolicy,omitempty"`
	Tags             map[string]string `json:"tags,omitempty"`
	Time             time.Time         `json:"time,omitempty"`
	Precision        string            `json:"precision,omitempty"`
	WriteConsistency string            `json:"-"`
}
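For orientation only, a hedged usage sketch (not part of the vendored file) of the client above; the server URL and database name are placeholders, not values taken from this repository:

```go
package main

import (
	"log"
	"net/url"
	"time"

	"github.com/influxdb/influxdb/client"
)

func main() {
	// Placeholder address; NewClient only stores the config, it does not dial.
	u, err := url.Parse("http://localhost:8086")
	if err != nil {
		log.Fatal(err)
	}

	c, err := client.NewClient(client.Config{URL: *u})
	if err != nil {
		log.Fatal(err)
	}

	// BatchPoints carries the database name and the points to write in one request.
	bp := client.BatchPoints{
		Database: "mydb", // placeholder database
		Points: []client.Point{
			{
				Measurement: "cpu",
				Fields:      map[string]interface{}{"value": 0.64},
				Time:        time.Now(),
			},
		},
	}
	if _, err := c.Write(bp); err != nil {
		log.Fatal(err)
	}
}
```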
1392	vendor/github.com/influxdb/influxdb/tsdb/points.go	generated	vendored	Normal file
File diff suppressed because it is too large.
2	vendor/github.com/oklog/ulid/AUTHORS.md	generated	vendored	Normal file
@@ -0,0 +1,2 @@
- Peter Bourgon (@peterbourgon)
- Tomás Senart (@tsenart)
12	vendor/github.com/oklog/ulid/CHANGELOG.md	generated	vendored	Normal file
@@ -0,0 +1,12 @@
## 0.3.0 / 2017-01-03

* Implement ULID.Compare method

## 0.2.0 / 2016-12-13

* Remove year 2262 Timestamp bug. (#1)
* Gracefully handle invalid encodings when parsing.

## 0.1.0 / 2016-12-06

* First ULID release
17	vendor/github.com/oklog/ulid/CONTRIBUTING.md	generated	vendored	Normal file
@@ -0,0 +1,17 @@
# Contributing

We use GitHub to manage reviews of pull requests.

* If you have a trivial fix or improvement, go ahead and create a pull
  request, addressing (with `@...`) one or more of the maintainers
  (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.

* If you plan to do something more involved, first propose your ideas
  in a Github issue. This will avoid unnecessary work and surely give
  you and us a good deal of inspiration.

* Relevant coding style guidelines are the [Go Code Review
  Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
  and the _Formatting and style_ section of Peter Bourgon's [Go: Best
  Practices for Production
  Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
201	vendor/github.com/oklog/ulid/LICENSE	generated	vendored	Normal file
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
147	vendor/github.com/oklog/ulid/README.md	generated	vendored	Normal file
@@ -0,0 +1,147 @@
# Universally Unique Lexicographically Sortable Identifier

![Project status](https://img.shields.io/badge/version-0.3.0-yellow.svg)
[![Build Status](https://secure.travis-ci.org/oklog/ulid.png)](http://travis-ci.org/oklog/ulid)
[![Go Report Card](https://goreportcard.com/badge/oklog/ulid?cache=0)](https://goreportcard.com/report/oklog/ulid)
[![Coverage Status](https://coveralls.io/repos/github/oklog/ulid/badge.svg?branch=master&cache=0)](https://coveralls.io/github/oklog/ulid?branch=master)
[![GoDoc](https://godoc.org/github.com/oklog/ulid?status.svg)](https://godoc.org/github.com/oklog/ulid)
[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/ulid/master/LICENSE)

A Go port of [alizain/ulid](https://github.com/alizain/ulid) with binary format implemented.

## Background

A GUID/UUID can be suboptimal for many use-cases because:

- It isn't the most character efficient way of encoding 128 bits
- UUID v1/v2 is impractical in many environments, as it requires access to a unique, stable MAC address
- UUID v3/v5 requires a unique seed and produces randomly distributed IDs, which can cause fragmentation in many data structures
- UUID v4 provides no other information than randomness which can cause fragmentation in many data structures

A ULID however:

- Is compatible with UUID/GUID's
- 1.21e+24 unique ULIDs per millisecond (1,208,925,819,614,629,174,706,176 to be exact)
- Lexicographically sortable
- Canonically encoded as a 26 character string, as opposed to the 36 character UUID
- Uses Crockford's base32 for better efficiency and readability (5 bits per character)
- Case insensitive
- No special characters (URL safe)

## Install

```shell
go get github.com/oklog/ulid
```

## Usage

An ULID is constructed with a `time.Time` and an `io.Reader` entropy source.
This design allows for greater flexibility in choosing your trade-offs.

Please note that `rand.Rand` from the `math` package is *not* safe for concurrent use.
Instantiate one per long living go-routine or use a `sync.Pool` if you want to avoid the potential contention of a locked `rand.Source` as its been frequently observed in the package level functions.

```go
func ExampleULID() {
	t := time.Unix(1000000, 0)
	entropy := rand.New(rand.NewSource(t.UnixNano()))
	fmt.Println(ulid.MustNew(ulid.Timestamp(t), entropy))
	// Output: 0000XSNJG0MQJHBF4QX1EFD6Y3
}
```
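The `sync.Pool` route mentioned above is not spelled out in the vendored README; the following is a minimal sketch (not from this repository) of what it could look like, built only on `ulid.Now` and `ulid.MustNew` from the vendored `ulid.go` later in this diff:

```go
package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"

	"github.com/oklog/ulid"
)

// entropyPool hands each caller its own *rand.Rand, so concurrent
// goroutines never contend on a shared, locked rand.Source.
var entropyPool = sync.Pool{
	New: func() interface{} {
		return rand.New(rand.NewSource(time.Now().UnixNano()))
	},
}

func newULID() ulid.ULID {
	entropy := entropyPool.Get().(*rand.Rand)
	defer entropyPool.Put(entropy)
	return ulid.MustNew(ulid.Now(), entropy)
}

func main() {
	fmt.Println(newULID())
}
```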
## Specification

Below is the current specification of ULID as implemented in this repository.

### Components

**Timestamp**
- 48 bits
- UNIX-time in milliseconds
- Won't run out of space till the year 10895 AD

**Entropy**
- 80 bits
- User defined entropy source.

### Encoding

[Crockford's Base32](http://www.crockford.com/wrmg/base32.html) is used as shown.
This alphabet excludes the letters I, L, O, and U to avoid confusion and abuse.

```
0123456789ABCDEFGHJKMNPQRSTVWXYZ
```

### Binary Layout and Byte Order

The components are encoded as 16 octets. Each component is encoded with the Most Significant Byte first (network byte order).

```
 0                   1                   2                   3
 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                      32_bit_uint_time_high                    |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|     16_bit_uint_time_low      |       16_bit_uint_random      |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                       32_bit_uint_random                      |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                       32_bit_uint_random                      |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
```

### String Representation

```
 01AN4Z07BY      79KA1307SR9X4MV3

|----------|    |----------------|
 Timestamp          Entropy
  10 chars           16 chars
   48bits             80bits
   base32             base32
```
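As a quick illustration of the string form above, here is a hedged sketch (not part of the README) using the `Parse`, `Time`, and `Compare` functions defined in the vendored `ulid.go` later in this diff; the second ULID string is a made-up example value:

```go
package main

import (
	"fmt"

	"github.com/oklog/ulid"
)

func main() {
	id := ulid.MustParse("01AN4Z07BY79KA1307SR9X4MV3")

	// The first 10 characters (48 bits) decode to Unix milliseconds.
	fmt.Println(id.Time())

	// ULIDs compare byte-wise, so lexicographic order matches chronological order.
	later := ulid.MustParse("01AN4Z07BZ79KA1307SR9X4MV3")
	fmt.Println(id.Compare(later) < 0) // true
}
```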
## Test

```shell
go test ./...
```

## Benchmarks

On a Intel Core i7 Ivy Bridge 2.7 GHz, MacOS 10.12.1 and Go 1.8.0beta1

```
BenchmarkNew/WithCryptoEntropy-8        2000000     771 ns/op     20.73 MB/s      16 B/op   1 allocs/op
BenchmarkNew/WithEntropy-8              20000000    65.8 ns/op    243.01 MB/s     16 B/op   1 allocs/op
BenchmarkNew/WithoutEntropy-8           50000000    30.0 ns/op    534.06 MB/s     16 B/op   1 allocs/op
BenchmarkMustNew/WithCryptoEntropy-8    2000000     781 ns/op     20.48 MB/s      16 B/op   1 allocs/op
BenchmarkMustNew/WithEntropy-8          20000000    70.0 ns/op    228.51 MB/s     16 B/op   1 allocs/op
BenchmarkMustNew/WithoutEntropy-8       50000000    34.6 ns/op    462.98 MB/s     16 B/op   1 allocs/op
BenchmarkParse-8                        50000000    30.0 ns/op    866.16 MB/s     0 B/op    0 allocs/op
BenchmarkMustParse-8                    50000000    35.2 ns/op    738.94 MB/s     0 B/op    0 allocs/op
BenchmarkString-8                       20000000    64.9 ns/op    246.40 MB/s     32 B/op   1 allocs/op
BenchmarkMarshal/Text-8                 20000000    55.8 ns/op    286.84 MB/s     32 B/op   1 allocs/op
BenchmarkMarshal/TextTo-8               100000000   22.4 ns/op    714.91 MB/s     0 B/op    0 allocs/op
BenchmarkMarshal/Binary-8               300000000   4.02 ns/op    3981.77 MB/s    0 B/op    0 allocs/op
BenchmarkMarshal/BinaryTo-8             2000000000  1.18 ns/op    13551.75 MB/s   0 B/op    0 allocs/op
BenchmarkUnmarshal/Text-8               100000000   20.5 ns/op    1265.27 MB/s    0 B/op    0 allocs/op
BenchmarkUnmarshal/Binary-8             300000000   4.94 ns/op    3240.01 MB/s    0 B/op    0 allocs/op
BenchmarkNow-8                          100000000   15.1 ns/op    528.09 MB/s     0 B/op    0 allocs/op
BenchmarkTimestamp-8                    2000000000  0.29 ns/op    27271.59 MB/s   0 B/op    0 allocs/op
BenchmarkTime-8                         2000000000  0.58 ns/op    13717.80 MB/s   0 B/op    0 allocs/op
BenchmarkSetTime-8                      2000000000  0.89 ns/op    9023.95 MB/s    0 B/op    0 allocs/op
BenchmarkEntropy-8                      200000000   7.62 ns/op    1311.66 MB/s    0 B/op    0 allocs/op
BenchmarkSetEntropy-8                   2000000000  0.88 ns/op    11376.54 MB/s   0 B/op    0 allocs/op
BenchmarkCompare-8                      200000000   7.34 ns/op    4359.23 MB/s    0 B/op    0 allocs/op
```

## Prior Art

- [alizain/ulid](https://github.com/alizain/ulid)
- [RobThree/NUlid](https://github.com/RobThree/NUlid)
- [imdario/go-ulid](https://github.com/imdario/go-ulid)
332	vendor/github.com/oklog/ulid/ulid.go	generated	vendored	Normal file
@@ -0,0 +1,332 @@
// Copyright 2016 The Oklog Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ulid

import (
	"bytes"
	"errors"
	"io"
	"time"
)

/*
An ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier

	The components are encoded as 16 octets.
	Each component is encoded with the MSB first (network byte order).

	 0                   1                   2                   3
	 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	|                      32_bit_uint_time_high                    |
	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	|     16_bit_uint_time_low      |       16_bit_uint_random      |
	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	|                       32_bit_uint_random                      |
	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	|                       32_bit_uint_random                      |
	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
type ULID [16]byte

var (
	// ErrDataSize is returned when parsing or unmarshaling ULIDs with the wrong
	// data size.
	ErrDataSize = errors.New("ulid: bad data size when unmarshaling")

	// ErrBufferSize is returned when marshalling ULIDs to a buffer of insufficient
	// size.
	ErrBufferSize = errors.New("ulid: bad buffer size when marshaling")

	// ErrBigTime is returned when constructing an ULID with a time that is larger
	// than MaxTime.
	ErrBigTime = errors.New("ulid: time too big")
)

// New returns an ULID with the given Unix milliseconds timestamp and an
// optional entropy source. Use the Timestamp function to convert
// a time.Time to Unix milliseconds.
//
// ErrBigTime is returned when passing a timestamp bigger than MaxTime.
// Reading from the entropy source may also return an error.
func New(ms uint64, entropy io.Reader) (id ULID, err error) {
	if err = id.SetTime(ms); err != nil {
		return id, err
	}

	if entropy != nil {
		_, err = entropy.Read(id[6:])
	}

	return id, err
}

// MustNew is a convenience function equivalent to New that panics on failure
// instead of returning an error.
func MustNew(ms uint64, entropy io.Reader) ULID {
	id, err := New(ms, entropy)
	if err != nil {
		panic(err)
	}
	return id
}

// Parse parses an encoded ULID, returning an error in case of failure.
//
// ErrDataSize is returned if the len(ulid) is different from an encoded
// ULID's length. Invalid encodings produce undefined ULIDs.
func Parse(ulid string) (id ULID, err error) {
	return id, id.UnmarshalText([]byte(ulid))
}

// MustParse is a convenience function equivalent to Parse that panics on failure
// instead of returning an error.
func MustParse(ulid string) ULID {
	id, err := Parse(ulid)
	if err != nil {
		panic(err)
	}
	return id
}

// String returns a lexicographically sortable string encoded ULID
// (26 characters, non-standard base 32) e.g. 01AN4Z07BY79KA1307SR9X4MV3
// Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy
func (id ULID) String() string {
	ulid := make([]byte, EncodedSize)
	_ = id.MarshalTextTo(ulid)
	return string(ulid)
}

// MarshalBinary implements the encoding.BinaryMarshaler interface by
// returning the ULID as a byte slice.
func (id ULID) MarshalBinary() ([]byte, error) {
	ulid := make([]byte, len(id))
	return ulid, id.MarshalBinaryTo(ulid)
}

// MarshalBinaryTo writes the binary encoding of the ULID to the given buffer.
// ErrBufferSize is returned when the len(dst) != 16.
func (id ULID) MarshalBinaryTo(dst []byte) error {
	if len(dst) != len(id) {
		return ErrBufferSize
	}

	copy(dst, id[:])
	return nil
}

// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface by
// copying the passed data and converting it to an ULID. ErrDataSize is
// returned if the data length is different from ULID length.
func (id *ULID) UnmarshalBinary(data []byte) error {
	if len(data) != len(*id) {
		return ErrDataSize
	}

	copy((*id)[:], data)
	return nil
}

// Encoding is the base 32 encoding alphabet used in ULID strings.
const Encoding = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"

// MarshalText implements the encoding.TextMarshaler interface by
// returning the string encoded ULID.
func (id ULID) MarshalText() ([]byte, error) {
	ulid := make([]byte, EncodedSize)
	return ulid, id.MarshalTextTo(ulid)
}

// MarshalTextTo writes the ULID as a string to the given buffer.
// ErrBufferSize is returned when the len(dst) != 26.
func (id ULID) MarshalTextTo(dst []byte) error {
	// Optimized unrolled loop ahead.
	// From https://github.com/RobThree/NUlid

	if len(dst) != EncodedSize {
		return ErrBufferSize
	}

	// 10 byte timestamp
	dst[0] = Encoding[(id[0]&224)>>5]
	dst[1] = Encoding[id[0]&31]
	dst[2] = Encoding[(id[1]&248)>>3]
	dst[3] = Encoding[((id[1]&7)<<2)|((id[2]&192)>>6)]
	dst[4] = Encoding[(id[2]&62)>>1]
	dst[5] = Encoding[((id[2]&1)<<4)|((id[3]&240)>>4)]
	dst[6] = Encoding[((id[3]&15)<<1)|((id[4]&128)>>7)]
	dst[7] = Encoding[(id[4]&124)>>2]
	dst[8] = Encoding[((id[4]&3)<<3)|((id[5]&224)>>5)]
	dst[9] = Encoding[id[5]&31]

	// 16 bytes of entropy
	dst[10] = Encoding[(id[6]&248)>>3]
	dst[11] = Encoding[((id[6]&7)<<2)|((id[7]&192)>>6)]
	dst[12] = Encoding[(id[7]&62)>>1]
	dst[13] = Encoding[((id[7]&1)<<4)|((id[8]&240)>>4)]
	dst[14] = Encoding[((id[8]&15)<<1)|((id[9]&128)>>7)]
	dst[15] = Encoding[(id[9]&124)>>2]
	dst[16] = Encoding[((id[9]&3)<<3)|((id[10]&224)>>5)]
	dst[17] = Encoding[id[10]&31]
	dst[18] = Encoding[(id[11]&248)>>3]
	dst[19] = Encoding[((id[11]&7)<<2)|((id[12]&192)>>6)]
	dst[20] = Encoding[(id[12]&62)>>1]
	dst[21] = Encoding[((id[12]&1)<<4)|((id[13]&240)>>4)]
	dst[22] = Encoding[((id[13]&15)<<1)|((id[14]&128)>>7)]
	dst[23] = Encoding[(id[14]&124)>>2]
	dst[24] = Encoding[((id[14]&3)<<3)|((id[15]&224)>>5)]
	dst[25] = Encoding[id[15]&31]

	return nil
}

// Byte to index table for O(1) lookups when unmarshaling.
// We use 0xFF as sentinel value for invalid indexes.
var dec = [...]byte{
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
	0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
	0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF,
	0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
	0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
	0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14,
	0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
	0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
}

// EncodedSize is the length of a text encoded ULID.
const EncodedSize = 26

// UnmarshalText implements the encoding.TextUnmarshaler interface by
// parsing the data as string encoded ULID.
//
// ErrDataSize is returned if the len(v) is different from an encoded
// ULID's length. Invalid encodings produce undefined ULIDs.
func (id *ULID) UnmarshalText(v []byte) error {
	// Optimized unrolled loop ahead.
	// From https://github.com/RobThree/NUlid
	if len(v) != EncodedSize {
		return ErrDataSize
	}

	// 6 bytes timestamp (48 bits)
	(*id)[0] = ((dec[v[0]] << 5) | dec[v[1]])
	(*id)[1] = ((dec[v[2]] << 3) | (dec[v[3]] >> 2))
	(*id)[2] = ((dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4))
	(*id)[3] = ((dec[v[5]] << 4) | (dec[v[6]] >> 1))
	(*id)[4] = ((dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3))
	(*id)[5] = ((dec[v[8]] << 5) | dec[v[9]])

	// 10 bytes of entropy (80 bits)
	(*id)[6] = ((dec[v[10]] << 3) | (dec[v[11]] >> 2))
	(*id)[7] = ((dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4))
	(*id)[8] = ((dec[v[13]] << 4) | (dec[v[14]] >> 1))
	(*id)[9] = ((dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3))
	(*id)[10] = ((dec[v[16]] << 5) | dec[v[17]])
	(*id)[11] = ((dec[v[18]] << 3) | dec[v[19]]>>2)
	(*id)[12] = ((dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4))
	(*id)[13] = ((dec[v[21]] << 4) | (dec[v[22]] >> 1))
	(*id)[14] = ((dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3))
	(*id)[15] = ((dec[v[24]] << 5) | dec[v[25]])

	return nil
}

// Time returns the Unix time in milliseconds encoded in the ULID.
func (id ULID) Time() uint64 {
	return uint64(id[5]) | uint64(id[4])<<8 |
		uint64(id[3])<<16 | uint64(id[2])<<24 |
		uint64(id[1])<<32 | uint64(id[0])<<40
}

// maxTime is the maximum Unix time in milliseconds that can be
// represented in an ULID.
var maxTime = ULID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}.Time()

// MaxTime returns the maximum Unix time in milliseconds that
// can be encoded in an ULID.
func MaxTime() uint64 { return maxTime }

// Now is a convenience function that returns the current
// UTC time in Unix milliseconds. Equivalent to:
//   Timestamp(time.Now().UTC())
func Now() uint64 { return Timestamp(time.Now().UTC()) }

// Timestamp converts a time.Time to Unix milliseconds.
//
// Because of the way ULID stores time, times from the year
// 10889 produces undefined results.
func Timestamp(t time.Time) uint64 {
	return uint64(t.Unix())*1000 +
		uint64(t.Nanosecond()/int(time.Millisecond))
}

// SetTime sets the time component of the ULID to the given Unix time
// in milliseconds.
func (id *ULID) SetTime(ms uint64) error {
	if ms > maxTime {
		return ErrBigTime
	}

	(*id)[0] = byte(ms >> 40)
	(*id)[1] = byte(ms >> 32)
	(*id)[2] = byte(ms >> 24)
	(*id)[3] = byte(ms >> 16)
	(*id)[4] = byte(ms >> 8)
	(*id)[5] = byte(ms)

	return nil
}

// Entropy returns the entropy from the ULID.
func (id ULID) Entropy() []byte {
	e := make([]byte, 10)
	copy(e, id[6:])
	return e
}

// SetEntropy sets the ULID entropy to the passed byte slice.
// ErrDataSize is returned if len(e) != 10.
func (id *ULID) SetEntropy(e []byte) error {
	if len(e) != 10 {
		return ErrDataSize
	}

	copy((*id)[6:], e)
	return nil
}

// Compare returns an integer comparing id and other lexicographically.
// The result will be 0 if id==other, -1 if id < other, and +1 if id > other.
func (id ULID) Compare(other ULID) int {
	return bytes.Compare(id[:], other[:])
}
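A hedged sketch (not from this repository) pairing `New` above with a cryptographically secure entropy source; `crypto/rand.Reader` satisfies the `io.Reader` the constructor expects:

```go
package main

import (
	crand "crypto/rand"
	"fmt"
	"time"

	"github.com/oklog/ulid"
)

func main() {
	// New returns ErrBigTime for timestamps beyond MaxTime and surfaces any
	// error from reading the entropy source.
	id, err := ulid.New(ulid.Timestamp(time.Now()), crand.Reader)
	if err != nil {
		fmt.Println("ulid:", err)
		return
	}
	fmt.Println(id)
}
```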
20	vendor/vendor.json	vendored
@@ -368,22 +368,22 @@
 			"revisionTime": "2016-09-30T00:14:02Z"
 		},
 		{
-			"checksumSHA1": "L/5bfnMJXzbLXj+vN7Ph1F23+T4=",
+			"checksumSHA1": "Aj4Cn1RClamxluIri/LQMnK/yB4=",
 			"path": "github.com/fabxc/tsdb",
-			"revision": "cc0a7c82793515d6311801b7eb3ef8562e61f4c3",
-			"revisionTime": "2017-03-02T20:54:30Z"
+			"revision": "ca1bc920b795cfc670002e7643471b0277e79a9b",
+			"revisionTime": "2017-03-08T15:54:13Z"
 		},
 		{
 			"checksumSHA1": "uVzWuLvF646YjiKomsc2CR1ua58=",
 			"path": "github.com/fabxc/tsdb/chunks",
-			"revision": "012cf4ef254e34a10befd0b592bcfa5b1794e92b",
-			"revisionTime": "2017-02-04T10:53:52Z"
+			"revision": "ca1bc920b795cfc670002e7643471b0277e79a9b",
+			"revisionTime": "2017-03-08T15:54:13Z"
 		},
 		{
 			"checksumSHA1": "0Nl+7XBhC+XLpkgkWc6cEtW37aE=",
 			"path": "github.com/fabxc/tsdb/labels",
-			"revision": "012cf4ef254e34a10befd0b592bcfa5b1794e92b",
-			"revisionTime": "2017-02-04T10:53:52Z"
+			"revision": "ca1bc920b795cfc670002e7643471b0277e79a9b",
+			"revisionTime": "2017-03-08T15:54:13Z"
 		},
 		{
 			"checksumSHA1": "ww7LVo7jNJ1o6sfRcromEHKyY+o=",
@@ -573,6 +573,12 @@
 			"revision": "1d49c987357a327b5b03aa84cbddd582c328615d",
 			"revisionTime": "2016-09-28T00:14:32Z"
 		},
+		{
+			"checksumSHA1": "B1iGaUz7NrjEmCjVdIgH5pvkTe8=",
+			"path": "github.com/oklog/ulid",
+			"revision": "66bb6560562feca7045b23db1ae85b01260f87c5",
+			"revisionTime": "2017-01-17T20:06:51Z"
+		},
 		{
 			"checksumSHA1": "3YJklSuzSE1Rt8A+2dhiWSmf/fw=",
 			"origin": "k8s.io/client-go/1.5/vendor/github.com/pborman/uuid",