Merge pull request #65 from prometheus/comment-fixes

Minor comment fixes and additions.
Fabian Reinartz, 2017-04-28 16:07:16 +02:00, committed by GitHub
commit 5b3cb11f3d
7 changed files with 18 additions and 17 deletions


@@ -35,7 +35,7 @@ type DiskBlock interface {
 	// Index returns an IndexReader over the block's data.
 	Index() IndexReader
-	// Series returns a SeriesReader over the block's data.
+	// Chunks returns a ChunkReader over the block's data.
 	Chunks() ChunkReader
 	// Close releases all underlying resources of the block.
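As a rough illustration of how these accessors compose, a hypothetical consumer of the interface might look like the sketch below. The dumpBlock helper, its body, and the assumption that Close returns an error are illustrations only, not code from the repository.

// dumpBlock is a hypothetical helper showing the intended call pattern:
// obtain the readers, use them, then release the block's resources.
func dumpBlock(b DiskBlock) error {
	ir := b.Index()  // IndexReader over the block's index data.
	cr := b.Chunks() // ChunkReader over the block's chunk data.

	// ... resolve series via ir, then fetch their chunks via cr ...
	_, _ = ir, cr

	// Close releases all underlying resources of the block
	// (assumed here to return an error).
	return b.Close()
}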


@@ -28,7 +28,7 @@ import (
 )
 const (
-	// MagicChunks is 4 bytes at the head of series file.
+	// MagicChunks is 4 bytes at the head of a series file.
 	MagicChunks = 0x85BD40DD
 )
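A brief sketch of why such a magic constant exists: a reader can check the first four bytes of a chunks file before trusting its contents. The helper below and the big-endian byte order are assumptions for illustration only.

package chunks

import (
	"encoding/binary"
	"fmt"
)

// checkChunksMagic is a hypothetical validation helper: it compares the
// first 4 bytes of a series file against MagicChunks (byte order assumed).
func checkChunksMagic(b []byte) error {
	if len(b) < 4 {
		return fmt.Errorf("chunks file too short: %d bytes", len(b))
	}
	if m := binary.BigEndian.Uint32(b[:4]); m != MagicChunks {
		return fmt.Errorf("invalid magic number %x", m)
	}
	return nil
}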
@@ -45,10 +45,10 @@ type ChunkMeta struct {
 // ChunkWriter serializes a time block of chunked series data.
 type ChunkWriter interface {
-	// WriteChunks writes several chunks. The data field of the ChunkMetas
+	// WriteChunks writes several chunks. The Chunk field of the ChunkMetas
 	// must be populated.
 	// After returning successfully, the Ref fields in the ChunkMetas
-	// is set and can be used to retrieve the chunks from the written data.
+	// are set and can be used to retrieve the chunks from the written data.
 	WriteChunks(chunks ...*ChunkMeta) error
 	// Close writes any required finalization and closes the resources
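To make the corrected contract concrete, a caller might use it as in the following hedged sketch: Chunk must be set before the call, and Ref is only meaningful after it returns. The writeOne helper and Ref's exact integer type are assumptions.

// writeOne is a hypothetical helper demonstrating the ChunkWriter contract.
func writeOne(w ChunkWriter, meta *ChunkMeta) (uint64, error) {
	// The Chunk field of meta must already be populated by the caller.
	if err := w.WriteChunks(meta); err != nil {
		return 0, err
	}
	// After a successful return, Ref is set and can be used to retrieve
	// the chunk from the written data (e.g. via a ChunkReader). Ref's
	// type is assumed to be uint64 for this sketch.
	return meta.Ref, nil
}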
@@ -174,9 +174,9 @@ func (w *chunkWriter) write(wr io.Writer, b []byte) error {
 func (w *chunkWriter) WriteChunks(chks ...*ChunkMeta) error {
 	// Calculate maximum space we need and cut a new segment in case
 	// we don't fit into the current one.
-	maxLen := int64(binary.MaxVarintLen32)
+	maxLen := int64(binary.MaxVarintLen32) // The number of chunks.
 	for _, c := range chks {
-		maxLen += binary.MaxVarintLen32 + 1
+		maxLen += binary.MaxVarintLen32 + 1 // The number of bytes in the chunk and its encoding.
 		maxLen += int64(len(c.Chunk.Bytes()))
 	}
 	newsz := w.n + maxLen
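The new inline comments document a worst-case size estimate: one varint for the chunk count, then per chunk a varint length (binary.MaxVarintLen32 = 5 bytes), one encoding byte, and the chunk data. As a worked example, two chunks of 100 and 200 bytes reserve at most 5 + (5+1+100) + (5+1+200) = 317 bytes. A self-contained sketch of the same calculation (maxWriteLen is a name invented for this example):

package main

import (
	"encoding/binary"
	"fmt"
)

// maxWriteLen mirrors the estimate above: one varint for the chunk count,
// then per chunk a varint length, one encoding byte, and the chunk data.
// Chunk sizes are passed directly so the sketch stays self-contained.
func maxWriteLen(chunkSizes []int) int64 {
	maxLen := int64(binary.MaxVarintLen32) // The number of chunks.
	for _, n := range chunkSizes {
		maxLen += binary.MaxVarintLen32 + 1 // The chunk's length and its encoding.
		maxLen += int64(n)
	}
	return maxLen
}

func main() {
	fmt.Println(maxWriteLen([]int{100, 200})) // 317
}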


@@ -15,7 +15,7 @@ package chunks
 import "io"
-// bstream is a stream of bits
+// bstream is a stream of bits.
 type bstream struct {
 	stream []byte // the data stream
 	count  uint8  // how many bits are valid in current byte
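For orientation, appending a single bit to such a structure could look like the sketch below; this is an assumed implementation written from the two fields shown, not necessarily the repository's actual method.

// writeBit is a hypothetical sketch: when no bits are free in the current
// byte (count == 0), grow the stream by one byte; then set the next free
// bit, counting down from the most significant position.
func (b *bstream) writeBit(bit bool) {
	if b.count == 0 {
		b.stream = append(b.stream, 0)
		b.count = 8
	}
	i := len(b.stream) - 1
	if bit {
		b.stream[i] |= 1 << (b.count - 1)
	}
	b.count--
}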


@@ -18,7 +18,7 @@ import (
 	"fmt"
 )
-// Encoding is the identifier for a chunk encoding
+// Encoding is the identifier for a chunk encoding.
 type Encoding uint8
 func (e Encoding) String() string {
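The diff cuts off at the String method; a plausible completion is sketched below. The specific encoding constants and names are assumptions for illustration, not taken from this diff.

// Hypothetical encoding identifiers for this sketch.
const (
	EncNone Encoding = iota
	EncXOR
)

// String maps known encodings to readable names and falls back to a
// placeholder for unknown values.
func (e Encoding) String() string {
	switch e {
	case EncNone:
		return "none"
	case EncXOR:
		return "XOR"
	}
	return "<unknown>"
}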


@@ -39,10 +39,10 @@ const (
 const compactionPageBytes = minSectorSize * 64
-// IndexWriter serialized the index for a block of series data.
-// The methods must generally be called in order they are specified.
+// IndexWriter serializes the index for a block of series data.
+// The methods must generally be called in the order they are specified in.
 type IndexWriter interface {
-	// AddSeries populates the index writer witha series and its offsets
+	// AddSeries populates the index writer with a series and its offsets
 	// of chunks that the index can reference.
 	// The reference number is used to resolve a series against the postings
 	// list iterator. It only has to be available during the write processing.
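A hedged sketch of the call order this contract implies: every series is registered first, under a caller-chosen reference number, and postings written by later interface methods refer back to those numbers. The AddSeries signature, the seriesWithChunks type, and writeIndex are all assumptions for this sketch; labels.Labels refers to the repository's label type.

// seriesWithChunks is a hypothetical bundle of a series' labels and the
// metadata of its chunks.
type seriesWithChunks struct {
	lset   labels.Labels
	chunks []*ChunkMeta
}

// writeIndex shows the assumed ordering: series are added first, and the
// reference numbers only need to stay valid during the write.
func writeIndex(iw IndexWriter, series map[uint32]seriesWithChunks) error {
	for ref, s := range series {
		if err := iw.AddSeries(ref, s.lset, s.chunks...); err != nil {
			return err
		}
	}
	// ... postings and remaining sections follow, in the specified order ...
	return nil
}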


@@ -40,7 +40,7 @@ func (b *BufferedSeriesIterator) Seek(t int64) bool {
 	t0 := t - b.buf.delta
 	// If the delta would cause us to seek backwards, preserve the buffer
-	// and just continue regular advancment while filling the buffer on the way.
+	// and just continue regular advancement while filling the buffer on the way.
 	if t0 > b.lastTime {
 		b.buf.reset()
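The corrected comment describes a small but important decision: whether the buffered look-back window can survive the seek. A self-contained distillation of that branch, with keepBuffer invented for this sketch:

package main

import "fmt"

// keepBuffer restates the decision above: a seek to time t can preserve a
// buffered window of width delta only if t-delta does not lie beyond the
// last time already read; otherwise the old buffer contents are useless.
func keepBuffer(t, delta, lastTime int64) bool {
	t0 := t - delta
	return t0 <= lastTime
}

func main() {
	fmt.Println(keepBuffer(100, 10, 95)) // true: window [90,100] overlaps data already read
	fmt.Println(keepBuffer(100, 10, 80)) // false: window starts past lastTime, so reset
}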

wal.go

@@ -147,7 +147,7 @@ func (w *WAL) initSegments() error {
 	if len(fns) == 0 {
 		return nil
 	}
-	// We must open all file in read mode as we may have to truncate along
+	// We must open all files in read/write mode as we may have to truncate along
 	// the way and any file may become the tail.
 	for _, fn := range fns {
 		f, err := os.OpenFile(fn, os.O_RDWR, 0666)
@@ -178,10 +178,10 @@ func (w *WAL) initSegments() error {
 	return nil
 }
-// cut finishes the currently active segments and open the next one.
+// cut finishes the currently active segments and opens the next one.
 // The encoder is reset to point to the new segment.
 func (w *WAL) cut() error {
-	// Sync current tail to disc and close.
+	// Sync current tail to disk and close.
 	if tf := w.tail(); tf != nil {
 		if err := w.sync(); err != nil {
 			return err
@@ -276,7 +276,7 @@ func (w *WAL) run(interval time.Duration) {
 	}
 }
-// Close sync all data and closes the underlying resources.
+// Close syncs all data and closes the underlying resources.
 func (w *WAL) Close() error {
 	close(w.stopc)
 	<-w.donec
@@ -309,9 +309,10 @@ func (w *WAL) entry(et WALEntryType, flag byte, buf []byte) error {
 	w.mtx.Lock()
 	defer w.mtx.Unlock()
-	// Cut to the next segment if exceeds the file size unless it would also
+	// Cut to the next segment if the entry exceeds the file size unless it would also
 	// exceed the size of a new segment.
 	var (
+		// 6-byte header + 4-byte CRC32 + buf.
 		sz    = int64(6 + 4 + len(buf))
 		newsz = w.curN + sz
 	)
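Both added comments concern entry sizing: each record occupies a 6-byte header, a 4-byte CRC32, and the payload, and a segment cut only helps if a fresh segment could actually hold the entry. A self-contained sketch of that decision, with needsCut and the segment-size limit as assumed names and parameters:

package main

import "fmt"

// needsCut is a hypothetical distillation of the logic above: cut to a new
// segment if appending the entry would exceed the segment size limit, unless
// the entry would not fit into a fresh segment either.
func needsCut(curN, n, segmentSize int64) bool {
	sz := 6 + 4 + n // 6-byte header + 4-byte CRC32 + buf.
	return curN+sz > segmentSize && sz <= segmentSize
}

func main() {
	const segmentSize = 256 << 20 // assumed limit for this example
	fmt.Println(needsCut(segmentSize-5, 100, segmentSize)) // true: entry no longer fits the tail
	fmt.Println(needsCut(0, segmentSize+1, segmentSize))   // false: won't fit a new segment either
}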