Mirror of https://github.com/prometheus/prometheus.git (synced 2025-01-13)
Merge pull request #307 from mjtrangoni/fixes
Fix some megacheck and unconvert issues
Commit 8a301b126a

block.go (2 changed lines)
@@ -474,7 +474,7 @@ func (pb *Block) CleanTombstones(dest string, c Compactor) (bool, error) {
 	numStones := 0
 
 	pb.tombstones.Iter(func(id uint64, ivs Intervals) error {
-		for _ = range ivs {
+		for range ivs {
 			numStones++
 		}
 
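This is a standard megacheck (gosimple) fix: when neither the index nor the element of a range loop is used, "for _ = range x" can be written as the bare "for range x" with identical behaviour. A minimal, self-contained sketch of the idiom; the slice and counters below are illustrative stand-ins, not the tsdb Intervals type:

package main

import "fmt"

func main() {
	ivs := []int{10, 20, 30} // illustrative stand-in for the intervals being counted

	// Before (what the linter flags): the blank assignment is redundant
	// because neither the index nor the element is used.
	n := 0
	for _ = range ivs {
		n++
	}

	// After: the bare range form counts iterations just the same.
	m := 0
	for range ivs {
		m++
	}

	fmt.Println(n, m) // prints: 3 3
}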
@@ -259,7 +259,7 @@ func (it *xorIterator) Next() bool {
 		it.err = err
 		return false
 	}
-	it.t = int64(t)
+	it.t = t
 	it.val = math.Float64frombits(v)
 
 	it.numRead++
@@ -133,7 +133,7 @@ func (w *Writer) finalizeTail() error {
 		return err
 	}
 	// As the file was pre-allocated, we truncate any superfluous zero bytes.
-	off, err := tf.Seek(0, os.SEEK_CUR)
+	off, err := tf.Seek(0, io.SeekCurrent)
 	if err != nil {
 		return err
 	}
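The Seek changes here, and the matching ones in wal.go and the test file further down, replace the deprecated os.SEEK_SET/os.SEEK_CUR/os.SEEK_END whence constants with io.SeekStart/io.SeekCurrent/io.SeekEnd, which Go 1.7 added as the preferred spellings. A minimal sketch of the replacement on a throwaway temporary file (the file and its contents are illustrative only):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
)

func main() {
	// A throwaway file just to have something to seek in.
	f, err := ioutil.TempFile("", "seek-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if _, err := f.WriteString("hello"); err != nil {
		log.Fatal(err)
	}

	// Seek(0, io.SeekCurrent) reports the current offset without moving it;
	// Seek(0, io.SeekEnd) moves to the end and so reports the file size.
	cur, err := f.Seek(0, io.SeekCurrent)
	if err != nil {
		log.Fatal(err)
	}
	size, err := f.Seek(0, io.SeekEnd)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cur, size) // prints: 5 5
}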
@@ -349,7 +349,7 @@ func (s *Reader) Chunk(ref uint64) (chunkenc.Chunk, error) {
 	}
 	b := s.bs[seq]
 
-	if int(off) >= b.Len() {
+	if off >= b.Len() {
 		return nil, errors.Errorf("offset %d beyond data size %d", off, b.Len())
 	}
 	// With the minimum chunk length this should never cause us reading
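The int(off) removal above, like the int64(t) change in the xorIterator hunk and the int(d.uvarint()) and uint32(nextPos) changes below, drops a conversion that the unconvert tool reports as a no-op because the expression already has the target type. A minimal sketch with a hypothetical helper, not the tsdb decoder:

package main

import "fmt"

// readLen is a hypothetical helper that already returns int, standing in for
// the decoder calls whose results were wrapped in redundant conversions.
func readLen() int { return 7 }

func main() {
	// Before: int(readLen()) converts an int to int, which unconvert flags.
	k := int(readLen())

	// After: use the value directly; the type is unchanged.
	k = readLen()

	fmt.Println(k) // prints: 7
}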
db.go (7 changed lines)
@@ -725,10 +725,7 @@ func (db *DB) Delete(mint, maxt int64, ms ...labels.Matcher) error {
 	g.Go(func() error {
 		return db.head.Delete(mint, maxt, ms...)
 	})
-	if err := g.Wait(); err != nil {
-		return err
-	}
-	return nil
+	return g.Wait()
 }
 
 // CleanTombstones re-writes any blocks with tombstones.
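The Delete change collapses the wait-then-return pattern into a direct return of g.Wait(), which gosimple flags as equivalent. A minimal sketch assuming golang.org/x/sync/errgroup, with a hypothetical deleteRange helper standing in for the real per-block deletes:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// deleteRange is a hypothetical stand-in for the head/block Delete calls;
// it is not part of the tsdb API.
func deleteRange(name string) error {
	fmt.Println("deleting from", name)
	return nil
}

func deleteAll(names []string) error {
	var g errgroup.Group
	for _, n := range names {
		n := n // capture the loop variable for the goroutine (pre-Go 1.22 semantics)
		g.Go(func() error {
			return deleteRange(n)
		})
	}
	// Checking g.Wait()'s error only to return it (or nil) is equivalent to
	// returning g.Wait() directly, which is what the diff does.
	return g.Wait()
}

func main() {
	if err := deleteAll([]string{"block-1", "block-2"}); err != nil {
		fmt.Println("delete failed:", err)
	}
}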
@@ -737,7 +734,7 @@ func (db *DB) CleanTombstones() error {
 	defer db.cmtx.Unlock()
 
 	start := time.Now()
-	defer db.metrics.tombCleanTimer.Observe(float64(time.Since(start).Seconds()))
+	defer db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds())
 
 	db.mtx.RLock()
 	blocks := db.blocks[:]
@@ -780,7 +780,7 @@ func (r *Reader) readSymbols(off int) error {
 
 	for d.err() == nil && d.len() > 0 && cnt > 0 {
 		s := d.uvarintStr()
-		r.symbols[uint32(nextPos)] = s
+		r.symbols[nextPos] = s
 
 		if r.version == 2 {
 			nextPos++
@@ -800,7 +800,7 @@ func (r *Reader) readOffsetTable(off uint64, f func([]string, uint64) error) err
 	cnt := d.be32()
 
 	for d.err() == nil && d.len() > 0 && cnt > 0 {
-		keyCount := int(d.uvarint())
+		keyCount := d.uvarint()
 		keys := make([]string, 0, keyCount)
 
 		for i := 0; i < keyCount; i++ {
@@ -1038,7 +1038,7 @@ func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) e
 
 	d := decbuf{b: b}
 
-	k := int(d.uvarint())
+	k := d.uvarint()
 
 	for i := 0; i < k; i++ {
 		lno := uint32(d.uvarint())
@@ -1061,7 +1061,7 @@ func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) e
 	}
 
 	// Read the chunks meta data.
-	k = int(d.uvarint())
+	k = d.uvarint()
 
 	if k == 0 {
 		return nil
wal.go (8 changed lines)
@@ -290,7 +290,7 @@ func (w *SegmentWAL) truncate(err error, file int, lastOffset int64) error {
 	w.files = w.files[:file+1]
 
 	// Seek the current file to the last valid offset where we continue writing from.
-	_, err = w.files[file].Seek(lastOffset, os.SEEK_SET)
+	_, err = w.files[file].Seek(lastOffset, io.SeekStart)
 	return err
 }
 
@@ -393,7 +393,7 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(uint64) bool) error {
 		return errors.Wrap(r.Err(), "read candidate WAL files")
 	}
 
-	off, err := csf.Seek(0, os.SEEK_CUR)
+	off, err := csf.Seek(0, io.SeekCurrent)
 	if err != nil {
 		return err
 	}
@@ -583,7 +583,7 @@ func (w *SegmentWAL) cut() error {
 	// in the new segment.
 	go func() {
 		w.actorc <- func() error {
-			off, err := hf.Seek(0, os.SEEK_CUR)
+			off, err := hf.Seek(0, io.SeekCurrent)
 			if err != nil {
 				return errors.Wrapf(err, "finish old segment %s", hf.Name())
 			}
@@ -1024,7 +1024,7 @@ func (r *walReader) next() bool {
 
 	// Remember the offset after the last correctly read entry. If the next one
 	// is corrupted, this is where we can safely truncate.
-	r.lastOffset, r.err = cf.Seek(0, os.SEEK_CUR)
+	r.lastOffset, r.err = cf.Seek(0, io.SeekCurrent)
 	if r.err != nil {
 		return false
 	}
 
@@ -15,6 +15,7 @@ package tsdb
 
 import (
 	"encoding/binary"
+	"io"
 	"io/ioutil"
 	"math/rand"
 	"os"
@@ -305,7 +306,7 @@ func TestWALRestoreCorrupted(t *testing.T) {
 			testutil.Ok(t, err)
 			defer f.Close()
 
-			off, err := f.Seek(0, os.SEEK_END)
+			off, err := f.Seek(0, io.SeekEnd)
 			testutil.Ok(t, err)
 
 			testutil.Ok(t, f.Truncate(off-1))
@@ -318,7 +319,7 @@ func TestWALRestoreCorrupted(t *testing.T) {
 			testutil.Ok(t, err)
 			defer f.Close()
 
-			off, err := f.Seek(0, os.SEEK_END)
+			off, err := f.Seek(0, io.SeekEnd)
 			testutil.Ok(t, err)
 
 			testutil.Ok(t, f.Truncate(off-8))
@@ -331,7 +332,7 @@ func TestWALRestoreCorrupted(t *testing.T) {
 			testutil.Ok(t, err)
 			defer f.Close()
 
-			off, err := f.Seek(0, os.SEEK_END)
+			off, err := f.Seek(0, io.SeekEnd)
 			testutil.Ok(t, err)
 
 			// Write junk before checksum starts.
@@ -346,7 +347,7 @@ func TestWALRestoreCorrupted(t *testing.T) {
 			testutil.Ok(t, err)
 			defer f.Close()
 
-			off, err := f.Seek(0, os.SEEK_END)
+			off, err := f.Seek(0, io.SeekEnd)
 			testutil.Ok(t, err)
 
 			// Write junk into checksum