Mirror of https://github.com/prometheus/prometheus.git
Replace errors.Errorf with fmt.Errorf
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
This commit is contained in:
parent 1bfb3ed062
commit dd8871379a
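For context on why this is a safe mechanical swap: Errorf from github.com/pkg/errors and the standard library's fmt.Errorf both build an error from a printf-style format string, so each call site rewrites one-for-one. A minimal sketch (illustrative only, not part of this commit):

package main

import (
	"fmt"

	pkgerrors "github.com/pkg/errors"
)

func main() {
	version := 3
	// Before: Errorf from the third-party github.com/pkg/errors package.
	errOld := pkgerrors.Errorf("unexpected meta file version %d", version)
	// After: the standard library equivalent, dropping the extra dependency.
	errNew := fmt.Errorf("unexpected meta file version %d", version)
	// The rendered messages are identical. pkg/errors additionally records a
	// stack trace, which the call sites touched here do not appear to rely on.
	fmt.Println(errOld.Error() == errNew.Error()) // true
}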
@@ -17,6 +17,7 @@ package tsdb
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"io"
 	"os"
 	"path/filepath"
@@ -238,7 +239,7 @@ func readMetaFile(dir string) (*BlockMeta, int64, error) {
 		return nil, 0, err
 	}
 	if m.Version != metaVersion1 {
-		return nil, 0, errors.Errorf("unexpected meta file version %d", m.Version)
+		return nil, 0, fmt.Errorf("unexpected meta file version %d", m.Version)
 	}
 
 	return &m, int64(len(b)), nil
@@ -151,7 +151,7 @@ func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Log
 
 func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) {
 	if len(ranges) == 0 {
-		return nil, errors.Errorf("at least one range must be provided")
+		return nil, fmt.Errorf("at least one range must be provided")
 	}
 	if pool == nil {
 		pool = chunkenc.NewPool()
@@ -662,7 +662,7 @@ func (db *DBReadOnly) Block(blockID string) (BlockReader, error) {
 
 	_, err := os.Stat(filepath.Join(db.dir, blockID))
 	if os.IsNotExist(err) {
-		return nil, errors.Errorf("invalid block ID %s", blockID)
+		return nil, fmt.Errorf("invalid block ID %s", blockID)
 	}
 
 	block, err := OpenBlock(db.logger, filepath.Join(db.dir, blockID), nil)
@@ -1834,10 +1834,10 @@ func (db *DB) ForceHeadMMap() {
 // will create a new block containing all data that's currently in the memory buffer/WAL.
 func (db *DB) Snapshot(dir string, withHead bool) error {
 	if dir == db.dir {
-		return errors.Errorf("cannot snapshot into base directory")
+		return fmt.Errorf("cannot snapshot into base directory")
 	}
 	if _, err := ulid.ParseStrict(dir); err == nil {
-		return errors.Errorf("dir must not be a valid ULID")
+		return fmt.Errorf("dir must not be a valid ULID")
 	}
 
 	db.cmtx.Lock()
@@ -3082,7 +3082,7 @@ func deleteNonBlocks(dbDir string) error {
 	}
 	for _, dir := range dirs {
 		if ok := isBlockDir(dir); !ok {
-			return errors.Errorf("root folder:%v still hase non block directory:%v", dbDir, dir.Name())
+			return fmt.Errorf("root folder:%v still hase non block directory:%v", dbDir, dir.Name())
 		}
 	}
 	return nil
@@ -224,11 +224,11 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wlog.WL, opts *Hea
 	// even if ooo is not enabled yet.
 	capMax := opts.OutOfOrderCapMax.Load()
 	if capMax <= 0 || capMax > 255 {
-		return nil, errors.Errorf("OOOCapMax of %d is invalid. must be > 0 and <= 255", capMax)
+		return nil, fmt.Errorf("OOOCapMax of %d is invalid. must be > 0 and <= 255", capMax)
 	}
 
 	if opts.ChunkRange < 1 {
-		return nil, errors.Errorf("invalid chunk range %d", opts.ChunkRange)
+		return nil, fmt.Errorf("invalid chunk range %d", opts.ChunkRange)
 	}
 	if opts.SeriesCallback == nil {
 		opts.SeriesCallback = &noopSeriesLifecycleCallback{}
@@ -857,7 +857,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries)
 		slice := mmappedChunks[seriesRef]
 		if len(slice) > 0 && slice[len(slice)-1].maxTime >= mint {
 			h.metrics.mmapChunkCorruptionTotal.Inc()
-			return errors.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]",
+			return fmt.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]",
 				seriesRef, slice[len(slice)-1].minTime, slice[len(slice)-1].maxTime, mint, maxt)
 		}
 		slice = append(slice, &mmappedChunk{
@@ -872,7 +872,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries)
 
 		if len(ms.mmappedChunks) > 0 && ms.mmappedChunks[len(ms.mmappedChunks)-1].maxTime >= mint {
 			h.metrics.mmapChunkCorruptionTotal.Inc()
-			return errors.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]",
+			return fmt.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]",
 				seriesRef, ms.mmappedChunks[len(ms.mmappedChunks)-1].minTime, ms.mmappedChunks[len(ms.mmappedChunks)-1].maxTime,
 				mint, maxt)
 		}
@@ -970,7 +970,7 @@ func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapsh
 	dec := encoding.Decbuf{B: b}
 
 	if flag := dec.Byte(); flag != chunkSnapshotRecordTypeSeries {
-		return csr, errors.Errorf("invalid record type %x", flag)
+		return csr, fmt.Errorf("invalid record type %x", flag)
 	}
 
 	csr.ref = chunks.HeadSeriesRef(dec.Be64())
@@ -1018,7 +1018,7 @@ func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapsh
 
 	err = dec.Err()
 	if err != nil && len(dec.B) > 0 {
-		err = errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
+		err = fmt.Errorf("unexpected %d bytes left in entry", len(dec.B))
 	}
 
 	return
@@ -1041,7 +1041,7 @@ func decodeTombstonesSnapshotRecord(b []byte) (tombstones.Reader, error) {
 	dec := encoding.Decbuf{B: b}
 
 	if flag := dec.Byte(); flag != chunkSnapshotRecordTypeTombstones {
-		return nil, errors.Errorf("invalid record type %x", flag)
+		return nil, fmt.Errorf("invalid record type %x", flag)
 	}
 
 	tr, err := tombstones.Decode(dec.UvarintBytes())
@@ -1254,7 +1254,7 @@ func LastChunkSnapshot(dir string) (string, int, int, error) {
 			continue
 		}
 		if !fi.IsDir() {
-			return "", 0, 0, errors.Errorf("chunk snapshot %s is not a directory", fi.Name())
+			return "", 0, 0, fmt.Errorf("chunk snapshot %s is not a directory", fi.Name())
 		}
 
 		splits := strings.Split(fi.Name()[len(chunkSnapshotPrefix):], ".")
@@ -1492,7 +1492,7 @@ Outer:
 		default:
 			// This is a record type we don't understand. It is either and old format from earlier versions,
 			// or a new format and the code was rolled back to old version.
-			loopErr = errors.Errorf("unsupported snapshot record type 0b%b", rec[0])
+			loopErr = fmt.Errorf("unsupported snapshot record type 0b%b", rec[0])
 			break Outer
 		}
 	}
@@ -710,7 +710,7 @@ func createFakeReaderAndNotPopulatedChunks(s ...[]chunks.Sample) (*fakeChunksRea
 func (r *fakeChunksReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
 	chk, ok := r.chks[meta.Ref]
 	if !ok {
-		return nil, errors.Errorf("chunk not found at ref %v", meta.Ref)
+		return nil, fmt.Errorf("chunk not found at ref %v", meta.Ref)
 	}
 	return chk, nil
 }
@@ -1831,7 +1831,7 @@ func (m mockIndex) Symbols() index.StringIter {
 
 func (m *mockIndex) AddSeries(ref storage.SeriesRef, l labels.Labels, chunks ...chunks.Meta) error {
 	if _, ok := m.series[ref]; ok {
-		return errors.Errorf("series with reference %d already added", ref)
+		return fmt.Errorf("series with reference %d already added", ref)
 	}
 	l.Range(func(lbl labels.Label) {
 		m.symbols[lbl.Name] = struct{}{}
@@ -1852,7 +1852,7 @@ func (m *mockIndex) AddSeries(ref storage.SeriesRef, l labels.Labels, chunks ...
 func (m mockIndex) WritePostings(name, value string, it index.Postings) error {
 	l := labels.Label{Name: name, Value: value}
 	if _, ok := m.postings[l]; ok {
-		return errors.Errorf("postings for %s already added", l)
+		return fmt.Errorf("postings for %s already added", l)
 	}
 	ep, err := index.ExpandPostings(it)
 	if err != nil {
@@ -15,6 +15,7 @@ package tsdb
 
 import (
 	"encoding/json"
+	"fmt"
 	"io"
 	"os"
 	"path/filepath"
@@ -124,7 +125,7 @@ func readBogusMetaFile(dir string) (*BlockMeta, error) {
 		return nil, err
 	}
 	if m.Version != metaVersion1 && m.Version != 2 {
-		return nil, errors.Errorf("unexpected meta file version %d", m.Version)
+		return nil, fmt.Errorf("unexpected meta file version %d", m.Version)
 	}
 	return &m, nil
 }
14	tsdb/wal.go
@@ -525,14 +525,14 @@ func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) {
 	case err != nil:
 		return nil, errors.Wrapf(err, "validate meta %q", f.Name())
 	case n != 8:
-		return nil, errors.Errorf("invalid header size %d in %q", n, f.Name())
+		return nil, fmt.Errorf("invalid header size %d in %q", n, f.Name())
 	}
 
 	if m := binary.BigEndian.Uint32(metab[:4]); m != WALMagic {
-		return nil, errors.Errorf("invalid magic header %x in %q", m, f.Name())
+		return nil, fmt.Errorf("invalid magic header %x in %q", m, f.Name())
 	}
 	if metab[4] != WALFormatDefault {
-		return nil, errors.Errorf("unknown WAL segment format %d in %q", metab[4], f.Name())
+		return nil, fmt.Errorf("unknown WAL segment format %d in %q", metab[4], f.Name())
 	}
 	hasError = false
 	return f, nil
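Note that the errors.Wrapf call in the context lines above is left untouched by this commit. If such wrapping calls were migrated later, the standard-library counterpart would be fmt.Errorf with the %w verb, which keeps the wrapped cause inspectable via errors.Is/errors.As. A minimal sketch under that assumption (the file name below is a made-up placeholder):

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("no-such-segment")
	if err != nil {
		// Stdlib equivalent of errors.Wrapf(err, "validate meta %q", name):
		// %w wraps the cause so callers can still match it.
		wrapped := fmt.Errorf("validate meta %q: %w", "no-such-segment", err)
		fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
	}
}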
@@ -1052,7 +1052,7 @@ func (e walCorruptionErr) Error() string {
 
 func (r *walReader) corruptionErr(s string, args ...interface{}) error {
 	return walCorruptionErr{
-		err:        errors.Errorf(s, args...),
+		err:        fmt.Errorf(s, args...),
 		file:       r.cur,
 		lastOffset: r.lastOffset,
 	}
@@ -1124,7 +1124,7 @@ func (r *walReader) decodeSeries(flag byte, b []byte, res *[]record.RefSeries) e
 		return dec.Err()
 	}
 	if len(dec.B) > 0 {
-		return errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
+		return fmt.Errorf("unexpected %d bytes left in entry", len(dec.B))
 	}
 	return nil
 }
@@ -1156,7 +1156,7 @@ func (r *walReader) decodeSamples(flag byte, b []byte, res *[]record.RefSample)
 		return errors.Wrapf(dec.Err(), "decode error after %d samples", len(*res))
 	}
 	if len(dec.B) > 0 {
-		return errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
+		return fmt.Errorf("unexpected %d bytes left in entry", len(dec.B))
 	}
 	return nil
 }
@@ -1176,7 +1176,7 @@ func (r *walReader) decodeDeletes(flag byte, b []byte, res *[]tombstones.Stone)
 		return dec.Err()
 	}
 	if len(dec.B) > 0 {
-		return errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
+		return fmt.Errorf("unexpected %d bytes left in entry", len(dec.B))
 	}
 	return nil
 }
@@ -747,12 +747,12 @@ func checkpointNum(dir string) (int, error) {
 	// dir may contain a hidden directory, so only check the base directory
 	chunks := strings.Split(filepath.Base(dir), ".")
 	if len(chunks) != 2 {
-		return 0, errors.Errorf("invalid checkpoint dir string: %s", dir)
+		return 0, fmt.Errorf("invalid checkpoint dir string: %s", dir)
 	}
 
 	result, err := strconv.Atoi(chunks[1])
 	if err != nil {
-		return 0, errors.Errorf("invalid checkpoint dir string: %s", dir)
+		return 0, fmt.Errorf("invalid checkpoint dir string: %s", dir)
 	}
 
 	return result, nil