Fix linter issues

Signed-off-by: Marco Pracucci <marco@pracucci.com>
commit 7c8e9a2a76 (parent ca7dbc4342)
Author: Marco Pracucci
Date: 2024-01-05 13:12:23 +01:00
12 changed files with 29 additions and 27 deletions


@@ -317,7 +317,7 @@ func TestForStateAddSamples(t *testing.T) {
 filteredRes = append(filteredRes, smpl)
 } else {
 // If not 'ALERTS_FOR_STATE', it has to be 'ALERTS'.
-require.Equal(t, smplName, "ALERTS")
+require.Equal(t, "ALERTS", smplName)
 }
 }
 for i := range test.result {
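
Note on the fix: testify's require.Equal takes the expected value before the actual one, so the linter flags reversed arguments; with the old order a failure would print the actual sample name as if it were the expectation. A minimal standalone sketch of the pattern (test name and variable are illustrative, not from this repository):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestExpectedComesFirst(t *testing.T) {
	smplName := "ALERTS"
	// require.Equal(t, expected, actual): testify labels its failure
	// output by argument position, so reversed arguments make the
	// "expected"/"actual" diagnostics read backwards.
	require.Equal(t, "ALERTS", smplName)
}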


@@ -863,7 +863,7 @@ func TestConcatenatingChunkSeriesMerger(t *testing.T) {
 count, err := merged.ChunkCount()
 require.NoError(t, err)
-require.Equal(t, len(expChks), count)
+require.Len(t, expChks, count)
 })
 }
 }


@@ -102,7 +102,7 @@ func TestNewListChunkSeriesFromSamples(t *testing.T) {
 count, err := series.ChunkCount()
 require.NoError(t, err)
-require.Equal(t, len(chks), count, "should have one chunk per group of samples")
+require.Len(t, chks, count, "should have one chunk per group of samples")
 }
 // TestSeriesSetToChunkSet test the property of SeriesSet that says
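
This and the previous file apply the same rule: require.Len states the intent directly and, on failure, prints the collection's elements, whereas require.Equal against len(...) only reports two integers. A hypothetical sketch:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestOneChunkPerGroup(t *testing.T) {
	chks := []string{"chunk-1", "chunk-2"} // stand-in for real chunk metas
	count := 2

	// Equivalent to require.Equal(t, count, len(chks)), but the failure
	// message includes the contents of chks rather than a bare length.
	require.Len(t, chks, count, "should have one chunk per group of samples")
}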


@@ -2,8 +2,9 @@ package tsdb
 import (
 "context"
+"errors"
+"fmt"
-"github.com/pkg/errors"
 "go.uber.org/atomic"
 "golang.org/x/sync/semaphore"
@@ -68,10 +69,10 @@ func (bw *asyncBlockWriter) loop() (res asyncBlockWriterResult) {
 ref := storage.SeriesRef(0)
 for sw := range bw.seriesChan {
 if err := bw.chunkw.WriteChunks(sw.chks...); err != nil {
-return asyncBlockWriterResult{err: errors.Wrap(err, "write chunks")}
+return asyncBlockWriterResult{err: fmt.Errorf("write chunks: %w", err)}
 }
 if err := bw.indexw.AddSeries(ref, sw.lbls, sw.chks...); err != nil {
-return asyncBlockWriterResult{err: errors.Wrap(err, "add series")}
+return asyncBlockWriterResult{err: fmt.Errorf("add series: %w", err)}
 }
 stats.NumChunks += uint64(len(sw.chks))
@@ -82,7 +83,7 @@ func (bw *asyncBlockWriter) loop() (res asyncBlockWriterResult) {
 for _, chk := range sw.chks {
 if err := bw.chunkPool.Put(chk.Chunk); err != nil {
-return asyncBlockWriterResult{err: errors.Wrap(err, "put chunk")}
+return asyncBlockWriterResult{err: fmt.Errorf("put chunk: %w", err)}
 }
 }
 ref++
@@ -90,16 +91,16 @@ func (bw *asyncBlockWriter) loop() (res asyncBlockWriterResult) {
 err := bw.closeSemaphore.Acquire(context.Background(), 1)
 if err != nil {
-return asyncBlockWriterResult{err: errors.Wrap(err, "failed to acquire semaphore before closing writers")}
+return asyncBlockWriterResult{err: fmt.Errorf("failed to acquire semaphore before closing writers: %w", err)}
 }
 defer bw.closeSemaphore.Release(1)
 // If everything went fine with writing so far, close writers.
 if err := bw.chunkw.Close(); err != nil {
-return asyncBlockWriterResult{err: errors.Wrap(err, "closing chunk writer")}
+return asyncBlockWriterResult{err: fmt.Errorf("closing chunk writer: %w", err)}
 }
 if err := bw.indexw.Close(); err != nil {
-return asyncBlockWriterResult{err: errors.Wrap(err, "closing index writer")}
+return asyncBlockWriterResult{err: fmt.Errorf("closing index writer: %w", err)}
 }
 return asyncBlockWriterResult{stats: stats}
@@ -118,7 +119,7 @@ func (bw *asyncBlockWriter) addSeries(lbls labels.Labels, chks []chunks.Meta) er
 // then we should return that error too, otherwise it may be never reported
 // and we'll never know the actual root cause.
 if bw.result.err != nil {
-return errors.Wrap(bw.result.err, errAsyncBlockWriterNotRunning.Error())
+return fmt.Errorf("%s: %w", errAsyncBlockWriterNotRunning.Error(), bw.result.err)
 }
 return errAsyncBlockWriterNotRunning
 }
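
This file swaps github.com/pkg/errors for the standard library: fmt.Errorf with the %w verb wraps the cause so callers can still match it with errors.Is and errors.As, preserving the behaviour errors.Wrap provided. A self-contained sketch (the sentinel and function names are illustrative):

package main

import (
	"errors"
	"fmt"
)

var errWriterNotRunning = errors.New("async block writer not running")

func writeChunks() error {
	return errWriterNotRunning // simulate the underlying failure
}

func main() {
	if err := writeChunks(); err != nil {
		// %w keeps err in the chain while prefixing the context message.
		wrapped := fmt.Errorf("write chunks: %w", err)

		fmt.Println(wrapped)                                 // write chunks: async block writer not running
		fmt.Println(errors.Is(wrapped, errWriterNotRunning)) // true
	}
}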


@@ -15,6 +15,7 @@ package chunks
 import (
 "encoding/binary"
+"errors"
 "math/rand"
 "os"
 "strconv"
@@ -22,7 +23,6 @@ import (
 "testing"
 "time"
-"github.com/pkg/errors"
 "github.com/stretchr/testify/require"
 "github.com/prometheus/prometheus/tsdb/chunkenc"
@@ -185,7 +185,7 @@ func TestChunkDiskMapper_WriteUnsupportedChunk_Chunk_IterateChunks(t *testing.T)
 ucSeriesRef, ucChkRef, ucMint, ucMaxt, uchunk := writeUnsupportedChunk(t, 0, hrw)
 // Checking on-disk bytes for the first file.
-require.Equal(t, 1, len(hrw.mmappedChunkFiles), "expected 1 mmapped file, got %d", len(hrw.mmappedChunkFiles))
+require.Len(t, hrw.mmappedChunkFiles, 1, "expected 1 mmapped file, got %d", len(hrw.mmappedChunkFiles))
 require.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers))
 // Testing IterateAllChunks method.
@@ -206,7 +206,7 @@ func TestChunkDiskMapper_WriteUnsupportedChunk_Chunk_IterateChunks(t *testing.T)
 // The chunk encoding is unknown so Chunk() should fail but us the caller
 // are ok with that. Above we asserted that the encoding we expected was
 // EncUnsupportedXOR
-require.NotNil(t, err)
+require.Error(t, err)
 require.Contains(t, err.Error(), "invalid chunk encoding \"<unknown>\"")
 require.Nil(t, actChunk)
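
require.Error replaces require.NotNil because it states the intent (an error is expected) and produces error-specific failure output; require.NotNil accepts any value and says nothing about error semantics. A minimal illustrative test (the failing decode is a stand-in):

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func decodeUnsupported() (string, error) {
	return "", errors.New(`invalid chunk encoding "<unknown>"`)
}

func TestUnsupportedEncoding(t *testing.T) {
	chunk, err := decodeUnsupported()
	require.Error(t, err) // clearer than require.NotNil(t, err) for error values
	require.Contains(t, err.Error(), "invalid chunk encoding")
	require.Empty(t, chunk)
}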


@@ -1243,7 +1243,7 @@ func populateSymbols(ctx context.Context, mergeFunc storage.VerticalChunkSeriesM
 }
 }
-if err != io.EOF {
+if !errors.Is(err, io.EOF) {
 return fmt.Errorf("iterating symbols: %w", err)
 }
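
The direct comparison err != io.EOF only matches when io.EOF is returned unwrapped; once any layer wraps the error (for example with fmt.Errorf and %w, as done elsewhere in this commit), the comparison silently stops matching, while errors.Is walks the whole wrap chain. A small sketch:

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	wrapped := fmt.Errorf("iterating symbols: %w", io.EOF)

	fmt.Println(wrapped == io.EOF)          // false: equality misses wrapped errors
	fmt.Println(errors.Is(wrapped, io.EOF)) // true: errors.Is unwraps the chain
}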


@@ -615,7 +615,7 @@ func TestCompaction_CompactWithSplitting(t *testing.T) {
 }
 // Check that symbols table covered all symbols found from series.
-require.Equal(t, 0, len(seriesSymbols))
+require.Empty(t, seriesSymbols)
 }
 require.Equal(t, uint64(series), totalSeries)
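
require.Empty, and its counterpart require.NotEmpty used in the next two files, replaces length comparisons against zero; on failure it prints the leftover elements rather than a bare integer. An illustrative sketch:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestSymbolsFullyCovered(t *testing.T) {
	// Stand-in for symbols not yet matched against the symbols table.
	seriesSymbols := map[string]struct{}{}

	// On failure this prints the remaining entries, unlike
	// require.Equal(t, 0, len(seriesSymbols)).
	require.Empty(t, seriesSymbols)
}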


@@ -2918,7 +2918,7 @@ func TestHeadShardedPostings(t *testing.T) {
 expected = append(expected, p.At())
 }
 require.NoError(t, p.Err())
-require.Greater(t, len(expected), 0)
+require.NotEmpty(t, expected)
 // Query the same postings for each shard.
 const shardCount = uint64(4)


@@ -262,7 +262,7 @@ func TestIndexRW_Postings(t *testing.T) {
 expected = append(expected, p.At())
 }
 require.NoError(t, p.Err())
-require.Greater(t, len(expected), 0)
+require.NotEmpty(t, expected)
 // Query the same postings for each shard.
 const shardCount = uint64(4)


@@ -15,11 +15,11 @@ package tsdb
 import (
 "context"
+"errors"
 "fmt"
 "math"
 "github.com/oklog/ulid"
-"github.com/pkg/errors"
 "golang.org/x/exp/slices"
 "github.com/prometheus/prometheus/model/histogram"
@@ -470,7 +470,7 @@ func labelValuesFromSeries(r IndexReader, labelName string, refs []storage.Serie
 for _, ref := range refs {
 err := r.Series(ref, &builder, nil)
 // Postings may be stale. Skip if no underlying series exists.
-if errors.Cause(err) == storage.ErrNotFound {
+if errors.Is(err, storage.ErrNotFound) {
 continue
 }
 if err != nil {
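
pkg/errors' Cause unwraps only errors that implement its Causer interface and then compares the innermost error for equality, so it cannot see through fmt.Errorf("...: %w", err) wrapping; the standard library's errors.Is matches the target anywhere in the chain. A sketch, with a local sentinel standing in for storage.ErrNotFound:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found") // stand-in for storage.ErrNotFound

func main() {
	err := fmt.Errorf("reading series: %w", errNotFound)

	fmt.Println(err == errNotFound)          // false: %w wrapping hides the sentinel
	fmt.Println(errors.Is(err, errNotFound)) // true: matches through the chain
}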


@@ -3,6 +3,7 @@ package tsdb
 import (
 "container/heap"
 "encoding/gob"
+"errors"
 "fmt"
 "io"
 "os"
@@ -12,7 +13,7 @@ import (
 "github.com/golang/snappy"
-"github.com/prometheus/prometheus/tsdb/errors"
+tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
 )
 // symbolFlushers writes symbols to provided files in background goroutines.
@@ -178,7 +179,7 @@ func writeSymbolsToFile(filename string, symbols []string) error {
 sn := snappy.NewBufferedWriter(f)
 enc := gob.NewEncoder(sn)
-errs := errors.NewMulti()
+errs := tsdb_errors.NewMulti()
 for _, s := range symbols {
 err := enc.Encode(s)
@@ -270,7 +271,7 @@ func newSymbolsIterator(filenames []string) (*symbolsIterator, error) {
 func (sit *symbolsIterator) NextSymbol() (string, error) {
 for len(sit.heap) > 0 {
 result, err := sit.heap[0].Next()
-if err == io.EOF {
+if errors.Is(err, io.EOF) {
 // End of file, remove it from heap, and try next file.
 heap.Remove(&sit.heap, 0)
 continue
@@ -296,7 +297,7 @@ func (sit *symbolsIterator) NextSymbol() (string, error) {
 // Close all files.
 func (sit *symbolsIterator) Close() error {
-errs := errors.NewMulti()
+errs := tsdb_errors.NewMulti()
 for _, f := range sit.files {
 errs.Add(f.Close())
 }
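
Aliasing the import as tsdb_errors frees the bare name errors for the standard library, which this file now needs for errors.Is; without the alias the two imports would collide. Sketch of the resulting usage (error messages are illustrative):

package main

import (
	"errors"
	"fmt"

	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
)

func main() {
	// The alias lets both packages coexist: stdlib errors for sentinels
	// and matching, tsdb_errors for multi-error aggregation.
	errs := tsdb_errors.NewMulti()
	errs.Add(errors.New("first failure"))
	errs.Add(errors.New("second failure"))
	fmt.Println(errs.Err())
}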


@@ -55,8 +55,8 @@ func testSymbolsBatchAndIterationWithFlushersConcurrency(t *testing.T, flushersC
 var w, prev string
 for w, err = it.NextSymbol(); err == nil; w, err = it.NextSymbol() {
 if !first {
-require.True(t, w != "")
-require.True(t, prev < w)
+require.NotEqual(t, "", w)
+require.Less(t, prev, w)
 }
 first = false
@@ -67,5 +67,5 @@ func testSymbolsBatchAndIterationWithFlushersConcurrency(t *testing.T, flushersC
 prev = w
 }
 require.Equal(t, io.EOF, err)
-require.Equal(t, 0, len(allWords))
+require.Empty(t, allWords)
 }
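
The boolean assertions here are replaced with dedicated comparison helpers: require.True(t, prev < w) fails with nothing but "false", while require.Less prints both operands, and require.NotEqual reports the offending value. A small sketch:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestSymbolsSortedAndNonEmpty(t *testing.T) {
	prev, w := "alpha", "beta"

	// On failure these print the operands; require.True would only
	// report that the expression evaluated to false.
	require.NotEqual(t, "", w)
	require.Less(t, prev, w)
}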