Refactor tsdb tests to use testify.

Signed-off-by: Paweł Szulik <paul.szulik@gmail.com>
Paweł Szulik 2021-09-02 17:43:54 +02:00 committed by Bryan Boreham
parent 34875ae8c7
commit 5961f78186
8 changed files with 50 additions and 113 deletions
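
The whole commit applies one recurring pattern: hand-written if-condition checks that call t.Fatalf or t.Errorf are replaced with the equivalent assertions from github.com/stretchr/testify/require, which abort the failing test just as t.Fatalf does and keep the original failure message. The sketch below only illustrates that pattern; the test name and the doSomething helper are invented for the example and do not appear in the diff.

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// doSomething is a hypothetical helper standing in for the code under test.
func doSomething() (int, error) { return 42, nil }

func TestRefactorPattern(t *testing.T) {
	got, err := doSomething()

	// Before the refactor, checks were written by hand:
	//
	//	if err != nil {
	//		t.Fatalf("unexpected error: %s", err)
	//	}
	//	if got != 42 {
	//		t.Fatalf("expected 42, got %d", got)
	//	}

	// After the refactor, testify performs the same checks and keeps the messages:
	require.NoError(t, err, "unexpected error")
	require.Equal(t, 42, got, "unexpected result")
}

The require package (rather than testify's assert package) is used throughout because it stops the test on failure, matching the t.Fatalf behaviour it replaces.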

@@ -27,7 +27,6 @@ import (
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
@@ -2327,9 +2326,7 @@ func TestBlockRanges(t *testing.T) {
app := db.Appender(ctx)
lbl := labels.FromStrings("a", "b")
_, err = app.Append(0, lbl, firstBlockMaxT-1, rand.Float64())
if err == nil {
t.Fatalf("appending a sample with a timestamp covered by a previous block shouldn't be possible")
}
require.Error(t, err, "appending a sample with a timestamp covered by a previous block shouldn't be possible")
_, err = app.Append(0, lbl, firstBlockMaxT+1, rand.Float64())
require.NoError(t, err)
_, err = app.Append(0, lbl, firstBlockMaxT+2, rand.Float64())
@@ -2347,9 +2344,8 @@ func TestBlockRanges(t *testing.T) {
}
require.Len(t, db.Blocks(), 2, "no new block created after the set timeout")
if db.Blocks()[0].Meta().MaxTime > db.Blocks()[1].Meta().MinTime {
t.Fatalf("new block overlaps old:%v,new:%v", db.Blocks()[0].Meta(), db.Blocks()[1].Meta())
}
require.LessOrEqual(t, db.Blocks()[0].Meta().MaxTime, db.Blocks()[1].Meta().MinTime,
"new block overlaps old:%v,new:%v", db.Blocks()[0].Meta(), db.Blocks()[1].Meta())
// Test that wal records are skipped when an existing block covers the same time ranges
// and compaction doesn't create an overlapping block.
@@ -2389,9 +2385,8 @@ func TestBlockRanges(t *testing.T) {
require.Len(t, db.Blocks(), 4, "no new block created after the set timeout")
if db.Blocks()[2].Meta().MaxTime > db.Blocks()[3].Meta().MinTime {
t.Fatalf("new block overlaps old:%v,new:%v", db.Blocks()[2].Meta(), db.Blocks()[3].Meta())
}
require.LessOrEqual(t, db.Blocks()[2].Meta().MaxTime, db.Blocks()[3].Meta().MinTime,
"new block overlaps old:%v,new:%v", db.Blocks()[2].Meta(), db.Blocks()[3].Meta())
}
// TestDBReadOnly ensures that opening a DB in readonly mode doesn't modify any files on the disk.
@@ -3180,9 +3175,8 @@ func TestOpen_VariousBlockStates(t *testing.T) {
var loaded int
for _, l := range loadedBlocks {
if _, ok := expectedLoadedDirs[filepath.Join(tmpDir, l.meta.ULID.String())]; !ok {
t.Fatal("unexpected block", l.meta.ULID, "was loaded")
}
_, ok := expectedLoadedDirs[filepath.Join(tmpDir, l.meta.ULID.String())]
require.True(t, ok, "unexpected block", l.meta.ULID, "was loaded")
loaded++
}
require.Len(t, expectedLoadedDirs, loaded)
@@ -3193,9 +3187,8 @@ func TestOpen_VariousBlockStates(t *testing.T) {
var ignored int
for _, f := range files {
if _, ok := expectedRemovedDirs[filepath.Join(tmpDir, f.Name())]; ok {
t.Fatal("expected", filepath.Join(tmpDir, f.Name()), "to be removed, but still exists")
}
_, ok := expectedRemovedDirs[filepath.Join(tmpDir, f.Name())]
require.False(t, ok, "expected", filepath.Join(tmpDir, f.Name()), "to be removed, but still exists")
if _, ok := expectedIgnoredDirs[filepath.Join(tmpDir, f.Name())]; ok {
ignored++
}
@@ -3486,8 +3479,8 @@ func testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t
// the "cannot populate chunk XXX: not found" error occurred. This error can occur
// when the iterator tries to fetch a head chunk which has been offloaded because
// of the head compaction in the meanwhile.
if firstErr != nil && !strings.Contains(firstErr.Error(), "cannot populate chunk") {
t.Fatalf("unexpected error: %s", firstErr.Error())
if firstErr != nil {
require.ErrorContains(t, firstErr, "cannot populate chunk", "unexpected error: %s", firstErr.Error())
}
}

@@ -18,6 +18,8 @@ import (
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/util/testutil"
)
@@ -27,54 +29,35 @@ func TestLocking(t *testing.T) {
fileName := filepath.Join(dir.Path(), "LOCK")
if _, err := os.Stat(fileName); err == nil {
t.Fatalf("File %q unexpectedly exists.", fileName)
}
_, err := os.Stat(fileName)
require.Error(t, err, "File %q unexpectedly exists.", fileName)
lock, existed, err := Flock(fileName)
if err != nil {
t.Fatalf("Error locking file %q: %s", fileName, err)
}
if existed {
t.Errorf("File %q reported as existing during locking.", fileName)
}
require.NoError(t, err, "Error locking file %q: %s", fileName, err)
require.False(t, existed, "File %q reported as existing during locking.", fileName)
// File must now exist.
if _, err = os.Stat(fileName); err != nil {
t.Errorf("Could not stat file %q expected to exist: %s", fileName, err)
}
_, err = os.Stat(fileName)
require.NoError(t, err, "Could not stat file %q expected to exist: %s", fileName, err)
// Try to lock again.
lockedAgain, existed, err := Flock(fileName)
if err == nil {
t.Fatalf("File %q locked twice.", fileName)
}
if lockedAgain != nil {
t.Error("Unsuccessful locking did not return nil.")
}
if !existed {
t.Errorf("Existing file %q not recognized.", fileName)
}
require.Error(t, err, "File %q locked twice.", fileName)
require.Nil(t, lockedAgain, "Unsuccessful locking did not return nil.")
require.True(t, existed, "Existing file %q not recognized.", fileName)
if err := lock.Release(); err != nil {
t.Errorf("Error releasing lock for file %q: %s", fileName, err)
}
err = lock.Release()
require.NoError(t, err, "Error releasing lock for file %q: %s", fileName, err)
// File must still exist.
if _, err = os.Stat(fileName); err != nil {
t.Errorf("Could not stat file %q expected to exist: %s", fileName, err)
}
_, err = os.Stat(fileName)
require.NoError(t, err, "Could not stat file %q expected to exist: %s", fileName, err)
// Lock existing file.
lock, existed, err = Flock(fileName)
if err != nil {
t.Fatalf("Error locking file %q: %s", fileName, err)
}
if !existed {
t.Errorf("Existing file %q not recognized.", fileName)
}
require.NoError(t, err, "Error locking file %q: %s", fileName, err)
require.True(t, existed, "Existing file %q not recognized.", fileName)
if err := lock.Release(); err != nil {
t.Errorf("Error releasing lock for file %q: %s", fileName, err)
}
err = lock.Release()
require.NoError(t, err, "Error releasing lock for file %q: %s", fileName, err)
}

@@ -206,7 +206,7 @@ func readTestWAL(t testing.TB, dir string) (recs []interface{}) {
require.NoError(t, err)
recs = append(recs, exemplars)
default:
t.Fatalf("unknown record type")
require.Fail(t, "unknown record type")
}
}
require.NoError(t, r.Err())
@@ -1371,7 +1371,7 @@ func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) {
case []record.RefMetadata:
metadata++
default:
t.Fatalf("unknown record type")
require.Fail(t, "unknown record type")
}
}
require.Equal(t, 1, series)
@@ -1620,9 +1620,7 @@ func TestComputeChunkEndTime(t *testing.T) {
for testName, tc := range cases {
t.Run(testName, func(t *testing.T) {
got := computeChunkEndTime(tc.start, tc.cur, tc.max, tc.ratioToFull)
if got != tc.res {
t.Errorf("expected %d for (start: %d, cur: %d, max: %d, ratioToFull: %f), got %d", tc.res, tc.start, tc.cur, tc.max, tc.ratioToFull, got)
}
require.Equal(t, tc.res, got, "(start: %d, cur: %d, max: %d)", tc.start, tc.cur, tc.max)
})
}
}

@@ -61,9 +61,7 @@ func TestMemPostings_ensureOrder(t *testing.T) {
ok := sort.SliceIsSorted(l, func(i, j int) bool {
return l[i] < l[j]
})
if !ok {
t.Fatalf("postings list %v is not sorted", l)
}
require.True(t, ok, "postings list %v is not sorted", l)
}
}
}
@@ -214,9 +212,7 @@ func TestIntersect(t *testing.T) {
for _, c := range cases {
t.Run("", func(t *testing.T) {
if c.res == nil {
t.Fatal("intersect result expectancy cannot be nil")
}
require.NotNil(t, c.res, "intersect result expectancy cannot be nil")
expected, err := ExpandPostings(c.res)
require.NoError(t, err)
@@ -228,9 +224,7 @@ func TestIntersect(t *testing.T) {
return
}
if i == EmptyPostings() {
t.Fatal("intersect unexpected result: EmptyPostings sentinel")
}
require.NotEqual(t, EmptyPostings(), i, "intersect unexpected result: EmptyPostings sentinel")
res, err := ExpandPostings(i)
require.NoError(t, err)
@@ -501,9 +495,7 @@ func TestMergedPostings(t *testing.T) {
for _, c := range cases {
t.Run("", func(t *testing.T) {
if c.res == nil {
t.Fatal("merge result expectancy cannot be nil")
}
require.NotNil(t, c.res, "merge result expectancy cannot be nil")
ctx := context.Background()
@@ -517,9 +509,7 @@ func TestMergedPostings(t *testing.T) {
return
}
if m == EmptyPostings() {
t.Fatal("merge unexpected result: EmptyPostings sentinel")
}
require.NotEqual(t, EmptyPostings(), m, "merge unexpected result: EmptyPostings sentinel")
res, err := ExpandPostings(m)
require.NoError(t, err)
@@ -897,9 +887,7 @@ func TestWithoutPostings(t *testing.T) {
for _, c := range cases {
t.Run("", func(t *testing.T) {
if c.res == nil {
t.Fatal("without result expectancy cannot be nil")
}
require.NotNil(t, c.res, "without result expectancy cannot be nil")
expected, err := ExpandPostings(c.res)
require.NoError(t, err)
@@ -911,9 +899,7 @@ func TestWithoutPostings(t *testing.T) {
return
}
if w == EmptyPostings() {
t.Fatal("without unexpected result: EmptyPostings sentinel")
}
require.NotEqual(t, EmptyPostings(), w, "without unexpected result: EmptyPostings sentinel")
res, err := ExpandPostings(w)
require.NoError(t, err)

@@ -2702,22 +2702,7 @@ func TestFindSetMatches(t *testing.T) {
}
for _, c := range cases {
matches := findSetMatches(c.pattern)
if len(c.exp) == 0 {
if len(matches) != 0 {
t.Errorf("Evaluating %s, unexpected result %v", c.pattern, matches)
}
} else {
if len(matches) != len(c.exp) {
t.Errorf("Evaluating %s, length of result not equal to exp", c.pattern)
} else {
for i := 0; i < len(c.exp); i++ {
if c.exp[i] != matches[i] {
t.Errorf("Evaluating %s, unexpected result %s", c.pattern, matches[i])
}
}
}
}
require.Equal(t, c.exp, findSetMatches(c.pattern), "Evaluating %s, unexpected result.", c.pattern)
}
}
@@ -3016,9 +3001,7 @@ func TestPostingsForMatchers(t *testing.T) {
}
}
require.NoError(t, p.Err())
if len(exp) != 0 {
t.Errorf("Evaluating %v, missing results %+v", c.matchers, exp)
}
require.Empty(t, exp, "Evaluating %v", c.matchers)
})
}
}
@@ -3101,9 +3084,7 @@ func TestClose(t *testing.T) {
createBlock(t, dir, genSeries(1, 1, 10, 20))
db, err := Open(dir, nil, nil, DefaultOptions(), nil)
if err != nil {
t.Fatalf("Opening test storage failed: %s", err)
}
require.NoError(t, err, "Opening test storage failed: %s", err)
defer func() {
require.NoError(t, db.Close())
}()

@@ -528,7 +528,7 @@ func TestMigrateWAL_Fuzz(t *testing.T) {
require.NoError(t, err)
res = append(res, s)
default:
t.Fatalf("unknown record type %d", dec.Type(rec))
require.Fail(t, "unknown record type %d", dec.Type(rec))
}
}
require.NoError(t, r.Err())

@@ -182,16 +182,14 @@ func TestReader(t *testing.T) {
t.Logf("record %d", j)
rec := r.Record()
if j >= len(c.exp) {
t.Fatal("received more records than expected")
}
require.Less(t, j, len(c.exp), "received more records than expected")
require.Equal(t, c.exp[j], rec, "Bytes within record did not match expected Bytes")
}
if !c.fail && r.Err() != nil {
t.Fatalf("unexpected error: %s", r.Err())
if !c.fail {
require.NoError(t, r.Err(), "unexpected error: %s", r.Err())
}
if c.fail && r.Err() == nil {
t.Fatalf("expected error but got none")
if c.fail {
require.Error(t, r.Err(), "expected error but got none")
}
})
}

@@ -192,9 +192,7 @@ func TestWALRepair_ReadingError(t *testing.T) {
require.Len(t, result, test.intactRecs, "Wrong number of intact records")
for i, r := range result {
if !bytes.Equal(records[i], r) {
t.Fatalf("record %d diverges: want %x, got %x", i, records[i][:10], r[:10])
}
require.True(t, bytes.Equal(records[i], r), "record %d diverges: want %x, got %x", i, records[i][:10], r[:10])
}
// Make sure there is a new 0 size Segment after the corrupted Segment.