Refactor test assertions (#8110)

* Refactor test assertions

This pull request replaces assert.True with fine-grained assertions wherever possible.
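
For illustration, a minimal sketch of the pattern (hypothetical test name and values; the testify assertions are the ones used throughout the diff):

```go
package example

import (
	"errors"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

// Each coarse assert.True is replaced by the assertion that matches its
// predicate, so a failure reports the compared values instead of a bare
// "should be true". Names and values here are hypothetical.
func TestFineGrainedAssertions(t *testing.T) {
	qc := 3
	assert.True(t, qc > 0, "no queries logged")   // before: failure output carries no values
	assert.Greater(t, qc, 0, "no queries logged") // after: failure reports qc and the bound

	err := errors.New("query failed: context canceled")
	assert.True(t, strings.Contains(err.Error(), "context canceled")) // before
	assert.Contains(t, err.Error(), "context canceled")               // after

	// Manual panic checks written with defer/recover become assert.Panics.
	assert.Panics(t, func() {
		var m map[string]int
		m["x"] = 1 // writing to a nil map panics
	})
}
```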

Signed-off-by: Julien Pivotto <roidelapluie@inuits.eu>
Julien Pivotto 2020-10-27 11:06:53 +01:00 committed by GitHub
parent 2cbc0f9bfe
commit 1282d1b39c
40 changed files with 200 additions and 203 deletions


@@ -298,7 +298,7 @@ func (p *queryLogTest) run(t *testing.T) {
 	if p.exactQueryCount() {
 		assert.Equal(t, 1, qc)
 	} else {
-		assert.True(t, qc > 0, "no queries logged")
+		assert.Greater(t, qc, 0, "no queries logged")
 	}
 	p.validateLastQuery(t, ql)
@@ -324,7 +324,7 @@ func (p *queryLogTest) run(t *testing.T) {
 	if p.exactQueryCount() {
 		assert.Equal(t, qc, len(ql))
 	} else {
-		assert.True(t, len(ql) > qc, "no queries logged")
+		assert.Greater(t, len(ql), qc, "no queries logged")
 	}
 	p.validateLastQuery(t, ql)
 	qc = len(ql)
@@ -355,7 +355,7 @@ func (p *queryLogTest) run(t *testing.T) {
 	if p.exactQueryCount() {
 		assert.Equal(t, qc, len(ql))
 	} else {
-		assert.True(t, len(ql) > qc, "no queries logged")
+		assert.Greater(t, len(ql), qc, "no queries logged")
 	}
 	p.validateLastQuery(t, ql)
@@ -368,7 +368,7 @@ func (p *queryLogTest) run(t *testing.T) {
 	if p.exactQueryCount() {
 		assert.Equal(t, 1, qc)
 	} else {
-		assert.True(t, qc > 0, "no queries logged")
+		assert.Greater(t, qc, 0, "no queries logged")
 	}
 }


@@ -19,7 +19,6 @@ import (
 	"net/url"
 	"path/filepath"
 	"regexp"
-	"strings"
 	"testing"
 	"time"
@@ -763,8 +762,8 @@ func TestElideSecrets(t *testing.T) {
 	yamlConfig := string(config)
 	matches := secretRe.FindAllStringIndex(yamlConfig, -1)
-	assert.True(t, len(matches) == 10, "wrong number of secret matches found")
-	assert.True(t, !strings.Contains(yamlConfig, "mysecret"),
+	assert.Equal(t, 10, len(matches), "wrong number of secret matches found")
+	assert.NotContains(t, yamlConfig, "mysecret",
 		"yaml marshal reveals authentication credentials.")
 }
@@ -1027,7 +1026,7 @@ func TestBadConfigs(t *testing.T) {
 	for _, ee := range expectedErrors {
 		_, err := LoadFile("testdata/" + ee.filename)
 		assert.Error(t, err, "%s", ee.filename)
-		assert.True(t, strings.Contains(err.Error(), ee.errMsg),
+		assert.Contains(t, err.Error(), ee.errMsg,
 			"Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
 	}
 }


@@ -275,7 +275,7 @@ func checkOneTarget(t *testing.T, tg []*targetgroup.Group) {
 		assert.Equal(t, target.Source, string(target.Labels["__meta_consul_service"]))
 		if target.Source == "test" {
 			// test service should have one node.
-			assert.True(t, len(target.Targets) > 0, "Test service should have one node")
+			assert.Greater(t, len(target.Targets), 0, "Test service should have one node")
 		}
 	}


@@ -15,7 +15,6 @@ package openstack
 import (
 	"context"
-	"strings"
 	"testing"
 	"github.com/prometheus/common/model"
@@ -96,5 +95,5 @@ func TestOpenstackSDHypervisorRefreshWithDoneContext(t *testing.T) {
 	cancel()
 	_, err := hypervisor.refresh(ctx)
 	assert.Error(t, err)
-	assert.True(t, strings.Contains(err.Error(), context.Canceled.Error()), "%q doesn't contain %q", err, context.Canceled)
+	assert.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
 }


@@ -16,7 +16,6 @@ package openstack
 import (
 	"context"
 	"fmt"
-	"strings"
 	"testing"
 	"github.com/prometheus/common/model"
@@ -135,5 +134,5 @@ func TestOpenstackSDInstanceRefreshWithDoneContext(t *testing.T) {
 	cancel()
 	_, err := hypervisor.refresh(ctx)
 	assert.Error(t, err)
-	assert.True(t, strings.Contains(err.Error(), context.Canceled.Error()), "%q doesn't contain %q", err, context.Canceled)
+	assert.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
 }


@@ -87,7 +87,7 @@ func TestTritonSDNew(t *testing.T) {
 	assert.NoError(t, err)
 	assert.NotNil(t, td)
 	assert.NotNil(t, td.client)
-	assert.True(t, td.interval != 0, "")
+	assert.NotZero(t, td.interval)
 	assert.NotNil(t, td.sdConfig)
 	assert.Equal(t, conf.Account, td.sdConfig.Account)
 	assert.Equal(t, conf.DNSSuffix, td.sdConfig.DNSSuffix)
@@ -98,7 +98,7 @@ func TestTritonSDNew(t *testing.T) {
 func TestTritonSDNewBadConfig(t *testing.T) {
 	td, err := newTritonDiscovery(badconf)
 	assert.Error(t, err)
-	assert.True(t, td == nil, "")
+	assert.Nil(t, td)
 }
 func TestTritonSDNewGroupsConfig(t *testing.T) {
@@ -106,7 +106,7 @@ func TestTritonSDNewGroupsConfig(t *testing.T) {
 	assert.NoError(t, err)
 	assert.NotNil(t, td)
 	assert.NotNil(t, td.client)
-	assert.True(t, td.interval != 0, "")
+	assert.NotZero(t, td.interval)
 	assert.NotNil(t, td.sdConfig)
 	assert.Equal(t, groupsconf.Account, td.sdConfig.Account)
 	assert.Equal(t, groupsconf.DNSSuffix, td.sdConfig.DNSSuffix)
@@ -120,8 +120,8 @@ func TestTritonSDNewCNConfig(t *testing.T) {
 	assert.NoError(t, err)
 	assert.NotNil(t, td)
 	assert.NotNil(t, td.client)
-	assert.True(t, td.interval != 0, "")
-	assert.NotNil(t, td.sdConfig)
+	assert.NotZero(t, td.interval)
+	assert.NotZero(t, td.sdConfig)
 	assert.Equal(t, cnconf.Role, td.sdConfig.Role)
 	assert.Equal(t, cnconf.Account, td.sdConfig.Account)
 	assert.Equal(t, cnconf.DNSSuffix, td.sdConfig.DNSSuffix)
@@ -131,7 +131,7 @@ func TestTritonSDNewCNConfig(t *testing.T) {
 func TestTritonSDRefreshNoTargets(t *testing.T) {
 	tgts := testTritonSDRefresh(t, conf, "{\"containers\":[]}")
-	assert.True(t, tgts == nil, "")
+	assert.Nil(t, tgts)
 }
 func TestTritonSDRefreshMultipleTargets(t *testing.T) {
@@ -234,12 +234,12 @@ func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet
 	host, strport, err := net.SplitHostPort(u.Host)
 	assert.NoError(t, err)
-	assert.True(t, host != "", "")
-	assert.True(t, strport != "", "")
+	assert.NotEmpty(t, host)
+	assert.NotEmpty(t, strport)
 	port, err := strconv.Atoi(strport)
 	assert.NoError(t, err)
-	assert.True(t, port != 0, "")
+	assert.NotZero(t, port)
 	td.sdConfig.Port = port


@@ -83,7 +83,7 @@ func TestHandlerNextBatch(t *testing.T) {
 	assert.NoError(t, alertsEqual(expected[0:maxBatchSize], h.nextBatch()))
 	assert.NoError(t, alertsEqual(expected[maxBatchSize:2*maxBatchSize], h.nextBatch()))
 	assert.NoError(t, alertsEqual(expected[2*maxBatchSize:], h.nextBatch()))
-	assert.True(t, len(h.queue) == 0, "Expected queue to be empty but got %d alerts", len(h.queue))
+	assert.Equal(t, 0, len(h.queue), "Expected queue to be empty but got %d alerts", len(h.queue))
 }
 func alertsEqual(a, b []*Alert) error {
@@ -201,7 +201,7 @@ func TestHandlerSendAll(t *testing.T) {
 	checkNoErr()
 	status2.Store(int32(http.StatusInternalServerError))
-	assert.True(t, !h.sendAll(h.queue...), "all sends succeeded unexpectedly")
+	assert.False(t, h.sendAll(h.queue...), "all sends succeeded unexpectedly")
 	checkNoErr()
 }


@@ -387,10 +387,7 @@ func TestLabels_FromStrings(t *testing.T) {
 	assert.Equal(t, expected, labels, "unexpected labelset")
-	defer func() { recover() }()
-	FromStrings("aaa", "111", "bbb")
-	assert.True(t, false, "did not panic as expected")
+	assert.Panics(t, func() { FromStrings("aaa", "111", "bbb") })
 }
 func TestLabels_Compare(t *testing.T) {
@@ -640,8 +637,8 @@ func TestLabels_Hash(t *testing.T) {
 		{Name: "baz", Value: "qux"},
 	}
 	assert.Equal(t, lbls.Hash(), lbls.Hash())
-	assert.True(t, lbls.Hash() != Labels{lbls[1], lbls[0]}.Hash(), "unordered labels match.")
-	assert.True(t, lbls.Hash() != Labels{lbls[0]}.Hash(), "different labels match.")
+	assert.NotEqual(t, lbls.Hash(), Labels{lbls[1], lbls[0]}.Hash(), "unordered labels match.")
+	assert.NotEqual(t, lbls.Hash(), Labels{lbls[0]}.Hash(), "different labels match.")
 }
 var benchmarkLabelsResult uint64


@@ -440,7 +440,7 @@ func TestTargetLabelValidity(t *testing.T) {
 		{"foo${bar}foo", true},
 	}
 	for _, test := range tests {
-		assert.True(t, relabelTarget.Match([]byte(test.str)) == test.valid,
+		assert.Equal(t, test.valid, relabelTarget.Match([]byte(test.str)),
 			"Expected %q to be %v", test.str, test.valid)
 	}
 }


@@ -85,12 +85,12 @@ const (
 type MetricType string
 const (
-	MetricTypeCounter        = "counter"
-	MetricTypeGauge          = "gauge"
-	MetricTypeHistogram      = "histogram"
-	MetricTypeGaugeHistogram = "gaugehistogram"
-	MetricTypeSummary        = "summary"
-	MetricTypeInfo           = "info"
-	MetricTypeStateset       = "stateset"
-	MetricTypeUnknown        = "unknown"
+	MetricTypeCounter        = MetricType("counter")
+	MetricTypeGauge          = MetricType("gauge")
+	MetricTypeHistogram      = MetricType("histogram")
+	MetricTypeGaugeHistogram = MetricType("gaugehistogram")
+	MetricTypeSummary        = MetricType("summary")
+	MetricTypeInfo           = MetricType("info")
+	MetricTypeStateset       = MetricType("stateset")
+	MetricTypeUnknown        = MetricType("unknown")
 )


@@ -1129,7 +1129,7 @@ func TestQueryLogger_basic(t *testing.T) {
 	assert.Equal(t, 2*l, len(f1.logs))
 	// Test that we close the query logger when unsetting it.
-	assert.True(t, !f1.closed, "expected f1 to be open, got closed")
+	assert.False(t, f1.closed, "expected f1 to be open, got closed")
 	engine.SetQueryLogger(nil)
 	assert.True(t, f1.closed, "expected f1 to be closed, got open")
 	queryExec()
@@ -1138,11 +1138,11 @@ func TestQueryLogger_basic(t *testing.T) {
 	f2 := NewFakeQueryLogger()
 	f3 := NewFakeQueryLogger()
 	engine.SetQueryLogger(f2)
-	assert.True(t, !f2.closed, "expected f2 to be open, got closed")
+	assert.False(t, f2.closed, "expected f2 to be open, got closed")
 	queryExec()
 	engine.SetQueryLogger(f3)
 	assert.True(t, f2.closed, "expected f2 to be closed, got open")
-	assert.True(t, !f3.closed, "expected f3 to be open, got closed")
+	assert.False(t, f3.closed, "expected f3 to be open, got closed")
 	queryExec()
 }


@@ -15,7 +15,6 @@ package promql
 import (
 	"context"
-	"fmt"
 	"testing"
 	"time"
@@ -56,19 +55,19 @@ func TestDeriv(t *testing.T) {
 	assert.NoError(t, result.Err)
 	vec, _ := result.Vector()
-	assert.True(t, len(vec) == 1, "Expected 1 result, got %d", len(vec))
-	assert.True(t, vec[0].V == 0.0, "Expected 0.0 as value, got %f", vec[0].V)
+	assert.Equal(t, 1, len(vec), "Expected 1 result, got %d", len(vec))
+	assert.Equal(t, 0.0, vec[0].V, "Expected 0.0 as value, got %f", vec[0].V)
 }
 func TestFunctionList(t *testing.T) {
 	// Test that Functions and parser.Functions list the same functions.
 	for i := range FunctionCalls {
 		_, ok := parser.Functions[i]
-		assert.True(t, ok, fmt.Sprintf("function %s exists in promql package, but not in parser package", i))
+		assert.True(t, ok, "function %s exists in promql package, but not in parser package", i)
 	}
 	for i := range parser.Functions {
 		_, ok := FunctionCalls[i]
-		assert.True(t, ok, (fmt.Sprintf("function %s exists in parser package, but not in promql package", i)))
+		assert.True(t, ok, "function %s exists in parser package, but not in promql package", i)
 	}
 }


@@ -15,7 +15,6 @@ package parser
 import (
 	"math"
-	"strings"
 	"testing"
 	"time"
@@ -2659,14 +2658,14 @@ func TestParseExpressions(t *testing.T) {
 		expr, err := ParseExpr(test.input)
 		// Unexpected errors are always caused by a bug.
-		assert.True(t, err != errUnexpected, "unexpected error occurred")
+		assert.NotEqual(t, err, errUnexpected, "unexpected error occurred")
 		if !test.fail {
 			assert.NoError(t, err)
 			assert.Equal(t, test.expected, expr, "error on input '%s'", test.input)
 		} else {
 			assert.Error(t, err)
-			assert.True(t, strings.Contains(err.Error(), test.errMsg), "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error())
+			assert.Contains(t, err.Error(), test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error())
 			errorList, ok := err.(ParseErrors)
@@ -2804,7 +2803,7 @@ func TestParseSeries(t *testing.T) {
 		metric, vals, err := ParseSeriesDesc(test.input)
 		// Unexpected errors are always caused by a bug.
-		assert.True(t, err != errUnexpected, "unexpected error occurred")
+		assert.NotEqual(t, err, errUnexpected, "unexpected error occurred")
 		if !test.fail {
 			assert.NoError(t, err)


@@ -134,9 +134,9 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
 			// Get the series for the matcher.
 			ss := querier.Select(false, nil, matchers...)
-			assert.True(t, ss.Next(), "")
+			assert.True(t, ss.Next())
 			storageSeries := ss.At()
-			assert.True(t, !ss.Next(), "Expecting only 1 series")
+			assert.False(t, ss.Next(), "Expecting only 1 series")
 			// Convert `storage.Series` to `promql.Series`.
 			got := Series{


@@ -15,6 +15,7 @@ package rules
 import (
 	"context"
+	"html/template"
 	"testing"
 	"time"
@@ -33,16 +34,16 @@ func TestAlertingRuleHTMLSnippet(t *testing.T) {
 	assert.NoError(t, err)
 	rule := NewAlertingRule("testrule", expr, 0, labels.FromStrings("html", "<b>BOLD</b>"), labels.FromStrings("html", "<b>BOLD</b>"), nil, false, nil)
-	const want = `alert: <a href="/test/prefix/graph?g0.expr=ALERTS%7Balertname%3D%22testrule%22%7D&g0.tab=1">testrule</a>
+	const want = template.HTML(`alert: <a href="/test/prefix/graph?g0.expr=ALERTS%7Balertname%3D%22testrule%22%7D&g0.tab=1">testrule</a>
 expr: <a href="/test/prefix/graph?g0.expr=foo%7Bhtml%3D%22%3Cb%3EBOLD%3Cb%3E%22%7D&g0.tab=1">foo{html=&#34;&lt;b&gt;BOLD&lt;b&gt;&#34;}</a>
 labels:
   html: '&lt;b&gt;BOLD&lt;/b&gt;'
 annotations:
   html: '&lt;b&gt;BOLD&lt;/b&gt;'
-`
+`)
 	got := rule.HTMLSnippet("/test/prefix")
-	assert.True(t, want == got, "incorrect HTML snippet; want:\n\n|%v|\n\ngot:\n\n|%v|", want, got)
+	assert.Equal(t, want, got, "incorrect HTML snippet; want:\n\n|%v|\n\ngot:\n\n|%v|", want, got)
 }
 func TestAlertingRuleState(t *testing.T) {
@@ -81,7 +82,7 @@ func TestAlertingRuleState(t *testing.T) {
 		rule := NewAlertingRule(test.name, nil, 0, nil, nil, nil, true, nil)
 		rule.active = test.active
 		got := rule.State()
-		assert.True(t, test.want == got, "test case %d unexpected AlertState, want:%d got:%d", i, test.want, got)
+		assert.Equal(t, test.want, got, "test case %d unexpected AlertState, want:%d got:%d", i, test.want, got)
 	}
 }


@@ -172,7 +172,7 @@ func TestAlertingRule(t *testing.T) {
 		for i := range test.result {
 			test.result[i].T = timestamp.FromTime(evalTime)
 		}
-		assert.True(t, len(test.result) == len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
+		assert.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
 		sort.Slice(filteredRes, func(i, j int) bool {
 			return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
@@ -180,7 +180,7 @@ func TestAlertingRule(t *testing.T) {
 		assert.Equal(t, test.result, filteredRes)
 		for _, aa := range rule.ActiveAlerts() {
-			assert.True(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+			assert.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
 		}
 	}
 }
@@ -325,7 +325,7 @@ func TestForStateAddSamples(t *testing.T) {
 				test.result[i].V = forState
 			}
 		}
-		assert.True(t, len(test.result) == len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
+		assert.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
 		sort.Slice(filteredRes, func(i, j int) bool {
 			return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
@@ -333,7 +333,7 @@ func TestForStateAddSamples(t *testing.T) {
 		assert.Equal(t, test.result, filteredRes)
 		for _, aa := range rule.ActiveAlerts() {
-			assert.True(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+			assert.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
 		}
 	}
@@ -402,7 +402,7 @@ func TestForStateRestore(t *testing.T) {
 	exp := rule.ActiveAlerts()
 	for _, aa := range exp {
-		assert.True(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+		assert.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
 	}
 	sort.Slice(exp, func(i, j int) bool {
 		return labels.Compare(exp[i].Labels, exp[j].Labels) < 0
@@ -466,7 +466,7 @@ func TestForStateRestore(t *testing.T) {
 		got := newRule.ActiveAlerts()
 		for _, aa := range got {
-			assert.True(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+			assert.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
 		}
 		sort.Slice(got, func(i, j int) bool {
 			return labels.Compare(got[i].Labels, got[j].Labels) < 0
@@ -494,7 +494,7 @@ func TestForStateRestore(t *testing.T) {
 			// Difference in time should be within 1e6 ns, i.e. 1ms
 			// (due to conversion between ns & ms, float64 & int64).
 			activeAtDiff := float64(e.ActiveAt.Unix() + int64(tst.downDuration/time.Second) - got[i].ActiveAt.Unix())
-			assert.True(t, math.Abs(activeAtDiff) == 0, "'for' state restored time is wrong")
+			assert.Equal(t, 0.0, math.Abs(activeAtDiff), "'for' state restored time is wrong")
 		}
 	}
 }
@@ -727,7 +727,7 @@ func TestUpdate(t *testing.T) {
 	err := ruleManager.Update(10*time.Second, files, nil)
 	assert.NoError(t, err)
-	assert.True(t, len(ruleManager.groups) > 0, "expected non-empty rule groups")
+	assert.Greater(t, len(ruleManager.groups), 0, "expected non-empty rule groups")
 	ogs := map[string]*Group{}
 	for h, g := range ruleManager.groups {
 		g.seriesInPreviousEval = []map[string]labels.Labels{
@@ -748,7 +748,7 @@ func TestUpdate(t *testing.T) {
 	// Groups will be recreated if updated.
 	rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml")
-	assert.True(t, len(errs) == 0, "file parsing failures")
+	assert.Equal(t, 0, len(errs), "file parsing failures")
 	tmpFile, err := ioutil.TempFile("", "rules.test.*.yaml")
 	assert.NoError(t, err)
@@ -885,7 +885,7 @@ func TestNotify(t *testing.T) {
 	// Alert sent right away
 	group.Eval(ctx, time.Unix(1, 0))
 	assert.Equal(t, 1, len(lastNotified))
-	assert.True(t, !lastNotified[0].ValidUntil.IsZero(), "ValidUntil should not be zero")
+	assert.NotZero(t, lastNotified[0].ValidUntil, "ValidUntil should not be zero")
 	// Alert is not sent 1s later
 	group.Eval(ctx, time.Unix(2, 0))
@@ -1160,6 +1160,6 @@ func TestGroupHasAlertingRules(t *testing.T) {
 	for i, test := range tests {
 		got := test.group.HasAlertingRules()
-		assert.True(t, test.want == got, "test case %d failed, expected:%t got:%t", i, test.want, got)
+		assert.Equal(t, test.want, got, "test case %d failed, expected:%t got:%t", i, test.want, got)
 	}
 }


@@ -15,6 +15,7 @@ package rules
 import (
 	"context"
+	"html/template"
 	"testing"
 	"time"
@@ -83,14 +84,14 @@ func TestRecordingRuleHTMLSnippet(t *testing.T) {
 	assert.NoError(t, err)
 	rule := NewRecordingRule("testrule", expr, labels.FromStrings("html", "<b>BOLD</b>"))
-	const want = `record: <a href="/test/prefix/graph?g0.expr=testrule&g0.tab=1">testrule</a>
+	const want = template.HTML(`record: <a href="/test/prefix/graph?g0.expr=testrule&g0.tab=1">testrule</a>
 expr: <a href="/test/prefix/graph?g0.expr=foo%7Bhtml%3D%22%3Cb%3EBOLD%3Cb%3E%22%7D&g0.tab=1">foo{html=&#34;&lt;b&gt;BOLD&lt;b&gt;&#34;}</a>
 labels:
   html: '&lt;b&gt;BOLD&lt;/b&gt;'
-`
+`)
 	got := rule.HTMLSnippet("/test/prefix")
-	assert.True(t, want == got, "incorrect HTML snippet; want:\n\n%s\n\ngot:\n\n%s", want, got)
+	assert.Equal(t, want, got, "incorrect HTML snippet; want:\n\n%s\n\ngot:\n\n%s", want, got)
 }
 // TestRuleEvalDuplicate tests for duplicate labels in recorded metrics, see #5529.


@@ -231,8 +231,8 @@ func TestScrapePoolStop(t *testing.T) {
 	assert.Equal(t, numTargets, len(stopped), "Unexpected number of stopped loops")
 	mtx.Unlock()
-	assert.True(t, len(sp.activeTargets) == 0, "Targets were not cleared on stopping: %d left", len(sp.activeTargets))
-	assert.True(t, len(sp.loops) == 0, "Loops were not cleared on stopping: %d left", len(sp.loops))
+	assert.Equal(t, 0, len(sp.activeTargets), "Targets were not cleared on stopping: %d left", len(sp.activeTargets))
+	assert.Equal(t, 0, len(sp.loops), "Loops were not cleared on stopping: %d left", len(sp.loops))
 }
 func TestScrapePoolReload(t *testing.T) {
@@ -872,19 +872,19 @@ test_metric 1
 	md, ok := cache.GetMetadata("test_metric")
 	assert.True(t, ok, "expected metadata to be present")
-	assert.True(t, textparse.MetricTypeCounter == md.Type, "unexpected metric type")
+	assert.Equal(t, textparse.MetricTypeCounter, md.Type, "unexpected metric type")
 	assert.Equal(t, "some help text", md.Help)
 	assert.Equal(t, "metric", md.Unit)
 	md, ok = cache.GetMetadata("test_metric_no_help")
 	assert.True(t, ok, "expected metadata to be present")
-	assert.True(t, textparse.MetricTypeGauge == md.Type, "unexpected metric type")
+	assert.Equal(t, textparse.MetricTypeGauge, md.Type, "unexpected metric type")
 	assert.Equal(t, "", md.Help)
 	assert.Equal(t, "", md.Unit)
 	md, ok = cache.GetMetadata("test_metric_no_type")
 	assert.True(t, ok, "expected metadata to be present")
-	assert.True(t, textparse.MetricTypeUnknown == md.Type, "unexpected metric type")
+	assert.Equal(t, textparse.MetricTypeUnknown, md.Type, "unexpected metric type")
 	assert.Equal(t, "other help text", md.Help)
 	assert.Equal(t, "", md.Unit)
 }
@@ -1352,7 +1352,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
 	value := metric.GetCounter().GetValue()
 	change := value - beforeMetricValue
-	assert.True(t, change == 1, "Unexpected change of sample limit metric: %f", change)
+	assert.Equal(t, 1.0, change, "Unexpected change of sample limit metric: %f", change)
 	// And verify that we got the samples that fit under the limit.
 	want := []sample{
@@ -1765,7 +1765,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) {
 	}
 	_, err = ts.scrape(context.Background(), ioutil.Discard)
-	assert.True(t, strings.Contains(err.Error(), "404"), "Expected \"404 NotFound\" error but got: %s", err)
+	assert.Contains(t, err.Error(), "404", "Expected \"404 NotFound\" error but got: %s", err)
 }
 // testScraper implements the scraper interface and allows setting values
@@ -2118,15 +2118,15 @@ func TestReuseScrapeCache(t *testing.T) {
 		sp.reload(s.newConfig)
 		for fp, newCacheAddr := range cacheAddr(sp) {
 			if s.keep {
-				assert.True(t, initCacheAddr[fp] == newCacheAddr, "step %d: old cache and new cache are not the same", i)
+				assert.Equal(t, initCacheAddr[fp], newCacheAddr, "step %d: old cache and new cache are not the same", i)
 			} else {
-				assert.True(t, initCacheAddr[fp] != newCacheAddr, "step %d: old cache and new cache are the same", i)
+				assert.NotEqual(t, initCacheAddr[fp], newCacheAddr, "step %d: old cache and new cache are the same", i)
 			}
 		}
 		initCacheAddr = cacheAddr(sp)
 		sp.reload(s.newConfig)
 		for fp, newCacheAddr := range cacheAddr(sp) {
-			assert.True(t, initCacheAddr[fp] == newCacheAddr, "step %d: reloading the exact config invalidates the cache", i)
+			assert.Equal(t, initCacheAddr[fp], newCacheAddr, "step %d: reloading the exact config invalidates the cache", i)
 		}
 	}
 }


@@ -77,9 +77,9 @@ func TestSampleRing(t *testing.T) {
 			}
 			if found {
-				assert.True(t, sold.t >= s.t-c.delta, "%d: unexpected sample %d in buffer; buffer %v", i, sold.t, buffered)
+				assert.GreaterOrEqual(t, sold.t, s.t-c.delta, "%d: unexpected sample %d in buffer; buffer %v", i, sold.t, buffered)
 			} else {
-				assert.True(t, sold.t < s.t-c.delta, "%d: expected sample %d to be in buffer but was not; buffer %v", i, sold.t, buffered)
+				assert.Less(t, sold.t, s.t-c.delta, "%d: expected sample %d to be in buffer but was not; buffer %v", i, sold.t, buffered)
 			}
 		}
 	}
@@ -137,7 +137,7 @@ func TestBufferedSeriesIterator(t *testing.T) {
 	sampleEq(101, 10)
 	bufferEq([]sample{{t: 99, v: 8}, {t: 100, v: 9}})
-	assert.True(t, !it.Next(), "next succeeded unexpectedly")
+	assert.False(t, it.Next(), "next succeeded unexpectedly")
 }
 // At() should not be called once Next() returns false.
@@ -147,7 +147,7 @@ func TestBufferedSeriesIteratorNoBadAt(t *testing.T) {
 	m := &mockSeriesIterator{
 		seek: func(int64) bool { return false },
 		at: func() (int64, float64) {
-			assert.True(t, !done, "unexpectedly done")
+			assert.False(t, done, "unexpectedly done")
 			done = true
 			return 0, 0
 		},


@@ -174,7 +174,7 @@ func TestFanoutErrors(t *testing.T) {
 		}
 		if tc.warning != nil {
-			assert.True(t, len(ss.Warnings()) > 0, "warnings expected")
+			assert.Greater(t, len(ss.Warnings()), 0, "warnings expected")
 			assert.Error(t, ss.Warnings()[0])
 			assert.Equal(t, tc.warning.Error(), ss.Warnings()[0].Error())
 		}
@@ -199,7 +199,7 @@ func TestFanoutErrors(t *testing.T) {
 		}
 		if tc.warning != nil {
-			assert.True(t, len(ss.Warnings()) > 0, "warnings expected")
+			assert.Greater(t, len(ss.Warnings()), 0, "warnings expected")
 			assert.Error(t, ss.Warnings()[0])
 			assert.Equal(t, tc.warning.Error(), ss.Warnings()[0].Error())
 		}


@@ -206,7 +206,7 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
 				assert.Equal(t, expErr, actErr)
 				assert.Equal(t, expSmpl, actSmpl)
 			}
-			assert.True(t, !tc.expected.Next(), "Expected Next() to be false")
+			assert.False(t, tc.expected.Next(), "Expected Next() to be false")
 		})
 	}
 }
@@ -376,7 +376,7 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
 			}
 			assert.NoError(t, merged.Err())
-			assert.True(t, !tc.expected.Next(), "Expected Next() to be false")
+			assert.False(t, tc.expected.Next(), "Expected Next() to be false")
 		})
 	}
 }


@@ -53,7 +53,7 @@ func TestChunkedReaderCanReadFromChunkedWriter(t *testing.T) {
 	for ; i < 4; i++ {
 		msg, err := r.Next()
 		assert.NoError(t, err)
-		assert.True(t, i < len(msgs), "more messages then expected")
+		assert.Less(t, i, len(msgs), "more messages then expected")
 		assert.Equal(t, msgs[i], msg)
 	}
@@ -62,7 +62,7 @@ func TestChunkedReaderCanReadFromChunkedWriter(t *testing.T) {
 	msg, err := r.Next()
 	assert.NoError(t, err)
-	assert.True(t, i < len(msgs), "more messages then expected")
+	assert.Less(t, i, len(msgs), "more messages then expected")
 	assert.Equal(t, msgs[i], msg)
 	_, err = r.Next()


@@ -140,7 +140,7 @@ func TestConcreteSeriesSet(t *testing.T) {
 	assert.Equal(t, series1, c.At(), "Unexpected series returned.")
 	assert.True(t, c.Next(), "Expected Next() to be true.")
 	assert.Equal(t, series2, c.At(), "Unexpected series returned.")
-	assert.True(t, !c.Next(), "Expected Next() to be false.")
+	assert.False(t, c.Next(), "Expected Next() to be false.")
 }
 func TestConcreteSeriesClonesLabels(t *testing.T) {
@@ -185,7 +185,7 @@ func TestFromQueryResultWithDuplicates(t *testing.T) {
 	assert.True(t, isErrSeriesSet, "Expected resulting series to be an errSeriesSet")
 	errMessage := errSeries.Err().Error()
-	assert.True(t, errMessage == "duplicate label with name: foo", fmt.Sprintf("Expected error to be from duplicate label, but got: %s", errMessage))
+	assert.Equal(t, "duplicate label with name: foo", errMessage, fmt.Sprintf("Expected error to be from duplicate label, but got: %s", errMessage))
 }
 func TestNegotiateResponseType(t *testing.T) {


@@ -33,7 +33,7 @@ func TestIntern(t *testing.T) {
 	interned, ok := interner.pool[testString]
 	assert.Equal(t, true, ok)
-	assert.True(t, interned.refs.Load() == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
+	assert.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
 }
 func TestIntern_MultiRef(t *testing.T) {
@@ -44,13 +44,13 @@ func TestIntern_MultiRef(t *testing.T) {
 	interned, ok := interner.pool[testString]
 	assert.Equal(t, true, ok)
-	assert.True(t, interned.refs.Load() == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
+	assert.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
 	interner.intern(testString)
 	interned, ok = interner.pool[testString]
 	assert.Equal(t, true, ok)
-	assert.True(t, interned.refs.Load() == 2, fmt.Sprintf("expected refs to be 2 but it was %d", interned.refs.Load()))
+	assert.Equal(t, int64(2), interned.refs.Load(), fmt.Sprintf("expected refs to be 2 but it was %d", interned.refs.Load()))
 }
 func TestIntern_DeleteRef(t *testing.T) {
@@ -61,7 +61,7 @@ func TestIntern_DeleteRef(t *testing.T) {
 	interned, ok := interner.pool[testString]
 	assert.Equal(t, true, ok)
-	assert.True(t, interned.refs.Load() == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
+	assert.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
 	interner.release(testString)
 	_, ok = interner.pool[testString]
@@ -75,7 +75,7 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) {
 	interner.intern(testString)
 	interned, ok := interner.pool[testString]
 	assert.Equal(t, true, ok)
-	assert.True(t, interned.refs.Load() == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
+	assert.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
 	go interner.release(testString)
@@ -87,5 +87,5 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) {
 	interned, ok = interner.pool[testString]
 	interner.mtx.RUnlock()
 	assert.Equal(t, true, ok)
-	assert.True(t, interned.refs.Load() == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
+	assert.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
 }


@@ -334,7 +334,7 @@ func TestReleaseNoninternedString(t *testing.T) {
 	}
 	metric := client_testutil.ToFloat64(noReferenceReleases)
-	assert.True(t, metric == 0, "expected there to be no calls to release for strings that were not already interned: %d", int(metric))
+	assert.Equal(t, 0.0, metric, "expected there to be no calls to release for strings that were not already interned: %d", int(metric))
 }
 func TestShouldReshard(t *testing.T) {
@@ -725,10 +725,10 @@ func TestCalculateDesiredShards(t *testing.T) {
 		t.Log("desiredShards", m.numShards, "pendingSamples", pendingSamples)
 		m.numShards = m.calculateDesiredShards()
-		assert.True(t, m.numShards >= minShards, "Shards are too low. desiredShards=%d, minShards=%d, t_seconds=%d", m.numShards, minShards, ts/time.Second)
-		assert.True(t, m.numShards <= maxShards, "Shards are too high. desiredShards=%d, maxShards=%d, t_seconds=%d", m.numShards, maxShards, ts/time.Second)
+		assert.GreaterOrEqual(t, m.numShards, minShards, "Shards are too low. desiredShards=%d, minShards=%d, t_seconds=%d", m.numShards, minShards, ts/time.Second)
+		assert.LessOrEqual(t, m.numShards, maxShards, "Shards are too high. desiredShards=%d, maxShards=%d, t_seconds=%d", m.numShards, maxShards, ts/time.Second)
 	}
-	assert.True(t, pendingSamples == 0, "Remote write never caught up, there are still %d pending samples.", pendingSamples)
+	assert.Equal(t, int64(0), pendingSamples, "Remote write never caught up, there are still %d pending samples.", pendingSamples)
 }
 func TestQueueManagerMetrics(t *testing.T) {


@@ -362,12 +362,12 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
 	assert.Equal(t, 3, len(s.queues))
 	_, hashExists := s.queues[hashes[0]]
-	assert.True(t, !hashExists, "The queue for the first remote write configuration should have been restarted because the relabel configuration has changed.")
+	assert.False(t, hashExists, "The queue for the first remote write configuration should have been restarted because the relabel configuration has changed.")
 	q, hashExists := s.queues[hashes[1]]
 	assert.True(t, hashExists, "Hash of unchanged queue should have remained the same")
-	assert.True(t, q == queues[1], "Pointer of unchanged queue should have remained the same")
+	assert.Equal(t, q, queues[1], "Pointer of unchanged queue should have remained the same")
 	_, hashExists = s.queues[hashes[2]]
-	assert.True(t, !hashExists, "The queue for the third remote write configuration should have been restarted because the timeout has changed.")
+	assert.False(t, hashExists, "The queue for the third remote write configuration should have been restarted because the timeout has changed.")
 	storeHashes()
 	secondClient := s.queues[hashes[1]].client()
@@ -381,7 +381,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
 	assert.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
 	q, hashExists = s.queues[hashes[1]]
 	assert.True(t, hashExists, "Hash of queue with secret change should have remained the same")
-	assert.True(t, secondClient != q.client(), "Pointer of a client with a secret change should not be the same")
+	assert.NotEqual(t, secondClient, q.client(), "Pointer of a client with a secret change should not be the same")
 	_, hashExists = s.queues[hashes[2]]
 	assert.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
@@ -395,7 +395,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
 	assert.Equal(t, 2, len(s.queues))
 	_, hashExists = s.queues[hashes[0]]
-	assert.True(t, !hashExists, "If a config is removed, the queue should be stopped and recreated.")
+	assert.False(t, hashExists, "If a config is removed, the queue should be stopped and recreated.")
 	_, hashExists = s.queues[hashes[1]]
 	assert.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
 	_, hashExists = s.queues[hashes[2]]


@@ -51,7 +51,7 @@ func TestBlockMetaMustNeverBeVersion2(t *testing.T) {
 	meta, _, err := readMetaFile(dir)
 	assert.NoError(t, err)
-	assert.True(t, meta.Version != 2, "meta.json version must never be 2")
+	assert.NotEqual(t, 2, meta.Version, "meta.json version must never be 2")
 }
 func TestSetCompactionFailed(t *testing.T) {
@@ -181,7 +181,7 @@ func TestCorruptedChunk(t *testing.T) {
 			blockDir := createBlock(t, tmpdir, []storage.Series{series})
 			files, err := sequenceFiles(chunkDir(blockDir))
 			assert.NoError(t, err)
-			assert.True(t, len(files) > 0, "No chunk created.")
+			assert.Greater(t, len(files), 0, "No chunk created.")
 			f, err := os.OpenFile(files[0], os.O_RDWR, 0666)
 			assert.NoError(t, err)
@@ -204,7 +204,7 @@ func TestCorruptedChunk(t *testing.T) {
 			set := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
 			// Check chunk errors during iter time.
-			assert.True(t, set.Next(), "")
+			assert.True(t, set.Next())
 			it := set.At().Iterator()
 			assert.Equal(t, false, it.Next())
 			assert.Equal(t, tc.iterErr.Error(), it.Err().Error())
@@ -244,7 +244,7 @@ func TestBlockSize(t *testing.T) {
 	{
 		assert.NoError(t, blockInit.Delete(1, 10, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")))
 		expAfterDelete := blockInit.Size()
-		assert.True(t, expAfterDelete > expSizeInit, "after a delete the block size should be bigger as the tombstone file should grow %v > %v", expAfterDelete, expSizeInit)
+		assert.Greater(t, expAfterDelete, expSizeInit, "after a delete the block size should be bigger as the tombstone file should grow %v > %v", expAfterDelete, expSizeInit)
 		actAfterDelete, err := fileutil.DirSize(blockDirInit)
 		assert.NoError(t, err)
 		assert.Equal(t, expAfterDelete, actAfterDelete, "after a delete reported block size doesn't match actual disk size")
@@ -261,7 +261,7 @@ func TestBlockSize(t *testing.T) {
 		expAfterCompact := blockAfterCompact.Size()
 		actAfterCompact, err := fileutil.DirSize(blockAfterCompact.Dir())
 		assert.NoError(t, err)
-		assert.True(t, actAfterDelete > actAfterCompact, "after a delete and compaction the block size should be smaller %v,%v", actAfterDelete, actAfterCompact)
+		assert.Greater(t, actAfterDelete, actAfterCompact, "after a delete and compaction the block size should be smaller %v,%v", actAfterDelete, actAfterCompact)
 		assert.Equal(t, expAfterCompact, actAfterCompact, "after a delete and compaction reported block size doesn't match actual disk size")
 	}
 }


@@ -104,7 +104,8 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
 	}
 	// Checking on-disk bytes for the first file.
-	assert.True(t, len(hrw.mmappedChunkFiles) == 3 && len(hrw.closers) == 3, "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles))
+	assert.Equal(t, 3, len(hrw.mmappedChunkFiles), "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles))
+	assert.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers))
 	actualBytes, err := ioutil.ReadFile(firstFileName)
 	assert.NoError(t, err)
@@ -225,9 +226,9 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
 	hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool())
 	assert.NoError(t, err)
-	assert.True(t, !hrw.fileMaxtSet, "")
+	assert.False(t, hrw.fileMaxtSet)
 	assert.NoError(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error { return nil }))
-	assert.True(t, hrw.fileMaxtSet, "")
+	assert.True(t, hrw.fileMaxtSet)
 	verifyFiles([]int{3, 4, 5, 6, 7, 8})
 	// New file is created after restart even if last file was empty.
@@ -395,14 +396,14 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
 	// Open chunk disk mapper again, corrupt file should be removed.
 	hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool())
 	assert.NoError(t, err)
-	assert.True(t, !hrw.fileMaxtSet, "")
+	assert.False(t, hrw.fileMaxtSet)
 	assert.NoError(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error { return nil }))
-	assert.True(t, hrw.fileMaxtSet, "")
+	assert.True(t, hrw.fileMaxtSet)
 	// Removed from memory.
 	assert.Equal(t, 3, len(hrw.mmappedChunkFiles))
 	for idx := range hrw.mmappedChunkFiles {
-		assert.True(t, idx <= lastFile, "file index is bigger than previous last file")
+		assert.LessOrEqual(t, idx, lastFile, "file index is bigger than previous last file")
 	}
 	// Removed even from disk.
@@ -412,7 +413,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
 	for _, fi := range files {
 		seq, err := strconv.ParseUint(fi.Name(), 10, 64)
 		assert.NoError(t, err)
-		assert.True(t, seq <= uint64(lastFile), "file index on disk is bigger than previous last file")
+		assert.LessOrEqual(t, seq, uint64(lastFile), "file index on disk is bigger than previous last file")
 	}
 }
@@ -426,9 +427,9 @@ func testChunkDiskMapper(t *testing.T) *ChunkDiskMapper {
 	hrw, err := NewChunkDiskMapper(tmpdir, chunkenc.NewPool())
 	assert.NoError(t, err)
-	assert.True(t, !hrw.fileMaxtSet, "")
+	assert.False(t, hrw.fileMaxtSet)
 	assert.NoError(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error { return nil }))
-	assert.True(t, hrw.fileMaxtSet, "")
+	assert.True(t, hrw.fileMaxtSet)
 	return hrw
 }


@@ -1150,7 +1150,7 @@ func TestDisableAutoCompactions(t *testing.T) {
 		time.Sleep(10 * time.Millisecond)
 	}
-	assert.True(t, prom_testutil.ToFloat64(db.metrics.compactionsSkipped) > 0.0, "No compaction was skipped after the set timeout.")
+	assert.Greater(t, prom_testutil.ToFloat64(db.metrics.compactionsSkipped), 0.0, "No compaction was skipped after the set timeout.")
 	assert.Equal(t, 0, len(db.blocks))
 	// Enable the compaction, trigger it and check that the block is persisted.
@@ -1165,7 +1165,7 @@ func TestDisableAutoCompactions(t *testing.T) {
 		}
 		time.Sleep(100 * time.Millisecond)
 	}
-	assert.True(t, len(db.Blocks()) > 0, "No block was persisted after the set timeout.")
+	assert.Greater(t, len(db.Blocks()), 0, "No block was persisted after the set timeout.")
 }
 // TestCancelCompactions ensures that when the db is closed


@ -304,7 +304,7 @@ func TestDBAppenderAddRef(t *testing.T) {
ref2, err := app2.Add(labels.FromStrings("a", "b"), 133, 1) ref2, err := app2.Add(labels.FromStrings("a", "b"), 133, 1)
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, ref1 == ref2, "") assert.Equal(t, ref1, ref2)
// Reference must be valid to add another sample. // Reference must be valid to add another sample.
err = app2.AddFast(ref2, 143, 2) err = app2.AddFast(ref2, 143, 2)
@ -719,7 +719,7 @@ Outer:
}) })
if len(expSamples) == 0 { if len(expSamples) == 0 {
assert.True(t, res.Next() == false, "") assert.False(t, res.Next())
continue continue
} }
@ -948,7 +948,7 @@ func TestWALSegmentSizeOptions(t *testing.T) {
assert.Equal(t, int64(DefaultOptions().WALSegmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name()) assert.Equal(t, int64(DefaultOptions().WALSegmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
} }
lastFile := files[len(files)-1] lastFile := files[len(files)-1]
assert.True(t, int64(DefaultOptions().WALSegmentSize) > lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name()) assert.Greater(t, int64(DefaultOptions().WALSegmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
}, },
// Custom Wal Size. // Custom Wal Size.
2 * 32 * 1024: func(dbDir string, segmentSize int) { 2 * 32 * 1024: func(dbDir string, segmentSize int) {
@ -960,13 +960,13 @@ func TestWALSegmentSizeOptions(t *testing.T) {
files = append(files, f) files = append(files, f)
} }
} }
assert.True(t, len(files) > 1, "current WALSegmentSize should result in more than a single WAL file.") assert.Greater(t, len(files), 1, "current WALSegmentSize should result in more than a single WAL file.")
// All the full segment files (all but the last) should match the segment size option. // All the full segment files (all but the last) should match the segment size option.
for _, f := range files[:len(files)-1] { for _, f := range files[:len(files)-1] {
assert.Equal(t, int64(segmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name()) assert.Equal(t, int64(segmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
} }
lastFile := files[len(files)-1] lastFile := files[len(files)-1]
assert.True(t, int64(segmentSize) > lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name()) assert.Greater(t, int64(segmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
}, },
// Wal disabled. // Wal disabled.
-1: func(dbDir string, segmentSize int) { -1: func(dbDir string, segmentSize int) {
@ -1069,7 +1069,7 @@ func TestTombstoneClean(t *testing.T) {
}) })
if len(expSamples) == 0 { if len(expSamples) == 0 {
assert.True(t, res.Next() == false, "") assert.False(t, res.Next())
continue continue
} }
@ -1295,7 +1295,7 @@ func TestSizeRetention(t *testing.T) {
assert.Equal(t, 1, actRetentionCount, "metric retention count mismatch") assert.Equal(t, 1, actRetentionCount, "metric retention count mismatch")
assert.Equal(t, actSize, expSize, "metric db size doesn't match actual disk size") assert.Equal(t, actSize, expSize, "metric db size doesn't match actual disk size")
assert.True(t, expSize <= sizeLimit, "actual size (%v) is expected to be less than or equal to limit (%v)", expSize, sizeLimit) assert.LessOrEqual(t, expSize, sizeLimit, "actual size (%v) is expected to be less than or equal to limit (%v)", expSize, sizeLimit)
assert.Equal(t, len(blocks)-1, len(actBlocks), "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1) assert.Equal(t, len(blocks)-1, len(actBlocks), "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1)
assert.Equal(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime, "maxT mismatch of the first block") assert.Equal(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime, "maxT mismatch of the first block")
assert.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime, "maxT mismatch of the last block") assert.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime, "maxT mismatch of the last block")
@ -1425,7 +1425,7 @@ func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) {
metas[i] = BlockMeta{MinTime: int64(i * 10), MaxTime: int64((i + 1) * 10)} metas[i] = BlockMeta{MinTime: int64(i * 10), MaxTime: int64((i + 1) * 10)}
} }
assert.True(t, len(OverlappingBlocks(metas)) == 0, "we found unexpected overlaps") assert.Equal(t, 0, len(OverlappingBlocks(metas)), "we found unexpected overlaps")
// Add overlapping blocks. We have to establish order again since we aren't interested // Add overlapping blocks. We have to establish order again since we aren't interested
// in trivial overlaps caused by unorderedness. // in trivial overlaps caused by unorderedness.
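
For emptiness checks like the one above, assert.Equal(t, 0, len(x)) is the form this commit settles on; testify's assert.Len and assert.Empty are equivalent alternatives. A sketch over hypothetical data:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNoOverlaps(t *testing.T) {
	overlaps := []string{} // hypothetical result set

	assert.Equal(t, 0, len(overlaps), "we found unexpected overlaps") // the style used here
	assert.Len(t, overlaps, 0)                                        // reports the actual length on failure
	assert.Empty(t, overlaps)                                         // the most concise form
}
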
@ -1563,7 +1563,7 @@ func TestChunkAtBlockBoundary(t *testing.T) {
chunkCount++ chunkCount++
} }
} }
assert.True(t, chunkCount == 1, "expected 1 chunk in block %s, got %d", meta.ULID, chunkCount) assert.Equal(t, 1, chunkCount, "expected 1 chunk in block %s, got %d", meta.ULID, chunkCount)
} }
} }
@ -1592,7 +1592,7 @@ func TestQuerierWithBoundaryChunks(t *testing.T) {
err = db.Compact() err = db.Compact()
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, len(db.blocks) >= 3, "invalid test, less than three blocks in DB") assert.GreaterOrEqual(t, len(db.blocks), 3, "invalid test, less than three blocks in DB")
q, err := db.Querier(context.TODO(), blockRange, 2*blockRange) q, err := db.Querier(context.TODO(), blockRange, 2*blockRange)
assert.NoError(t, err) assert.NoError(t, err)
@ -1764,7 +1764,7 @@ func TestNoEmptyBlocks(t *testing.T) {
app = db.Appender(ctx) app = db.Appender(ctx)
_, err = app.Add(defaultLabel, 1, 0) _, err = app.Add(defaultLabel, 1, 0)
assert.True(t, err == storage.ErrOutOfBounds, "the head should be truncated so no samples in the past should be allowed") assert.Equal(t, storage.ErrOutOfBounds, err, "the head should be truncated so no samples in the past should be allowed")
// Adding new blocks. // Adding new blocks.
currentTime := db.Head().MaxTime() currentTime := db.Head().MaxTime()
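
Sentinel-error comparisons such as err == storage.ErrOutOfBounds translate to assert.Equal with the expected value first. In the sketch below, errOutOfBounds and doAppend are hypothetical stand-ins; the errors.Is line is an aside showing the stricter check to reach for when errors may be wrapped:

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

var errOutOfBounds = errors.New("out of bounds") // stand-in for storage.ErrOutOfBounds

func doAppend() error { return errOutOfBounds } // hypothetical call that rejects old samples

func TestSentinelError(t *testing.T) {
	err := doAppend()

	// Expected value first, actual value second.
	assert.Equal(t, errOutOfBounds, err, "the head should be truncated so no samples in the past should be allowed")

	// If the error could be wrapped, errors.Is is the safer comparison.
	assert.True(t, errors.Is(err, errOutOfBounds))
}
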
@ -1781,7 +1781,7 @@ func TestNoEmptyBlocks(t *testing.T) {
actBlocks, err = blockDirs(db.Dir()) actBlocks, err = blockDirs(db.Dir())
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(db.Blocks()), len(actBlocks)) assert.Equal(t, len(db.Blocks()), len(actBlocks))
assert.True(t, len(actBlocks) == 1, "No blocks created when compacting with >0 samples") assert.Equal(t, 1, len(actBlocks), "No blocks created when compacting with >0 samples")
}) })
t.Run(`When no new block is created from head, and there are some blocks on disk t.Run(`When no new block is created from head, and there are some blocks on disk
@ -2120,7 +2120,7 @@ func TestDBReadOnly(t *testing.T) {
expBlocks = dbWritable.Blocks() expBlocks = dbWritable.Blocks()
expDbSize, err := fileutil.DirSize(dbWritable.Dir()) expDbSize, err := fileutil.DirSize(dbWritable.Dir())
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, expDbSize > dbSizeBeforeAppend, "db size didn't increase after an append") assert.Greater(t, expDbSize, dbSizeBeforeAppend, "db size didn't increase after an append")
q, err := dbWritable.Querier(context.TODO(), math.MinInt64, math.MaxInt64) q, err := dbWritable.Querier(context.TODO(), math.MinInt64, math.MaxInt64)
assert.NoError(t, err) assert.NoError(t, err)
@ -2559,7 +2559,7 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) {
for i, f := range files { for i, f := range files {
size := int(f.Size()) size := int(f.Size())
// Verify that the segment is the same or smaller than the expected size. // Verify that the segment is the same or smaller than the expected size.
assert.True(t, chunks.SegmentHeaderSize+test.expSegmentSizes[i] >= size, "Segment:%v should NOT be bigger than:%v actual:%v", i, chunks.SegmentHeaderSize+test.expSegmentSizes[i], size) assert.GreaterOrEqual(t, chunks.SegmentHeaderSize+test.expSegmentSizes[i], size, "Segment:%v should NOT be bigger than:%v actual:%v", i, chunks.SegmentHeaderSize+test.expSegmentSizes[i], size)
sizeAct += size sizeAct += size
} }


@ -296,7 +296,7 @@ func TestHead_WALMultiRef(t *testing.T) {
assert.NoError(t, app.Commit()) assert.NoError(t, app.Commit())
assert.Equal(t, 4.0, prom_testutil.ToFloat64(head.metrics.chunksCreated)) assert.Equal(t, 4.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))
assert.True(t, ref1 != ref2, "Refs are the same") assert.NotEqual(t, ref1, ref2, "Refs are the same")
assert.NoError(t, head.Close()) assert.NoError(t, head.Close())
w, err = wal.New(nil, nil, w.Dir(), false) w, err = wal.New(nil, nil, w.Dir(), false)
@ -370,8 +370,8 @@ func TestHead_Truncate(t *testing.T) {
{minTime: 3000, maxTime: 3999}, {minTime: 3000, maxTime: 3999},
}, h.series.getByID(s2.ref).mmappedChunks) }, h.series.getByID(s2.ref).mmappedChunks)
assert.True(t, h.series.getByID(s3.ref) == nil, "") assert.Nil(t, h.series.getByID(s3.ref))
assert.True(t, h.series.getByID(s4.ref) == nil, "") assert.Nil(t, h.series.getByID(s4.ref))
postingsA1, _ := index.ExpandPostings(h.postings.Get("a", "1")) postingsA1, _ := index.ExpandPostings(h.postings.Get("a", "1"))
postingsA2, _ := index.ExpandPostings(h.postings.Get("a", "2")) postingsA2, _ := index.ExpandPostings(h.postings.Get("a", "2"))
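
Pointer-nil checks become assert.Nil and assert.NotNil. One detail worth a sketch (types hypothetical): assert.Nil inspects the value via reflection, so a typed nil pointer passed through interface{} still counts as nil.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type memSeries struct{} // hypothetical series type

func lookup(found bool) *memSeries { // hypothetical lookup helper
	if found {
		return &memSeries{}
	}
	return nil
}

func TestNilLookups(t *testing.T) {
	assert.Nil(t, lookup(false)) // typed nil pointer still reads as nil
	assert.NotNil(t, lookup(true))
}
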
@ -384,8 +384,8 @@ func TestHead_Truncate(t *testing.T) {
assert.Equal(t, []uint64{s2.ref}, postingsA2) assert.Equal(t, []uint64{s2.ref}, postingsA2)
assert.Equal(t, []uint64{s1.ref, s2.ref}, postingsB1) assert.Equal(t, []uint64{s1.ref, s2.ref}, postingsB1)
assert.Equal(t, []uint64{s1.ref, s2.ref}, postingsAll) assert.Equal(t, []uint64{s1.ref, s2.ref}, postingsAll)
assert.True(t, postingsB2 == nil, "") assert.Nil(t, postingsB2)
assert.True(t, postingsC1 == nil, "") assert.Nil(t, postingsC1)
assert.Equal(t, map[string]struct{}{ assert.Equal(t, map[string]struct{}{
"": {}, // from 'all' postings list "": {}, // from 'all' postings list
@ -437,7 +437,7 @@ func TestMemSeries_truncateChunks(t *testing.T) {
for i := 0; i < 4000; i += 5 { for i := 0; i < 4000; i += 5 {
ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper) ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper)
assert.True(t, ok == true, "sample append failed") assert.True(t, ok, "sample append failed")
} }
// Check that truncate removes half of the chunks and afterwards // Check that truncate removes half of the chunks and afterwards
@ -456,7 +456,7 @@ func TestMemSeries_truncateChunks(t *testing.T) {
assert.Equal(t, int64(2000), s.mmappedChunks[0].minTime) assert.Equal(t, int64(2000), s.mmappedChunks[0].minTime)
_, _, err = s.chunk(0, chunkDiskMapper) _, _, err = s.chunk(0, chunkDiskMapper)
assert.True(t, err == storage.ErrNotFound, "first chunks not gone") assert.Equal(t, storage.ErrNotFound, err, "first chunks not gone")
assert.Equal(t, countBefore/2, len(s.mmappedChunks)+1) // +1 for the head chunk. assert.Equal(t, countBefore/2, len(s.mmappedChunks)+1) // +1 for the head chunk.
chk, _, err = s.chunk(lastID, chunkDiskMapper) chk, _, err = s.chunk(lastID, chunkDiskMapper)
assert.NoError(t, err) assert.NoError(t, err)
@ -466,11 +466,11 @@ func TestMemSeries_truncateChunks(t *testing.T) {
// after truncation. // after truncation.
it1 := s.iterator(s.chunkID(len(s.mmappedChunks)), nil, chunkDiskMapper, nil) it1 := s.iterator(s.chunkID(len(s.mmappedChunks)), nil, chunkDiskMapper, nil)
_, ok := it1.(*memSafeIterator) _, ok := it1.(*memSafeIterator)
assert.True(t, ok == true, "") assert.True(t, ok)
it2 := s.iterator(s.chunkID(len(s.mmappedChunks)-1), nil, chunkDiskMapper, nil) it2 := s.iterator(s.chunkID(len(s.mmappedChunks)-1), nil, chunkDiskMapper, nil)
_, ok = it2.(*memSafeIterator) _, ok = it2.(*memSafeIterator)
assert.True(t, ok == false, "non-last chunk incorrectly wrapped with sample buffer") assert.False(t, ok, "non-last chunk incorrectly wrapped with sample buffer")
} }
func TestHeadDeleteSeriesWithoutSamples(t *testing.T) { func TestHeadDeleteSeriesWithoutSamples(t *testing.T) {
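
A bool needs no comparison at all: ok == true collapses to assert.True(t, ok) and ok == false to assert.False(t, ok), exactly as in the memSafeIterator checks above. A sketch with hypothetical iterator types:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type memSafeIterator struct{} // hypothetical buffered iterator
type plainIterator struct{}   // hypothetical unbuffered iterator

func TestIteratorKind(t *testing.T) {
	var it interface{} = &memSafeIterator{}

	_, ok := it.(*memSafeIterator)
	assert.True(t, ok) // rather than assert.True(t, ok == true, "")

	it = &plainIterator{}
	_, ok = it.(*memSafeIterator)
	assert.False(t, ok, "non-last chunk incorrectly wrapped with sample buffer")
}
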
@ -656,7 +656,7 @@ func TestDeleteUntilCurMax(t *testing.T) {
assert.True(t, res.Next(), "series is not present") assert.True(t, res.Next(), "series is not present")
s := res.At() s := res.At()
it := s.Iterator() it := s.Iterator()
assert.True(t, !it.Next(), "expected no samples") assert.False(t, it.Next(), "expected no samples")
for res.Next() { for res.Next() {
} }
assert.NoError(t, res.Err()) assert.NoError(t, res.Err())
@ -980,7 +980,7 @@ func TestMemSeries_append(t *testing.T) {
ok, chunkCreated = s.append(999, 2, 0, chunkDiskMapper) ok, chunkCreated = s.append(999, 2, 0, chunkDiskMapper)
assert.True(t, ok, "append failed") assert.True(t, ok, "append failed")
assert.True(t, !chunkCreated, "second sample should use same chunk") assert.False(t, chunkCreated, "second sample should use same chunk")
ok, chunkCreated = s.append(1000, 3, 0, chunkDiskMapper) ok, chunkCreated = s.append(1000, 3, 0, chunkDiskMapper)
assert.True(t, ok, "append failed") assert.True(t, ok, "append failed")
@ -988,11 +988,13 @@ func TestMemSeries_append(t *testing.T) {
ok, chunkCreated = s.append(1001, 4, 0, chunkDiskMapper) ok, chunkCreated = s.append(1001, 4, 0, chunkDiskMapper)
assert.True(t, ok, "append failed") assert.True(t, ok, "append failed")
assert.True(t, !chunkCreated, "second sample should use same chunk") assert.False(t, chunkCreated, "second sample should use same chunk")
assert.True(t, len(s.mmappedChunks) == 1, "there should be only 1 mmapped chunk") assert.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk")
assert.True(t, s.mmappedChunks[0].minTime == 998 && s.mmappedChunks[0].maxTime == 999, "wrong chunk range") assert.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range")
assert.True(t, s.headChunk.minTime == 1000 && s.headChunk.maxTime == 1001, "wrong chunk range") assert.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range")
assert.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range")
assert.Equal(t, int64(1001), s.headChunk.maxTime, "wrong chunk range")
// Fill the range [1000,2000) with many samples. Intermediate chunks should be cut // Fill the range [1000,2000) with many samples. Intermediate chunks should be cut
// at approximately 120 samples per chunk. // at approximately 120 samples per chunk.
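
The chunk-range hunk above is the one non-mechanical rewrite here: a single assert.True over an && condition is split into one assert.Equal per field, so a failure names the offending bound. assert.Equal is also type-strict, which is why the literals carry int64 conversions. Hypothetical sketch:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type chunkMeta struct{ minTime, maxTime int64 } // hypothetical chunk metadata

func TestChunkRange(t *testing.T) {
	c := chunkMeta{minTime: 998, maxTime: 999}

	// One assertion per field; a bare 998 would be an int and fail the
	// type-strict comparison against the int64 field.
	assert.Equal(t, int64(998), c.minTime, "wrong chunk range")
	assert.Equal(t, int64(999), c.maxTime, "wrong chunk range")
}
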
@ -1001,13 +1003,13 @@ func TestMemSeries_append(t *testing.T) {
assert.True(t, ok, "append failed") assert.True(t, ok, "append failed")
} }
assert.True(t, len(s.mmappedChunks)+1 > 7, "expected intermediate chunks") assert.Greater(t, len(s.mmappedChunks)+1, 7, "expected intermediate chunks")
// All chunks but the first and last should now be moderately full. // All chunks but the first and last should now be moderately full.
for i, c := range s.mmappedChunks[1:] { for i, c := range s.mmappedChunks[1:] {
chk, err := chunkDiskMapper.Chunk(c.ref) chk, err := chunkDiskMapper.Chunk(c.ref)
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, chk.NumSamples() > 100, "unexpected small chunk %d of length %d", i, chk.NumSamples()) assert.Greater(t, chk.NumSamples(), 100, "unexpected small chunk %d of length %d", i, chk.NumSamples())
} }
} }
@ -1028,7 +1030,7 @@ func TestGCChunkAccess(t *testing.T) {
assert.True(t, chunkCreated, "chunk was not created") assert.True(t, chunkCreated, "chunk was not created")
ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper) ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper)
assert.True(t, ok, "series append failed") assert.True(t, ok, "series append failed")
assert.True(t, !chunkCreated, "chunk was created") assert.False(t, chunkCreated, "chunk was created")
// A new chunk should be created here as it's beyond the chunk range. // A new chunk should be created here as it's beyond the chunk range.
ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper) ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper)
@ -1036,7 +1038,7 @@ func TestGCChunkAccess(t *testing.T) {
assert.True(t, chunkCreated, "chunk was not created") assert.True(t, chunkCreated, "chunk was not created")
ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper) ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper)
assert.True(t, ok, "series append failed") assert.True(t, ok, "series append failed")
assert.True(t, !chunkCreated, "chunk was created") assert.False(t, chunkCreated, "chunk was created")
idx := h.indexRange(0, 1500) idx := h.indexRange(0, 1500)
var ( var (
@ -1082,7 +1084,7 @@ func TestGCSeriesAccess(t *testing.T) {
assert.True(t, chunkCreated, "chunk was not created") assert.True(t, chunkCreated, "chunk was not created")
ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper) ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper)
assert.True(t, ok, "series append failed") assert.True(t, ok, "series append failed")
assert.True(t, !chunkCreated, "chunk was created") assert.False(t, chunkCreated, "chunk was created")
// A new chunk should be created here as it's beyond the chunk range. // A new chunk should be created here as it's beyond the chunk range.
ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper) ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper)
@ -1090,7 +1092,7 @@ func TestGCSeriesAccess(t *testing.T) {
assert.True(t, chunkCreated, "chunk was not created") assert.True(t, chunkCreated, "chunk was not created")
ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper) ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper)
assert.True(t, ok, "series append failed") assert.True(t, ok, "series append failed")
assert.True(t, !chunkCreated, "chunk was created") assert.False(t, chunkCreated, "chunk was created")
idx := h.indexRange(0, 2000) idx := h.indexRange(0, 2000)
var ( var (
@ -1135,7 +1137,7 @@ func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, h.Truncate(2000)) assert.NoError(t, h.Truncate(2000))
assert.True(t, nil != h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected") assert.NotNil(t, h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected")
assert.NoError(t, app.Commit()) assert.NoError(t, app.Commit())
@ -1165,7 +1167,7 @@ func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, h.Truncate(2000)) assert.NoError(t, h.Truncate(2000))
assert.True(t, nil != h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected") assert.NotNil(t, h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected")
assert.NoError(t, app.Rollback()) assert.NoError(t, app.Rollback())
@ -1332,7 +1334,7 @@ func TestHeadReadWriterRepair(t *testing.T) {
assert.True(t, chunkCreated, "chunk was not created") assert.True(t, chunkCreated, "chunk was not created")
ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, h.chunkDiskMapper) ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, h.chunkDiskMapper)
assert.True(t, ok, "series append failed") assert.True(t, ok, "series append failed")
assert.True(t, !chunkCreated, "chunk was created") assert.False(t, chunkCreated, "chunk was created")
assert.NoError(t, h.chunkDiskMapper.CutNewFile()) assert.NoError(t, h.chunkDiskMapper.CutNewFile())
} }
assert.NoError(t, h.Close()) assert.NoError(t, h.Close())
@ -1731,7 +1733,7 @@ func TestOutOfOrderSamplesMetric(t *testing.T) {
assert.Equal(t, int64(math.MinInt64), db.head.minValidTime.Load()) assert.Equal(t, int64(math.MinInt64), db.head.minValidTime.Load())
assert.NoError(t, db.Compact()) assert.NoError(t, db.Compact())
assert.True(t, db.head.minValidTime.Load() > 0, "") assert.Greater(t, db.head.minValidTime.Load(), int64(0))
app = db.Appender(ctx) app = db.Appender(ctx)
_, err = app.Add(labels.FromStrings("a", "b"), db.head.minValidTime.Load()-2, 99) _, err = app.Add(labels.FromStrings("a", "b"), db.head.minValidTime.Load()-2, 99)
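
assert.Greater is likewise type-strict, which explains the int64(0) above: minValidTime.Load() returns an int64, and a bare 0 would be an int. In this sketch the standard library's atomic.Int64 stands in for the atomic value the codebase actually uses:

package example

import (
	"sync/atomic"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestMinValidTimeAdvanced(t *testing.T) {
	var minValidTime atomic.Int64 // hypothetical stand-in for db.head.minValidTime
	minValidTime.Store(42)

	// Both operands must share a type: Load() yields int64, so the zero
	// is spelled int64(0) rather than 0.
	assert.Greater(t, minValidTime.Load(), int64(0))
}
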


@ -437,7 +437,7 @@ func TestPersistence_index_e2e(t *testing.T) {
var chks, expchks []chunks.Meta var chks, expchks []chunks.Meta
for gotp.Next() { for gotp.Next() {
assert.True(t, expp.Next() == true, "") assert.True(t, expp.Next())
ref := gotp.At() ref := gotp.At()
@ -449,7 +449,7 @@ func TestPersistence_index_e2e(t *testing.T) {
assert.Equal(t, explset, lset) assert.Equal(t, explset, lset)
assert.Equal(t, expchks, chks) assert.Equal(t, expchks, chks)
} }
assert.True(t, expp.Next() == false, "Expected no more postings for %q=%q", p.Name, p.Value) assert.False(t, expp.Next(), "Expected no more postings for %q=%q", p.Name, p.Value)
assert.NoError(t, gotp.Err()) assert.NoError(t, gotp.Err())
} }


@ -557,7 +557,7 @@ func TestRemovedNextStackoverflow(t *testing.T) {
} }
assert.NoError(t, rp.Err()) assert.NoError(t, rp.Err())
assert.True(t, !gotElem, "") assert.False(t, gotElem)
} }
func TestRemovedPostingsSeek(t *testing.T) { func TestRemovedPostingsSeek(t *testing.T) {
@ -664,12 +664,12 @@ func TestBigEndian(t *testing.T) {
t.Run("Iteration", func(t *testing.T) { t.Run("Iteration", func(t *testing.T) {
bep := newBigEndianPostings(beLst) bep := newBigEndianPostings(beLst)
for i := 0; i < num; i++ { for i := 0; i < num; i++ {
assert.True(t, bep.Next() == true, "") assert.True(t, bep.Next())
assert.Equal(t, uint64(ls[i]), bep.At()) assert.Equal(t, uint64(ls[i]), bep.At())
} }
assert.True(t, bep.Next() == false, "") assert.False(t, bep.Next())
assert.True(t, bep.Err() == nil, "") assert.NoError(t, bep.Err())
}) })
t.Run("Seek", func(t *testing.T) { t.Run("Seek", func(t *testing.T) {
@ -715,7 +715,7 @@ func TestBigEndian(t *testing.T) {
for _, v := range table { for _, v := range table {
assert.Equal(t, v.found, bep.Seek(uint64(v.seek))) assert.Equal(t, v.found, bep.Seek(uint64(v.seek)))
assert.Equal(t, uint64(v.val), bep.At()) assert.Equal(t, uint64(v.val), bep.At())
assert.True(t, bep.Err() == nil, "") assert.NoError(t, bep.Err())
} }
}) })
} }
@ -872,5 +872,5 @@ func TestMemPostings_Delete(t *testing.T) {
deleted := p.Get("lbl1", "b") deleted := p.Get("lbl1", "b")
expanded, err = ExpandPostings(deleted) expanded, err = ExpandPostings(deleted)
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, 0 == len(expanded), "expected empty postings, got %v", expanded) assert.Equal(t, 0, len(expanded), "expected empty postings, got %v", expanded)
} }


@ -860,9 +860,9 @@ func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) {
) )
it := newPopulateWithDelGenericSeriesIterator(f, chkMetas, nil).toSeriesIterator() it := newPopulateWithDelGenericSeriesIterator(f, chkMetas, nil).toSeriesIterator()
assert.True(t, it.Seek(1), "") assert.True(t, it.Seek(1))
assert.True(t, it.Seek(2), "") assert.True(t, it.Seek(2))
assert.True(t, it.Seek(2), "") assert.True(t, it.Seek(2))
ts, v := it.At() ts, v := it.At()
assert.Equal(t, int64(2), ts) assert.Equal(t, int64(2), ts)
assert.Equal(t, float64(2), v) assert.Equal(t, float64(2), v)
@ -878,12 +878,12 @@ func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) {
) )
it := newPopulateWithDelGenericSeriesIterator(f, chkMetas, nil).toSeriesIterator() it := newPopulateWithDelGenericSeriesIterator(f, chkMetas, nil).toSeriesIterator()
assert.True(t, it.Next(), "") assert.True(t, it.Next())
ts, v := it.At() ts, v := it.At()
assert.Equal(t, int64(1), ts) assert.Equal(t, int64(1), ts)
assert.Equal(t, float64(2), v) assert.Equal(t, float64(2), v)
assert.True(t, it.Seek(4), "") assert.True(t, it.Seek(4))
ts, v = it.At() ts, v = it.At()
assert.Equal(t, int64(5), ts) assert.Equal(t, int64(5), ts)
assert.Equal(t, float64(6), v) assert.Equal(t, float64(6), v)
@ -1018,7 +1018,7 @@ func TestDeletedIterator(t *testing.T) {
} }
} }
assert.True(t, i < 1000, "") assert.Less(t, i, int64(1000))
ts, v := it.At() ts, v := it.At()
assert.Equal(t, act[i].t, ts) assert.Equal(t, act[i].t, ts)
@ -1033,7 +1033,7 @@ func TestDeletedIterator(t *testing.T) {
} }
} }
assert.True(t, i >= 1000, "") assert.GreaterOrEqual(t, i, int64(1000))
assert.NoError(t, it.Err()) assert.NoError(t, it.Err())
} }
} }


@ -123,5 +123,5 @@ func TestRepairBadIndexVersion(t *testing.T) {
meta, _, err := readMetaFile(tmpDbDir) meta, _, err := readMetaFile(tmpDbDir)
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, meta.Version == metaVersion1, "unexpected meta version %d", meta.Version) assert.Equal(t, metaVersion1, meta.Version, "unexpected meta version %d", meta.Version)
} }


@ -111,29 +111,29 @@ func TestBufferedSeriesIterator(t *testing.T) {
{t: 101, v: 10}, {t: 101, v: 10},
}), 2) }), 2)
assert.True(t, it.Seek(-123) == true, "seek failed") assert.True(t, it.Seek(-123), "seek failed")
sampleEq(1, 2) sampleEq(1, 2)
bufferEq(nil) bufferEq(nil)
assert.True(t, it.Next() == true, "next failed") assert.True(t, it.Next(), "next failed")
sampleEq(2, 3) sampleEq(2, 3)
bufferEq([]sample{{t: 1, v: 2}}) bufferEq([]sample{{t: 1, v: 2}})
assert.True(t, it.Next() == true, "next failed") assert.True(t, it.Next(), "next failed")
assert.True(t, it.Next() == true, "next failed") assert.True(t, it.Next(), "next failed")
assert.True(t, it.Next() == true, "next failed") assert.True(t, it.Next(), "next failed")
sampleEq(5, 6) sampleEq(5, 6)
bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}}) bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}})
assert.True(t, it.Seek(5) == true, "seek failed") assert.True(t, it.Seek(5), "seek failed")
sampleEq(5, 6) sampleEq(5, 6)
bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}}) bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}})
assert.True(t, it.Seek(101) == true, "seek failed") assert.True(t, it.Seek(101), "seek failed")
sampleEq(101, 10) sampleEq(101, 10)
bufferEq([]sample{{t: 99, v: 8}, {t: 100, v: 9}}) bufferEq([]sample{{t: 99, v: 8}, {t: 100, v: 9}})
assert.True(t, it.Next() == false, "next succeeded unexpectedly") assert.False(t, it.Next(), "next succeeded unexpectedly")
} }
type listSeriesIterator struct { type listSeriesIterator struct {
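
Because testify's message arguments are variadic, the empty-string filler that assert.True(t, cond, "") carried simply disappears along with the == true and == false noise. A sketch over a hypothetical iterator:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type listIterator struct{ i, n int } // hypothetical iterator over n samples

func (it *listIterator) Next() bool      { it.i++; return it.i <= it.n }
func (it *listIterator) Seek(v int) bool { return v <= it.n }

func TestIteration(t *testing.T) {
	it := &listIterator{n: 2}

	assert.True(t, it.Seek(1), "seek failed")
	assert.True(t, it.Next(), "next failed")
	assert.True(t, it.Next(), "next failed")
	assert.False(t, it.Next(), "next succeeded unexpectedly")
}
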


@ -213,7 +213,7 @@ func TestCheckpoint(t *testing.T) {
samples, err := dec.Samples(rec, nil) samples, err := dec.Samples(rec, nil)
assert.NoError(t, err) assert.NoError(t, err)
for _, s := range samples { for _, s := range samples {
assert.True(t, s.T >= last/2, "sample with wrong timestamp") assert.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp")
} }
} }
} }


@ -221,7 +221,7 @@ func TestReader_Live(t *testing.T) {
reader := NewLiveReader(logger, NewLiveReaderMetrics(nil), readFd) reader := NewLiveReader(logger, NewLiveReaderMetrics(nil), readFd)
for _, exp := range testReaderCases[i].exp { for _, exp := range testReaderCases[i].exp {
for !reader.Next() { for !reader.Next() {
assert.True(t, reader.Err() == io.EOF, "expect EOF, got: %v", reader.Err()) assert.Equal(t, io.EOF, reader.Err(), "expect EOF, got: %v", reader.Err())
runtime.Gosched() runtime.Gosched()
} }
@ -229,7 +229,7 @@ func TestReader_Live(t *testing.T) {
assert.Equal(t, exp, actual, "read wrong record") assert.Equal(t, exp, actual, "read wrong record")
} }
assert.True(t, !reader.Next(), "unexpected record") assert.False(t, reader.Next(), "unexpected record")
if testReaderCases[i].fail { if testReaderCases[i].fail {
assert.Error(t, reader.Err()) assert.Error(t, reader.Err())
} }
@ -341,7 +341,7 @@ func TestReaderFuzz(t *testing.T) {
assert.True(t, reader.Next(), "expected record: %v", reader.Err()) assert.True(t, reader.Next(), "expected record: %v", reader.Err())
assert.Equal(t, expected, reader.Record(), "read wrong record") assert.Equal(t, expected, reader.Record(), "read wrong record")
} }
assert.True(t, !reader.Next(), "unexpected record") assert.False(t, reader.Next(), "unexpected record")
}) })
} }
} }
@ -391,7 +391,7 @@ func TestReaderFuzz_Live(t *testing.T) {
assert.True(t, ok, "unexpected record") assert.True(t, ok, "unexpected record")
assert.Equal(t, expected, rec, "record does not match expected") assert.Equal(t, expected, rec, "record does not match expected")
} }
assert.True(t, r.Err() == io.EOF, "expected EOF, got: %v", r.Err()) assert.Equal(t, io.EOF, r.Err(), "expected EOF, got: %v", r.Err())
return true return true
} }
@ -411,7 +411,7 @@ func TestReaderFuzz_Live(t *testing.T) {
fi, err := os.Stat(SegmentName(dir, seg.i)) fi, err := os.Stat(SegmentName(dir, seg.i))
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, r.Offset() == fi.Size(), "expected to have read whole segment, but read %d of %d", r.Offset(), fi.Size()) assert.Equal(t, r.Offset(), fi.Size(), "expected to have read whole segment, but read %d of %d", r.Offset(), fi.Size())
seg, err = OpenReadSegment(SegmentName(dir, seg.i+1)) seg, err = OpenReadSegment(SegmentName(dir, seg.i+1))
assert.NoError(t, err) assert.NoError(t, err)
@ -427,7 +427,7 @@ func TestReaderFuzz_Live(t *testing.T) {
} }
} }
assert.True(t, r.Err() == io.EOF, "expected EOF") assert.Equal(t, io.EOF, r.Err(), "expected EOF")
}) })
} }
} }
@ -473,8 +473,8 @@ func TestLiveReaderCorrupt_ShortFile(t *testing.T) {
defer seg.Close() defer seg.Close()
r := NewLiveReader(logger, nil, seg) r := NewLiveReader(logger, nil, seg)
assert.True(t, r.Next() == false, "expected no records") assert.False(t, r.Next(), "expected no records")
assert.True(t, r.Err() == io.EOF, "expected error, got: %v", r.Err()) assert.Equal(t, io.EOF, r.Err(), "expected error, got: %v", r.Err())
} }
func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) { func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) {
@ -521,8 +521,8 @@ func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) {
defer seg.Close() defer seg.Close()
r := NewLiveReader(logger, NewLiveReaderMetrics(nil), seg) r := NewLiveReader(logger, NewLiveReaderMetrics(nil), seg)
assert.True(t, r.Next() == false, "expected no records") assert.False(t, r.Next(), "expected no records")
assert.True(t, r.Err().Error() == "record length greater than a single page: 65542 > 32768", "expected error, got: %v", r.Err()) assert.EqualError(t, r.Err(), "record length greater than a single page: 65542 > 32768", "expected error, got: %v", r.Err())
} }
func TestReaderData(t *testing.T) { func TestReaderData(t *testing.T) {
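
Comparing r.Err().Error() against a string is the one case with a dedicated helper: assert.EqualError checks the message and also fails cleanly when the error is nil, where calling .Error() on it would have panicked. Hypothetical sketch:

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestRecordTooLong(t *testing.T) {
	// Hypothetical error mirroring the live-reader case above.
	err := errors.New("record length greater than a single page: 65542 > 32768")

	assert.EqualError(t, err, "record length greater than a single page: 65542 > 32768")
}
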


@ -294,7 +294,7 @@ func TestCorruptAndCarryOn(t *testing.T) {
assert.Equal(t, recordSize, len(reader.Record())) assert.Equal(t, recordSize, len(reader.Record()))
} }
assert.Equal(t, 4, i, "not enough records") assert.Equal(t, 4, i, "not enough records")
assert.True(t, !reader.Next(), "unexpected record") assert.False(t, reader.Next(), "unexpected record")
corruptionErr := reader.Err() corruptionErr := reader.Err()
assert.Error(t, corruptionErr) assert.Error(t, corruptionErr)
@ -336,7 +336,7 @@ func TestCorruptAndCarryOn(t *testing.T) {
assert.Equal(t, recordSize, len(reader.Record())) assert.Equal(t, recordSize, len(reader.Record()))
} }
assert.Equal(t, 9, i, "wrong number of records") assert.Equal(t, 9, i, "wrong number of records")
assert.True(t, !reader.Next(), "unexpected record") assert.False(t, reader.Next(), "unexpected record")
assert.Equal(t, nil, reader.Err()) assert.Equal(t, nil, reader.Err())
sr.Close() sr.Close()
} }
@ -380,7 +380,7 @@ func TestSegmentMetric(t *testing.T) {
err = w.Log(buf) err = w.Log(buf)
assert.NoError(t, err) assert.NoError(t, err)
} }
assert.True(t, client_testutil.ToFloat64(w.metrics.currentSegment) == initialSegment+1, "segment metric did not increment after segment rotation") assert.Equal(t, initialSegment+1, client_testutil.ToFloat64(w.metrics.currentSegment), "segment metric did not increment after segment rotation")
assert.NoError(t, w.Close()) assert.NoError(t, w.Close())
} }
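
In the gauge check above, ToFloat64 returns a float64, so exact assert.Equal is safe because segment indexes are small integral values; for genuinely computed floats, assert.InDelta is usually the better fit. Sketch with hypothetical readings:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestCounterAdvanced(t *testing.T) {
	initialSegment := 0.0 // hypothetical: metric value before rotation
	current := 1.0        // hypothetical: metric value after one rotation

	assert.Equal(t, initialSegment+1, current, "segment metric did not increment after segment rotation")

	// Tolerant alternative for values that are not exactly representable.
	assert.InDelta(t, initialSegment+1, current, 1e-9)
}
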
@ -421,7 +421,7 @@ func TestCompression(t *testing.T) {
compressedSize, err := fileutil.DirSize(dirCompressed) compressedSize, err := fileutil.DirSize(dirCompressed)
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, float64(uncompressedSize)*0.75 > float64(compressedSize), "Compressing zeroes should save at least 25%% space - uncompressedSize: %d, compressedSize: %d", uncompressedSize, compressedSize) assert.Greater(t, float64(uncompressedSize)*0.75, float64(compressedSize), "Compressing zeroes should save at least 25%% space - uncompressedSize: %d, compressedSize: %d", uncompressedSize, compressedSize)
} }
func BenchmarkWAL_LogBatched(b *testing.B) { func BenchmarkWAL_LogBatched(b *testing.B) {


@ -478,7 +478,7 @@ func cleanupSnapshot(t *testing.T, dbDir string, resp *http.Response) {
b, err := ioutil.ReadAll(resp.Body) b, err := ioutil.ReadAll(resp.Body)
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, json.Unmarshal(b, snapshot)) assert.NoError(t, json.Unmarshal(b, snapshot))
assert.True(t, snapshot.Data.Name != "", "snapshot directory not returned") assert.NotZero(t, snapshot.Data.Name, "snapshot directory not returned")
assert.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots", snapshot.Data.Name))) assert.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots", snapshot.Data.Name)))
assert.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots"))) assert.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots")))
} }
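
assert.NotZero asserts that a value differs from its type's zero value, which for the string above means non-empty; assert.NotEmpty reads the same and is interchangeable here. A final sketch with a hypothetical snapshot name:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSnapshotName(t *testing.T) {
	name := "20201027T110653Z-1282d1b39c" // hypothetical snapshot directory name

	assert.NotZero(t, name, "snapshot directory not returned")
	assert.NotEmpty(t, name, "snapshot directory not returned")
}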