golangci-lint: enable testifylint linter (#13254)

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Matthieu MOREL 2023-12-07 12:35:01 +01:00 committed by GitHub
parent cef8aca8e8
commit 9c4782f1cc
90 changed files with 622 additions and 608 deletions
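The changes below are almost entirely mechanical: testifylint flags testify assertions that have a more specific equivalent, and each call site is rewritten accordingly. require.Equal against a len(...) becomes require.Len; comparisons against zero length become require.Empty or require.NotEmpty; require.Equal(t, true/false, ...) becomes require.True/require.False; require.Nil/NotNil on errors becomes require.NoError/require.Error; errors.Is and errors.As wrapped in require.True become require.ErrorIs and require.ErrorAs; and swapped arguments are put back into (expected, actual) order. The golangci-lint pin also moves from v1.54.2 to v1.55.2 in both CI and the Makefile, since testifylint only ships with golangci-lint from the v1.55 line onward. A minimal sketch of the before/after shape (hypothetical test name and values, not taken from any one file below):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestAssertionStyle(t *testing.T) {
	items := []string{"a", "b", "c"}
	var err error

	// Old style, flagged by testifylint:
	//   require.Equal(t, 3, len(items))     // "len" checker
	//   require.Equal(t, true, err == nil)  // "bool-compare" checker
	//   require.Nil(t, err)                 // "error-nil" checker

	// Rewritten equivalents, which also produce better failure messages:
	require.Len(t, items, 3)
	require.NotEmpty(t, items)
	require.NoError(t, err)
}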


@@ -151,7 +151,7 @@ jobs:
 uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0
 with:
 args: --verbose
-version: v1.54.2
+version: v1.55.2
 fuzzing:
 uses: ./.github/workflows/fuzzing.yml
 if: github.event_name == 'pull_request'


@@ -23,6 +23,7 @@ linters:
 - nolintlint
 - predeclared
 - revive
+- testifylint
 - unconvert
 - unused
@@ -117,3 +118,19 @@ linters-settings:
 disabled: true
 - name: var-declaration
 - name: var-naming
+testifylint:
+disable:
+- float-compare
+- go-require
+enable:
+- bool-compare
+- compares
+- empty
+- error-is-as
+- error-nil
+- expected-actual
+- len
+- require-error
+- suite-dont-use-pkg
+- suite-extra-assert-call
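The two disabled checkers are the opinionated ones: float-compare rejects require.Equal on floating-point values and pushes toward approximate assertions such as require.InDelta or require.InEpsilon, and go-require forbids require assertions in goroutines other than the test goroutine, because require stops the test via t.FailNow, which is only safe on the goroutine running the test. A sketch of what float-compare would otherwise have demanded, with made-up values:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestFloatCompare(t *testing.T) {
	x, y := 0.1, 0.2
	got := x + y // runtime float64 arithmetic: 0.30000000000000004

	// float-compare would flag the exact comparison, which also fails here:
	//   require.Equal(t, 0.3, got)

	// The suggested form tolerates rounding error:
	require.InDelta(t, 0.3, got, 1e-9)
}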


@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.54.2
+GOLANGCI_LINT_VERSION ?= v1.55.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))


@@ -346,7 +346,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
 continue
 }
-require.Equal(t, 1, len(g.GetMetric()))
+require.Len(t, g.GetMetric(), 1)
 if _, ok := res[m]; ok {
 t.Error("expected only one metric family for", m)
 t.FailNow()


@@ -284,7 +284,7 @@ func (p *queryLogTest) run(t *testing.T) {
 if !p.enabledAtStart {
 p.query(t)
-require.Equal(t, 0, len(readQueryLog(t, queryLogFile.Name())))
+require.Empty(t, readQueryLog(t, queryLogFile.Name()))
 p.setQueryLog(t, queryLogFile.Name())
 p.reloadConfig(t)
 }
@@ -309,7 +309,7 @@ func (p *queryLogTest) run(t *testing.T) {
 p.query(t)
 ql = readQueryLog(t, queryLogFile.Name())
-require.Equal(t, qc, len(ql))
+require.Len(t, ql, qc)
 qc = len(ql)
 p.setQueryLog(t, queryLogFile.Name())
@@ -320,7 +320,7 @@ func (p *queryLogTest) run(t *testing.T) {
 ql = readQueryLog(t, queryLogFile.Name())
 if p.exactQueryCount() {
-require.Equal(t, qc, len(ql))
+require.Len(t, ql, qc)
 } else {
 require.Greater(t, len(ql), qc, "no queries logged")
 }
@@ -340,7 +340,7 @@ func (p *queryLogTest) run(t *testing.T) {
 require.NoError(t, os.Rename(queryLogFile.Name(), newFile.Name()))
 ql = readQueryLog(t, newFile.Name())
 if p.exactQueryCount() {
-require.Equal(t, qc, len(ql))
+require.Len(t, ql, qc)
 }
 p.validateLastQuery(t, ql)
 qc = len(ql)
@@ -351,7 +351,7 @@ func (p *queryLogTest) run(t *testing.T) {
 ql = readQueryLog(t, newFile.Name())
 if p.exactQueryCount() {
-require.Equal(t, qc, len(ql))
+require.Len(t, ql, qc)
 } else {
 require.Greater(t, len(ql), qc, "no queries logged")
 }


@@ -61,7 +61,7 @@ func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMa
 func testBlocks(t *testing.T, db *tsdb.DB, expectedMinTime, expectedMaxTime, expectedBlockDuration int64, expectedSamples []backfillSample, expectedNumBlocks int) {
 blocks := db.Blocks()
-require.Equal(t, expectedNumBlocks, len(blocks), "did not create correct number of blocks")
+require.Len(t, blocks, expectedNumBlocks, "did not create correct number of blocks")
 for i, block := range blocks {
 require.Equal(t, block.MinTime()/expectedBlockDuration, (block.MaxTime()-1)/expectedBlockDuration, "block %d contains data outside of one aligned block duration", i)


@@ -56,7 +56,7 @@ func TestQueryRange(t *testing.T) {
 defer s.Close()
 urlObject, err := url.Parse(s.URL)
-require.Equal(t, nil, err)
+require.NoError(t, err)
 p := &promqlPrinter{}
 exitCode := QueryRange(urlObject, http.DefaultTransport, map[string]string{}, "up", "0", "300", 0, p)
@@ -79,7 +79,7 @@ func TestQueryInstant(t *testing.T) {
 defer s.Close()
 urlObject, err := url.Parse(s.URL)
-require.Equal(t, nil, err)
+require.NoError(t, err)
 p := &promqlPrinter{}
 exitCode := QueryInstant(urlObject, http.DefaultTransport, "up", "300", p)


@@ -91,13 +91,13 @@ func TestBackfillRuleIntegration(t *testing.T) {
 for _, err := range errs {
 require.NoError(t, err)
 }
-require.Equal(t, 3, len(ruleImporter.groups))
+require.Len(t, ruleImporter.groups, 3)
 group1 := ruleImporter.groups[path1+";group0"]
 require.NotNil(t, group1)
 const defaultInterval = 60
 require.Equal(t, defaultInterval*time.Second, group1.Interval())
 gRules := group1.Rules()
-require.Equal(t, 1, len(gRules))
+require.Len(t, gRules, 1)
 require.Equal(t, "rule1", gRules[0].Name())
 require.Equal(t, "ruleExpr", gRules[0].Query().String())
 require.Equal(t, 1, gRules[0].Labels().Len())
@@ -106,7 +106,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
 require.NotNil(t, group2)
 require.Equal(t, defaultInterval*time.Second, group2.Interval())
 g2Rules := group2.Rules()
-require.Equal(t, 2, len(g2Rules))
+require.Len(t, g2Rules, 2)
 require.Equal(t, "grp2_rule1", g2Rules[0].Name())
 require.Equal(t, "grp2_rule1_expr", g2Rules[0].Query().String())
 require.Equal(t, 0, g2Rules[0].Labels().Len())
@@ -122,7 +122,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
 require.NoError(t, err)
 blocks := db.Blocks()
-require.Equal(t, (i+1)*tt.expectedBlockCount, len(blocks))
+require.Len(t, blocks, (i+1)*tt.expectedBlockCount)
 q, err := db.Querier(math.MinInt64, math.MaxInt64)
 require.NoError(t, err)


@@ -35,7 +35,7 @@ func TestSDCheckResult(t *testing.T) {
 }}
 reg, err := relabel.NewRegexp("(.*)")
-require.Nil(t, err)
+require.NoError(t, err)
 scrapeConfig := &config.ScrapeConfig{
 ScrapeInterval: model.Duration(1 * time.Minute),


@@ -1457,8 +1457,8 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
 got := &Config{}
 require.NoError(t, yaml.UnmarshalStrict(out, got))
-require.Equal(t, true, got.RemoteWriteConfigs[0].QueueConfig.RetryOnRateLimit)
-require.Equal(t, false, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit)
+require.True(t, got.RemoteWriteConfigs[0].QueueConfig.RetryOnRateLimit)
+require.False(t, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit)
 }
 func TestLoadConfig(t *testing.T) {
@@ -1475,9 +1475,9 @@ func TestLoadConfig(t *testing.T) {
 func TestScrapeIntervalLarger(t *testing.T) {
 c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, log.NewNopLogger())
 require.NoError(t, err)
-require.Equal(t, 1, len(c.ScrapeConfigs))
+require.Len(t, c.ScrapeConfigs, 1)
 for _, sc := range c.ScrapeConfigs {
-require.Equal(t, true, sc.ScrapeInterval >= sc.ScrapeTimeout)
+require.GreaterOrEqual(t, sc.ScrapeInterval, sc.ScrapeTimeout)
 }
 }
@@ -1493,7 +1493,7 @@ func TestElideSecrets(t *testing.T) {
 yamlConfig := string(config)
 matches := secretRe.FindAllStringIndex(yamlConfig, -1)
-require.Equal(t, 22, len(matches), "wrong number of secret matches found")
+require.Len(t, matches, 22, "wrong number of secret matches found")
 require.NotContains(t, yamlConfig, "mysecret",
 "yaml marshal reveals authentication credentials.")
 }
@@ -2063,7 +2063,7 @@ func TestAgentMode(t *testing.T) {
 c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, log.NewNopLogger())
 require.NoError(t, err)
-require.Len(t, c.RemoteWriteConfigs, 0)
+require.Empty(t, c.RemoteWriteConfigs)
 c, err = LoadFile("testdata/agent_mode.good.yml", true, false, log.NewNopLogger())
 require.NoError(t, err)
@@ -2257,5 +2257,5 @@ func TestScrapeConfigDisableCompression(t *testing.T) {
 got := &Config{}
 require.NoError(t, yaml.UnmarshalStrict(out, got))
-require.Equal(t, false, got.ScrapeConfigs[0].EnableCompression)
+require.False(t, got.ScrapeConfigs[0].EnableCompression)
 }


@@ -269,7 +269,7 @@ func TestNewAzureResourceFromID(t *testing.T) {
 },
 } {
 actual, err := newAzureResourceFromID(tc.id, nil)
-require.Nil(t, err)
+require.NoError(t, err)
 require.Equal(t, tc.expected.Name, actual.Name)
 require.Equal(t, tc.expected.ResourceGroupName, actual.ResourceGroupName)
 }


@@ -268,13 +268,13 @@ func newDiscovery(t *testing.T, config *SDConfig) *Discovery {
 }
 func checkOneTarget(t *testing.T, tg []*targetgroup.Group) {
-require.Equal(t, 1, len(tg))
+require.Len(t, tg, 1)
 target := tg[0]
 require.Equal(t, "test-dc", string(target.Labels["__meta_consul_dc"]))
 require.Equal(t, target.Source, string(target.Labels["__meta_consul_service"]))
 if target.Source == "test" {
 // test service should have one node.
-require.Greater(t, len(target.Targets), 0, "Test service should have one node")
+require.NotEmpty(t, target.Targets, "Test service should have one node")
 }
 }
@@ -313,7 +313,7 @@ func TestNoTargets(t *testing.T) {
 }()
 targets := (<-ch)[0].Targets
-require.Equal(t, 0, len(targets))
+require.Empty(t, targets)
 cancel()
 <-ch
 }
@@ -484,7 +484,7 @@ oauth2:
 return
 }
-require.Equal(t, config, test.expected)
+require.Equal(t, test.expected, config)
 })
 }
 }


@@ -56,12 +56,12 @@ func TestDigitalOceanSDRefresh(t *testing.T) {
 tgs, err := d.refresh(ctx)
 require.NoError(t, err)
-require.Equal(t, 1, len(tgs))
+require.Len(t, tgs, 1)
 tg := tgs[0]
 require.NotNil(t, tg)
 require.NotNil(t, tg.Targets)
-require.Equal(t, 4, len(tg.Targets))
+require.Len(t, tg.Targets, 4)
 for i, lbls := range []model.LabelSet{
 {


@@ -184,17 +184,17 @@ func TestFetchApps(t *testing.T) {
 apps, err := fetchApps(context.TODO(), ts.URL, &http.Client{})
 require.NoError(t, err)
-require.Equal(t, len(apps.Applications), 2)
-require.Equal(t, apps.Applications[0].Name, "CONFIG-SERVICE")
-require.Equal(t, apps.Applications[1].Name, "META-SERVICE")
+require.Len(t, apps.Applications, 2)
+require.Equal(t, "CONFIG-SERVICE", apps.Applications[0].Name)
+require.Equal(t, "META-SERVICE", apps.Applications[1].Name)
-require.Equal(t, len(apps.Applications[1].Instances), 2)
-require.Equal(t, apps.Applications[1].Instances[0].InstanceID, "meta-service002.test.com:meta-service:8080")
-require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[0].XMLName.Local, "project")
-require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[0].Content, "meta-service")
-require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[1].XMLName.Local, "management.port")
-require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[1].Content, "8090")
-require.Equal(t, apps.Applications[1].Instances[1].InstanceID, "meta-service001.test.com:meta-service:8080")
+require.Len(t, apps.Applications[1].Instances, 2)
+require.Equal(t, "meta-service002.test.com:meta-service:8080", apps.Applications[1].Instances[0].InstanceID)
+require.Equal(t, "project", apps.Applications[1].Instances[0].Metadata.Items[0].XMLName.Local)
+require.Equal(t, "meta-service", apps.Applications[1].Instances[0].Metadata.Items[0].Content)
+require.Equal(t, "management.port", apps.Applications[1].Instances[0].Metadata.Items[1].XMLName.Local)
+require.Equal(t, "8090", apps.Applications[1].Instances[0].Metadata.Items[1].Content)
+require.Equal(t, "meta-service001.test.com:meta-service:8080", apps.Applications[1].Instances[1].InstanceID)
 }
 func Test500ErrorHttpResponse(t *testing.T) {


@@ -55,7 +55,7 @@ func TestEurekaSDHandleError(t *testing.T) {
 tgs, err := testUpdateServices(respHandler)
 require.EqualError(t, err, errTesting)
-require.Equal(t, len(tgs), 0)
+require.Empty(t, tgs)
 }
 func TestEurekaSDEmptyList(t *testing.T) {
@@ -72,7 +72,7 @@ func TestEurekaSDEmptyList(t *testing.T) {
 )
 tgs, err := testUpdateServices(respHandler)
 require.NoError(t, err)
-require.Equal(t, len(tgs), 1)
+require.Len(t, tgs, 1)
 }
 func TestEurekaSDSendGroup(t *testing.T) {
@@ -232,11 +232,11 @@ func TestEurekaSDSendGroup(t *testing.T) {
 tgs, err := testUpdateServices(respHandler)
 require.NoError(t, err)
-require.Equal(t, len(tgs), 1)
+require.Len(t, tgs, 1)
 tg := tgs[0]
-require.Equal(t, tg.Source, "eureka")
-require.Equal(t, len(tg.Targets), 4)
+require.Equal(t, "eureka", tg.Source)
+require.Len(t, tg.Targets, 4)
 tgt := tg.Targets[0]
 require.Equal(t, tgt[model.AddressLabel], model.LabelValue("config-service001.test.com:8080"))


@@ -48,12 +48,12 @@ func TestHCloudSDRefresh(t *testing.T) {
 targetGroups, err := d.refresh(context.Background())
 require.NoError(t, err)
-require.Equal(t, 1, len(targetGroups))
+require.Len(t, targetGroups, 1)
 targetGroup := targetGroups[0]
 require.NotNil(t, targetGroup, "targetGroup should not be nil")
 require.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil")
-require.Equal(t, 3, len(targetGroup.Targets))
+require.Len(t, targetGroup.Targets, 3)
 for i, labelSet := range []model.LabelSet{
 {


@@ -47,12 +47,12 @@ func TestRobotSDRefresh(t *testing.T) {
 targetGroups, err := d.refresh(context.Background())
 require.NoError(t, err)
-require.Equal(t, 1, len(targetGroups))
+require.Len(t, targetGroups, 1)
 targetGroup := targetGroups[0]
 require.NotNil(t, targetGroup, "targetGroup should not be nil")
 require.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil")
-require.Equal(t, 2, len(targetGroup.Targets))
+require.Len(t, targetGroup.Targets, 2)
 for i, labelSet := range []model.LabelSet{
 {
@@ -98,5 +98,5 @@ func TestRobotSDRefreshHandleError(t *testing.T) {
 require.Error(t, err)
 require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error())
-require.Equal(t, 0, len(targetGroups))
+require.Empty(t, targetGroups)
 }


@@ -62,7 +62,7 @@ func TestHTTPValidRefresh(t *testing.T) {
 Source: urlSource(ts.URL+"/http_sd.good.json", 0),
 },
 }
-require.Equal(t, tgs, expectedTargets)
+require.Equal(t, expectedTargets, tgs)
 require.Equal(t, 0.0, getFailureCount())
 }


@@ -48,12 +48,12 @@ func TestIONOSServerRefresh(t *testing.T) {
 tgs, err := d.refresh(ctx)
 require.NoError(t, err)
-require.Equal(t, 1, len(tgs))
+require.Len(t, tgs, 1)
 tg := tgs[0]
 require.NotNil(t, tg)
 require.NotNil(t, tg.Targets)
-require.Equal(t, 2, len(tg.Targets))
+require.Len(t, tg.Targets, 2)
 for i, lbls := range []model.LabelSet{
 {


@@ -29,7 +29,7 @@ func Test_EndpointSliceAdaptor_v1(t *testing.T) {
 require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace())
 require.Equal(t, endpointSlice.AddressType, v1.AddressType(adaptor.addressType()))
 require.Equal(t, endpointSlice.Labels, adaptor.labels())
-require.Equal(t, endpointSlice.Labels[v1.LabelServiceName], "testendpoints")
+require.Equal(t, "testendpoints", endpointSlice.Labels[v1.LabelServiceName])
 for i, endpointAdaptor := range adaptor.endpoints() {
 require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses())
@@ -57,7 +57,7 @@ func Test_EndpointSliceAdaptor_v1beta1(t *testing.T) {
 require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace())
 require.Equal(t, endpointSlice.AddressType, v1beta1.AddressType(adaptor.addressType()))
 require.Equal(t, endpointSlice.Labels, adaptor.labels())
-require.Equal(t, endpointSlice.Labels[v1beta1.LabelServiceName], "testendpoints")
+require.Equal(t, "testendpoints", endpointSlice.Labels[v1beta1.LabelServiceName])
 for i, endpointAdaptor := range adaptor.endpoints() {
 require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses())


@@ -61,12 +61,12 @@ func TestLinodeSDRefresh(t *testing.T) {
 tgs, err := d.refresh(context.Background())
 require.NoError(t, err)
-require.Equal(t, 1, len(tgs))
+require.Len(t, tgs, 1)
 tg := tgs[0]
 require.NotNil(t, tg)
 require.NotNil(t, tg.Targets)
-require.Equal(t, 4, len(tg.Targets))
+require.Len(t, tg.Targets, 4)
 for i, lbls := range []model.LabelSet{
 {


@@ -790,21 +790,21 @@ func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) {
 discoveryManager.ApplyConfig(c)
 syncedTargets := <-discoveryManager.SyncCh()
-require.Equal(t, 1, len(syncedTargets))
+require.Len(t, syncedTargets, 1)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus"]))
+require.Len(t, syncedTargets["prometheus"], 1)
 p := pk("static", "prometheus", 0)
 verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(discoveryManager.targets))
+require.Len(t, discoveryManager.targets, 1)
 discoveryManager.ApplyConfig(c)
 syncedTargets = <-discoveryManager.SyncCh()
 verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(discoveryManager.targets))
-require.Equal(t, 1, len(syncedTargets))
+require.Len(t, discoveryManager.targets, 1)
+require.Len(t, syncedTargets, 1)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus"]))
+require.Len(t, syncedTargets["prometheus"], 1)
 }
 func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
@@ -822,12 +822,12 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
 discoveryManager.ApplyConfig(c)
 syncedTargets := <-discoveryManager.SyncCh()
-require.Equal(t, 1, len(syncedTargets))
+require.Len(t, syncedTargets, 1)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus"]))
+require.Len(t, syncedTargets["prometheus"], 1)
 p := pk("static", "prometheus", 0)
 verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(discoveryManager.targets))
+require.Len(t, discoveryManager.targets, 1)
 c["prometheus2"] = c["prometheus"]
 delete(c, "prometheus")
@@ -836,10 +836,10 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
 syncedTargets = <-discoveryManager.SyncCh()
 p = pk("static", "prometheus2", 0)
 verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(discoveryManager.targets))
-require.Equal(t, 1, len(syncedTargets))
+require.Len(t, discoveryManager.targets, 1)
+require.Len(t, syncedTargets, 1)
 verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus2"]))
+require.Len(t, syncedTargets["prometheus2"], 1)
 }
 func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testi
@@ -860,24 +860,24 @@ func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testi
 c["prometheus2"] = c["prometheus"]
 discoveryManager.ApplyConfig(c)
 syncedTargets := <-discoveryManager.SyncCh()
-require.Equal(t, 2, len(syncedTargets))
+require.Len(t, syncedTargets, 2)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus"]))
+require.Len(t, syncedTargets["prometheus"], 1)
 verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus2"]))
+require.Len(t, syncedTargets["prometheus2"], 1)
 p := pk("static", "prometheus", 0)
 verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 2, len(discoveryManager.targets))
+require.Len(t, discoveryManager.targets, 2)
 delete(c, "prometheus")
 discoveryManager.ApplyConfig(c)
 syncedTargets = <-discoveryManager.SyncCh()
 p = pk("static", "prometheus2", 0)
 verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(discoveryManager.targets))
-require.Equal(t, 1, len(syncedTargets))
+require.Len(t, discoveryManager.targets, 1)
+require.Len(t, syncedTargets, 1)
 verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus2"]))
+require.Len(t, syncedTargets["prometheus2"], 1)
 }
 func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
@@ -895,9 +895,9 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
 discoveryManager.ApplyConfig(c)
 syncedTargets := <-discoveryManager.SyncCh()
-require.Equal(t, 1, len(syncedTargets))
+require.Len(t, syncedTargets, 1)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus"]))
+require.Len(t, syncedTargets["prometheus"], 1)
 var mu sync.Mutex
 c["prometheus2"] = Configs{
@@ -912,33 +912,33 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
 // Original targets should be present as soon as possible.
 syncedTargets = <-discoveryManager.SyncCh()
 mu.Unlock()
-require.Equal(t, 1, len(syncedTargets))
+require.Len(t, syncedTargets, 1)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus"]))
+require.Len(t, syncedTargets["prometheus"], 1)
 // prometheus2 configs should be ready on second sync.
 syncedTargets = <-discoveryManager.SyncCh()
-require.Equal(t, 2, len(syncedTargets))
+require.Len(t, syncedTargets, 2)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus"]))
+require.Len(t, syncedTargets["prometheus"], 1)
 verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"bar:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus2"]))
+require.Len(t, syncedTargets["prometheus2"], 1)
 p := pk("static", "prometheus", 0)
 verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
 p = pk("lockstatic", "prometheus2", 1)
 verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true)
-require.Equal(t, 2, len(discoveryManager.targets))
+require.Len(t, discoveryManager.targets, 2)
 // Delete part of config and ensure only original targets exist.
 delete(c, "prometheus2")
 discoveryManager.ApplyConfig(c)
 syncedTargets = <-discoveryManager.SyncCh()
-require.Equal(t, 1, len(discoveryManager.targets))
+require.Len(t, discoveryManager.targets, 1)
 verifyPresence(t, discoveryManager.targets, pk("static", "prometheus", 0), "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets))
+require.Len(t, syncedTargets, 1)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus"]))
+require.Len(t, syncedTargets["prometheus"], 1)
 }
 func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
@@ -959,25 +959,25 @@ func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
 p := pk("static", "prometheus", 0)
 verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
 verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true)
-require.Equal(t, 1, len(discoveryManager.targets))
-require.Equal(t, 1, len(syncedTargets))
+require.Len(t, discoveryManager.targets, 1)
+require.Len(t, syncedTargets, 1)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true)
-require.Equal(t, 2, len(syncedTargets["prometheus"]))
+require.Len(t, syncedTargets["prometheus"], 2)
 c["prometheus"] = Configs{
 staticConfig("foo:9090"),
 }
 discoveryManager.ApplyConfig(c)
 syncedTargets = <-discoveryManager.SyncCh()
-require.Equal(t, 1, len(discoveryManager.targets))
+require.Len(t, discoveryManager.targets, 1)
 p = pk("static", "prometheus", 1)
 verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
 verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", false)
-require.Equal(t, 1, len(discoveryManager.targets))
-require.Equal(t, 1, len(syncedTargets))
+require.Len(t, discoveryManager.targets, 1)
+require.Len(t, syncedTargets, 1)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus"]))
+require.Len(t, syncedTargets["prometheus"], 1)
 }
 func TestDiscovererConfigs(t *testing.T) {
@@ -1001,12 +1001,12 @@ func TestDiscovererConfigs(t *testing.T) {
 verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true)
 p = pk("static", "prometheus", 1)
 verifyPresence(t, discoveryManager.targets, p, "{__address__=\"baz:9090\"}", true)
-require.Equal(t, 2, len(discoveryManager.targets))
-require.Equal(t, 1, len(syncedTargets))
+require.Len(t, discoveryManager.targets, 2)
+require.Len(t, syncedTargets, 1)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"baz:9090\"}", true)
-require.Equal(t, 3, len(syncedTargets["prometheus"]))
+require.Len(t, syncedTargets["prometheus"], 3)
 }
 // TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after
@@ -1029,9 +1029,9 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 syncedTargets := <-discoveryManager.SyncCh()
 p := pk("static", "prometheus", 0)
 verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets))
+require.Len(t, syncedTargets, 1)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus"]))
+require.Len(t, syncedTargets["prometheus"], 1)
 c["prometheus"] = Configs{
 StaticConfig{{}},
@@ -1052,8 +1052,8 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 if len(group.Targets) != 0 {
 t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets))
 }
-require.Equal(t, 1, len(syncedTargets))
-require.Equal(t, 1, len(syncedTargets["prometheus"]))
+require.Len(t, syncedTargets, 1)
+require.Len(t, syncedTargets["prometheus"], 1)
 if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil {
 t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls)
 }
@@ -1082,11 +1082,11 @@ func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
 if len(discoveryManager.providers) != 1 {
 t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers))
 }
-require.Equal(t, 2, len(syncedTargets))
+require.Len(t, syncedTargets, 2)
 verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus"]))
+require.Len(t, syncedTargets["prometheus"], 1)
 verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true)
-require.Equal(t, 1, len(syncedTargets["prometheus2"]))
+require.Len(t, syncedTargets["prometheus2"], 1)
 }
 func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {


@@ -44,12 +44,12 @@ host: %s
 tgs, err := d.refresh(ctx)
 require.NoError(t, err)
-require.Equal(t, 1, len(tgs))
+require.Len(t, tgs, 1)
 tg := tgs[0]
 require.NotNil(t, tg)
 require.NotNil(t, tg.Targets)
-require.Equal(t, 3, len(tg.Targets))
+require.Len(t, tg.Targets, 3)
 for i, lbls := range []model.LabelSet{
 {


@@ -45,12 +45,12 @@ host: %s
 tgs, err := d.refresh(ctx)
 require.NoError(t, err)
-require.Equal(t, 1, len(tgs))
+require.Len(t, tgs, 1)
 tg := tgs[0]
 require.NotNil(t, tg)
 require.NotNil(t, tg.Targets)
-require.Equal(t, 5, len(tg.Targets))
+require.Len(t, tg.Targets, 5)
 for i, lbls := range []model.LabelSet{
 {


@@ -45,12 +45,12 @@ host: %s
 tgs, err := d.refresh(ctx)
 require.NoError(t, err)
-require.Equal(t, 1, len(tgs))
+require.Len(t, tgs, 1)
 tg := tgs[0]
 require.NotNil(t, tg)
 require.NotNil(t, tg.Targets)
-require.Equal(t, 15, len(tg.Targets))
+require.Len(t, tg.Targets, 15)
 for i, lbls := range []model.LabelSet{
 {
@@ -339,12 +339,12 @@ filters:
 tgs, err := d.refresh(ctx)
 require.NoError(t, err)
-require.Equal(t, 1, len(tgs))
+require.Len(t, tgs, 1)
 tg := tgs[0]
 require.NotNil(t, tg, "tg should not be nil")
 require.NotNil(t, tg.Targets, "tg.targets should not be nil")
-require.Equal(t, 4, len(tg.Targets))
+require.Len(t, tg.Targets, 4)
 for i, lbls := range []model.LabelSet{
 {


@@ -45,12 +45,12 @@ host: %s
 tgs, err := d.refresh(ctx)
 require.NoError(t, err)
-require.Equal(t, 1, len(tgs))
+require.Len(t, tgs, 1)
 tg := tgs[0]
 require.NotNil(t, tg)
 require.NotNil(t, tg.Targets)
-require.Equal(t, 27, len(tg.Targets))
+require.Len(t, tg.Targets, 27)
 for i, lbls := range []model.LabelSet{
 {


@@ -147,12 +147,12 @@ func TestNomadSDRefresh(t *testing.T) {
 tgs, err := d.refresh(context.Background())
 require.NoError(t, err)
-require.Equal(t, 1, len(tgs))
+require.Len(t, tgs, 1)
 tg := tgs[0]
 require.NotNil(t, tg)
 require.NotNil(t, tg.Targets)
-require.Equal(t, 1, len(tg.Targets))
+require.Len(t, tg.Targets, 1)
 lbls := model.LabelSet{
 "__address__": model.LabelValue("127.0.0.1:30456"),


@@ -53,12 +53,12 @@ func TestOpenstackSDHypervisorRefresh(t *testing.T) {
 hypervisor, _ := mock.openstackAuthSuccess()
 ctx := context.Background()
 tgs, err := hypervisor.refresh(ctx)
-require.Equal(t, 1, len(tgs))
+require.Len(t, tgs, 1)
 tg := tgs[0]
 require.NoError(t, err)
 require.NotNil(t, tg)
 require.NotNil(t, tg.Targets)
-require.Equal(t, 2, len(tg.Targets))
+require.Len(t, tg.Targets, 2)
 for l, v := range map[string]string{
 "__address__": "172.16.70.14:0",


@@ -61,12 +61,12 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
 tgs, err := instance.refresh(ctx)
 require.NoError(t, err)
-require.Equal(t, 1, len(tgs))
+require.Len(t, tgs, 1)
 tg := tgs[0]
 require.NotNil(t, tg)
 require.NotNil(t, tg.Targets)
-require.Equal(t, 4, len(tg.Targets))
+require.Len(t, tg.Targets, 4)
 for i, lbls := range []model.LabelSet{
 {


@@ -47,11 +47,11 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr
 targetGroups, err := d.refresh(ctx)
 require.NoError(t, err)
-require.Equal(t, 1, len(targetGroups))
+require.Len(t, targetGroups, 1)
 targetGroup := targetGroups[0]
 require.NotNil(t, targetGroup)
 require.NotNil(t, targetGroup.Targets)
-require.Equal(t, 1, len(targetGroup.Targets))
+require.Len(t, targetGroup.Targets, 1)
 for i, lbls := range []model.LabelSet{
 {


@@ -49,11 +49,11 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr
 targetGroups, err := d.refresh(ctx)
 require.NoError(t, err)
-require.Equal(t, 1, len(targetGroups))
+require.Len(t, targetGroups, 1)
 targetGroup := targetGroups[0]
 require.NotNil(t, targetGroup)
 require.NotNil(t, targetGroup.Targets)
-require.Equal(t, 1, len(targetGroup.Targets))
+require.Len(t, targetGroup.Targets, 1)
 for i, lbls := range []model.LabelSet{
 {
 "__address__": "192.0.2.1",


@@ -105,7 +105,7 @@ func TestPuppetDBRefresh(t *testing.T) {
 Source: ts.URL + "/pdb/query/v4?query=vhosts",
 },
 }
-require.Equal(t, tgs, expectedTargets)
+require.Equal(t, expectedTargets, tgs)
 }
 func TestPuppetDBRefreshWithParameters(t *testing.T) {
@@ -156,7 +156,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) {
 Source: ts.URL + "/pdb/query/v4?query=vhosts",
 },
 }
-require.Equal(t, tgs, expectedTargets)
+require.Equal(t, expectedTargets, tgs)
 }
 func TestPuppetDBInvalidCode(t *testing.T) {


@@ -55,12 +55,12 @@ api_url: %s
 tgs, err := d.refresh(ctx)
 require.NoError(t, err)
-require.Equal(t, 1, len(tgs))
+require.Len(t, tgs, 1)
 tg := tgs[0]
 require.NotNil(t, tg)
 require.NotNil(t, tg.Targets)
-require.Equal(t, 2, len(tg.Targets))
+require.Len(t, tg.Targets, 2)
 for i, lbls := range []model.LabelSet{
 {
@@ -161,5 +161,5 @@ api_url: %s
 tgs, err := d.refresh(ctx)
 require.NoError(t, err)
-require.Equal(t, 1, len(tgs))
+require.Len(t, tgs, 1)
 }


@@ -155,7 +155,7 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) {
 tgts := testTritonSDRefresh(t, conf, dstr)
 require.NotNil(t, tgts)
-require.Equal(t, 2, len(tgts))
+require.Len(t, tgts, 2)
 }
 func TestTritonSDRefreshNoServer(t *testing.T) {
@@ -163,7 +163,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) {
 _, err := td.refresh(context.Background())
 require.Error(t, err)
-require.Equal(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint"), true)
+require.True(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint"))
 }
 func TestTritonSDRefreshCancelled(t *testing.T) {
@@ -173,7 +173,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) {
 cancel()
 _, err := td.refresh(ctx)
 require.Error(t, err)
-require.Equal(t, strings.Contains(err.Error(), context.Canceled.Error()), true)
+require.True(t, strings.Contains(err.Error(), context.Canceled.Error()))
 }
 func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
@@ -188,7 +188,7 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
 tgts := testTritonSDRefresh(t, cnconf, dstr)
 require.NotNil(t, tgts)
-require.Equal(t, 2, len(tgts))
+require.Len(t, tgts, 2)
 }
 func TestTritonSDRefreshCNsWithHostname(t *testing.T) {
@@ -205,7 +205,7 @@ func TestTritonSDRefreshCNsWithHostname(t *testing.T) {
 tgts := testTritonSDRefresh(t, cnconf, dstr)
 require.NotNil(t, tgts)
-require.Equal(t, 2, len(tgts))
+require.Len(t, tgts, 2)
 }
 func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet {
@@ -235,7 +235,7 @@ func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet
 tgs, err := td.refresh(context.Background())
 require.NoError(t, err)
-require.Equal(t, 1, len(tgs))
+require.Len(t, tgs, 1)
 tg := tgs[0]
 require.NotNil(t, tg)


@@ -55,7 +55,7 @@ func TestUyuniSDHandleError(t *testing.T) {
 tgs, err := testUpdateServices(respHandler)
 require.EqualError(t, err, errTesting)
-require.Equal(t, len(tgs), 0)
+require.Empty(t, tgs)
 }
 func TestUyuniSDLogin(t *testing.T) {
@@ -87,7 +87,7 @@ func TestUyuniSDLogin(t *testing.T) {
 tgs, err := testUpdateServices(respHandler)
 require.EqualError(t, err, errTesting)
-require.Equal(t, len(tgs), 0)
+require.Empty(t, tgs)
 }
 func TestUyuniSDSkipLogin(t *testing.T) {
@@ -119,5 +119,5 @@ func TestUyuniSDSkipLogin(t *testing.T) {
 tgs, err := md.refresh(context.Background())
 require.EqualError(t, err, errTesting)
-require.Equal(t, len(tgs), 0)
+require.Empty(t, tgs)
 }


@@ -56,12 +56,12 @@ func TestVultrSDRefresh(t *testing.T) {
 tgs, err := d.refresh(ctx)
 require.NoError(t, err)
-require.Equal(t, 1, len(tgs))
+require.Len(t, tgs, 1)
 tg := tgs[0]
 require.NotNil(t, tg)
 require.NotNil(t, tg.Targets)
-require.Equal(t, 3, len(tg.Targets))
+require.Len(t, tg.Targets, 3)
 for i, k := range []model.LabelSet{
 {


@@ -53,14 +53,14 @@ func TestMakeXDSResourceHttpEndpointEmptyServerURLScheme(t *testing.T) {
 require.Empty(t, endpointURL)
 require.Error(t, err)
-require.Equal(t, err.Error(), "invalid xDS server URL")
+require.Equal(t, "invalid xDS server URL", err.Error())
 }
 func TestMakeXDSResourceHttpEndpointEmptyServerURLHost(t *testing.T) {
 endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("grpc://127.0.0.1"), "monitoring")
 require.Empty(t, endpointURL)
-require.NotNil(t, err)
+require.Error(t, err)
 require.Contains(t, err.Error(), "must be either 'http' or 'https'")
 }
@@ -68,7 +68,7 @@ func TestMakeXDSResourceHttpEndpoint(t *testing.T) {
 endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("http://127.0.0.1:5000"), "monitoring")
 require.NoError(t, err)
-require.Equal(t, endpointURL.String(), "http://127.0.0.1:5000/v3/discovery:monitoring")
+require.Equal(t, "http://127.0.0.1:5000/v3/discovery:monitoring", endpointURL.String())
 }
 func TestCreateNewHTTPResourceClient(t *testing.T) {
@@ -89,8 +89,8 @@ func TestCreateNewHTTPResourceClient(t *testing.T) {
 require.NoError(t, err)
-require.Equal(t, client.endpoint, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1")
-require.Equal(t, client.client.Timeout, 1*time.Minute)
+require.Equal(t, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1", client.endpoint)
+require.Equal(t, 1*time.Minute, client.client.Timeout)
 }
 func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, protocolVersion ProtocolVersion, responder discoveryResponder) (*HTTPResourceClient, func()) {
@@ -138,7 +138,7 @@ func TestHTTPResourceClientFetchFullResponse(t *testing.T) {
 require.NotNil(t, res)
 require.Equal(t, client.ResourceTypeURL(), res.TypeUrl)
-require.Len(t, res.Resources, 0)
+require.Empty(t, res.Resources)
 require.Equal(t, "abc", client.latestNonce, "Nonce not cached")
 require.Equal(t, "1", client.latestVersion, "Version not cached")


@@ -129,7 +129,7 @@ func TestKumaMadsV1ResourceParserInvalidTypeURL(t *testing.T) {
 func TestKumaMadsV1ResourceParserEmptySlice(t *testing.T) {
 resources := make([]*anypb.Any, 0)
 groups, err := kumaMadsV1ResourceParser(resources, KumaMadsV1ResourceTypeURL)
-require.Len(t, groups, 0)
+require.Empty(t, groups)
 require.NoError(t, err)
 }


@@ -1782,8 +1782,8 @@ func TestReverseFloatBucketIterator(t *testing.T) {
 for it.Next() {
 actBuckets = append(actBuckets, it.At())
 }
-require.Greater(t, len(expBuckets), 0)
-require.Greater(t, len(actBuckets), 0)
+require.NotEmpty(t, expBuckets)
+require.NotEmpty(t, actBuckets)
 require.Equal(t, expBuckets, actBuckets)
 // Negative buckets.
@@ -1798,8 +1798,8 @@
 for it.Next() {
 actBuckets = append(actBuckets, it.At())
 }
-require.Greater(t, len(expBuckets), 0)
-require.Greater(t, len(actBuckets), 0)
+require.NotEmpty(t, expBuckets)
+require.NotEmpty(t, actBuckets)
 require.Equal(t, expBuckets, actBuckets)
 }


@@ -193,10 +193,10 @@ groups:
 _, errs := Parse([]byte(group))
 require.Len(t, errs, 2, "Expected two errors")
 var err00 *Error
-require.True(t, errors.As(errs[0], &err00))
+require.ErrorAs(t, errs[0], &err00)
 err0 := err00.Err.node
 var err01 *Error
-require.True(t, errors.As(errs[1], &err01))
+require.ErrorAs(t, errs[1], &err01)
 err1 := err01.Err.node
 require.NotEqual(t, err0, err1, "Error nodes should not be the same")
 }


@@ -269,9 +269,9 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
 require.Equal(t, exp[i].v, v)
 require.Equal(t, exp[i].lset, res)
 if exp[i].e == nil {
-require.Equal(t, false, found)
+require.False(t, found)
 } else {
-require.Equal(t, true, found)
+require.True(t, found)
 require.Equal(t, *exp[i].e, e)
 }
@@ -296,7 +296,7 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
 i++
 }
-require.Equal(t, len(exp), i)
+require.Len(t, exp, i)
 }
 func TestOpenMetricsParseErrors(t *testing.T) {


@@ -209,7 +209,7 @@ testmetric{label="\"bar\""} 1`
 i++
 }
-require.Equal(t, len(exp), i)
+require.Len(t, exp, i)
 }
 func TestPromParseErrors(t *testing.T) {


@@ -1989,22 +1989,22 @@ func TestProtobufParse(t *testing.T) {
 if ts != nil {
 require.Equal(t, exp[i].t, *ts, "i: %d", i)
 } else {
-require.Equal(t, exp[i].t, int64(0), "i: %d", i)
+require.Equal(t, int64(0), exp[i].t, "i: %d", i)
 }
 require.Equal(t, exp[i].v, v, "i: %d", i)
 require.Equal(t, exp[i].lset, res, "i: %d", i)
 if len(exp[i].e) == 0 {
-require.Equal(t, false, eFound, "i: %d", i)
+require.False(t, eFound, "i: %d", i)
 } else {
-require.Equal(t, true, eFound, "i: %d", i)
+require.True(t, eFound, "i: %d", i)
 require.Equal(t, exp[i].e[0], e, "i: %d", i)
 require.False(t, p.Exemplar(&e), "too many exemplars returned, i: %d", i)
 }
 if exp[i].ct != nil {
-require.Equal(t, true, ctFound, "i: %d", i)
+require.True(t, ctFound, "i: %d", i)
 require.Equal(t, exp[i].ct.String(), ct.String(), "i: %d", i)
 } else {
-require.Equal(t, false, ctFound, "i: %d", i)
+require.False(t, ctFound, "i: %d", i)
 }
 case EntryHistogram:
@@ -2014,7 +2014,7 @@ func TestProtobufParse(t *testing.T) {
 if ts != nil {
 require.Equal(t, exp[i].t, *ts, "i: %d", i)
 } else {
-require.Equal(t, exp[i].t, int64(0), "i: %d", i)
+require.Equal(t, int64(0), exp[i].t, "i: %d", i)
 }
 require.Equal(t, exp[i].lset, res, "i: %d", i)
 require.Equal(t, exp[i].m, string(m), "i: %d", i)
@@ -2028,7 +2028,7 @@ func TestProtobufParse(t *testing.T) {
 require.Equal(t, exp[i].e[j], e, "i: %d", i)
 e = exemplar.Exemplar{}
 }
-require.Equal(t, len(exp[i].e), j, "not enough exemplars found, i: %d", i)
+require.Len(t, exp[i].e, j, "not enough exemplars found, i: %d", i)
 case EntryType:
 m, typ := p.Type()
@@ -2051,7 +2051,7 @@ func TestProtobufParse(t *testing.T) {
 i++
 }
-require.Equal(t, len(exp), i)
+require.Len(t, exp, i)
 })
 }
 }


@@ -83,7 +83,7 @@ func TestHandlerNextBatch(t *testing.T) {
 require.NoError(t, alertsEqual(expected[0:maxBatchSize], h.nextBatch()))
 require.NoError(t, alertsEqual(expected[maxBatchSize:2*maxBatchSize], h.nextBatch()))
 require.NoError(t, alertsEqual(expected[2*maxBatchSize:], h.nextBatch()))
-require.Equal(t, 0, len(h.queue), "Expected queue to be empty but got %d alerts", len(h.queue))
+require.Empty(t, h.queue, "Expected queue to be empty but got %d alerts", len(h.queue))
 }
 func alertsEqual(a, b []*Alert) error {
@@ -482,7 +482,7 @@ alerting:
 `
 err := yaml.UnmarshalStrict([]byte(s), cfg)
 require.NoError(t, err, "Unable to load YAML config.")
-require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs))
+require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 1)
 err = n.ApplyConfig(cfg)
 require.NoError(t, err, "Error applying the config.")
@@ -533,7 +533,7 @@ alerting:
 `
 err := yaml.UnmarshalStrict([]byte(s), cfg)
 require.NoError(t, err, "Unable to load YAML config.")
-require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs))
+require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 1)
 err = n.ApplyConfig(cfg)
 require.NoError(t, err, "Error applying the config.")


@@ -140,7 +140,7 @@ func TestQueryTimeout(t *testing.T) {
 require.Error(t, res.Err, "expected timeout error but got none")
 var e ErrQueryTimeout
-require.True(t, errors.As(res.Err, &e), "expected timeout error but got: %s", res.Err)
+require.ErrorAs(t, res.Err, &e, "expected timeout error but got: %s", res.Err)
 }
 const errQueryCanceled = ErrQueryCanceled("test statement execution")
@@ -239,14 +239,14 @@ func TestQueryError(t *testing.T) {
 res := vectorQuery.Exec(ctx)
 require.Error(t, res.Err, "expected error on failed select but got none")
-require.True(t, errors.Is(res.Err, errStorage), "expected error doesn't match")
+require.ErrorIs(t, res.Err, errStorage, "expected error doesn't match")
 matrixQuery, err := engine.NewInstantQuery(ctx, queryable, nil, "foo[1m]", time.Unix(1, 0))
 require.NoError(t, err)
 res = matrixQuery.Exec(ctx)
 require.Error(t, res.Err, "expected error on failed select but got none")
-require.True(t, errors.Is(res.Err, errStorage), "expected error doesn't match")
+require.ErrorIs(t, res.Err, errStorage, "expected error doesn't match")
 }
 type noopHintRecordingQueryable struct {
@@ -635,7 +635,7 @@ func TestEngineShutdown(t *testing.T) {
 require.Error(t, res2.Err, "expected error on querying with canceled context but got none")
 var e ErrQueryCanceled
-require.True(t, errors.As(res2.Err, &e), "expected cancellation error but got: %s", res2.Err)
+require.ErrorAs(t, res2.Err, &e, "expected cancellation error but got: %s", res2.Err)
 }
 func TestEngineEvalStmtTimestamps(t *testing.T) {
@@ -2057,7 +2057,7 @@ func TestQueryLogger_basic(t *testing.T) {
 l := len(f1.logs)
 queryExec()
-require.Equal(t, 2*l, len(f1.logs))
+require.Len(t, f1.logs, 2*l)
 // Test that we close the query logger when unsetting it.
 require.False(t, f1.closed, "expected f1 to be open, got closed")
@@ -3003,8 +3003,8 @@ func TestEngineOptsValidation(t *testing.T) {
 require.Equal(t, c.expError, err1)
 require.Equal(t, c.expError, err2)
 } else {
-require.Nil(t, err1)
-require.Nil(t, err2)
+require.NoError(t, err1)
+require.NoError(t, err2)
 }
 }
 }


@@ -64,7 +64,7 @@ func TestDeriv(t *testing.T) {
 require.NoError(t, result.Err)
 vec, _ := result.Vector()
-require.Equal(t, 1, len(vec), "Expected 1 result, got %d", len(vec))
+require.Len(t, vec, 1, "Expected 1 result, got %d", len(vec))
 require.Equal(t, 0.0, vec[0].F, "Expected 0.0 as value, got %f", vec[0].F)
 }


@@ -3579,9 +3579,9 @@ func TestParseExpressions(t *testing.T) {
 require.True(t, ok, "unexpected error type")
 for _, e := range errorList {
-require.True(t, 0 <= e.PositionRange.Start, "parse error has negative position\nExpression '%s'\nError: %v", test.input, e)
-require.True(t, e.PositionRange.Start <= e.PositionRange.End, "parse error has negative length\nExpression '%s'\nError: %v", test.input, e)
-require.True(t, e.PositionRange.End <= posrange.Pos(len(test.input)), "parse error is not contained in input\nExpression '%s'\nError: %v", test.input, e)
+require.LessOrEqual(t, 0, e.PositionRange.Start, "parse error has negative position\nExpression '%s'\nError: %v", test.input, e)
+require.LessOrEqual(t, e.PositionRange.Start, e.PositionRange.End, "parse error has negative length\nExpression '%s'\nError: %v", test.input, e)
+require.LessOrEqual(t, e.PositionRange.End, posrange.Pos(len(test.input)), "parse error is not contained in input\nExpression '%s'\nError: %v", test.input, e)
 }
 }
 })


@@ -123,7 +123,7 @@ func TestMMapFile(t *testing.T) {
 bytes := make([]byte, 4)
 n, err := f.Read(bytes)
-require.Equal(t, n, 2)
+require.Equal(t, 2, n)
 require.NoError(t, err, "Unexpected error while reading file.")
 require.Equal(t, fileAsBytes, bytes[:2], "Mmap failed")


@@ -185,7 +185,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
 evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute)
 res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
 require.NoError(t, err)
-require.Equal(t, 0, len(res))
+require.Empty(t, res)
 }
 func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
@@ -828,7 +828,7 @@ func TestKeepFiringFor(t *testing.T) {
 evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute)
 res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
 require.NoError(t, err)
-require.Equal(t, 0, len(res))
+require.Empty(t, res)
 }
 func TestPendingAndKeepFiringFor(t *testing.T) {
@@ -880,7 +880,7 @@ func TestPendingAndKeepFiringFor(t *testing.T) {
 evalTime := baseTime.Add(time.Minute)
 res, err = rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0)
 require.NoError(t, err)
-require.Equal(t, 0, len(res))
+require.Empty(t, res)
 }
 // TestAlertingEvalWithOrigin checks that the alerting rule details are passed through the context.


@@ -167,7 +167,7 @@ func TestAlertingRule(t *testing.T) {
 filteredRes = append(filteredRes, smpl)
 } else {
 // If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
-require.Equal(t, smplName, "ALERTS_FOR_STATE")
+require.Equal(t, "ALERTS_FOR_STATE", smplName)
 }
 }
 for i := range test.result {
@@ -313,7 +313,7 @@ func TestForStateAddSamples(t *testing.T) {
 filteredRes = append(filteredRes, smpl)
 } else {
 // If not 'ALERTS_FOR_STATE', it has to be 'ALERTS'.
-require.Equal(t, smplName, "ALERTS")
+require.Equal(t, "ALERTS", smplName)
 }
 }
 for i := range test.result {
@@ -471,12 +471,12 @@ func TestForStateRestore(t *testing.T) {
 // Checking if we have restored it correctly.
 switch {
 case tst.noRestore:
-require.Equal(t, tst.num, len(got))
+require.Len(t, got, tst.num)
 for _, e := range got {
 require.Equal(t, e.ActiveAt, restoreTime)
 }
 case tst.gracePeriod:
-require.Equal(t, tst.num, len(got))
+require.Len(t, got, tst.num)
 for _, e := range got {
 require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime))
 }
@@ -725,7 +725,7 @@ func TestUpdate(t *testing.T) {
 err := ruleManager.Update(10*time.Second, files, labels.EmptyLabels(), "", nil)
 require.NoError(t, err)
-require.Greater(t, len(ruleManager.groups), 0, "expected non-empty rule groups")
+require.NotEmpty(t, ruleManager.groups, "expected non-empty rule groups")
 ogs := map[string]*Group{}
 for h, g := range ruleManager.groups {
 g.seriesInPreviousEval = []map[string]labels.Labels{
@@ -746,7 +746,7 @@ func TestUpdate(t *testing.T) {
 // Groups will be recreated if updated.
 rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml")
-require.Equal(t, 0, len(errs), "file parsing failures")
+require.Empty(t, errs, "file parsing failures")
 tmpFile, err := os.CreateTemp("", "rules.test.*.yaml")
 require.NoError(t, err)
@@ -889,20 +889,20 @@ func TestNotify(t *testing.T) {
 // Alert sent right away
 group.Eval(ctx, time.Unix(1, 0))
-require.Equal(t, 1, len(lastNotified))
+require.Len(t, lastNotified, 1)
 require.NotZero(t, lastNotified[0].ValidUntil, "ValidUntil should not be zero")
 // Alert is not sent 1s later
 group.Eval(ctx, time.Unix(2, 0))
-require.Equal(t, 0, len(lastNotified))
+require.Empty(t, lastNotified)
 // Alert is resent at t=5s
 group.Eval(ctx, time.Unix(5, 0))
-require.Equal(t, 1, len(lastNotified))
+require.Len(t, lastNotified, 1)
 // Resolution alert sent right away
 group.Eval(ctx, time.Unix(6, 0))
-require.Equal(t, 1, len(lastNotified))
+require.Len(t, lastNotified, 1)
 }
 func TestMetricsUpdate(t *testing.T) {
@@ -1100,7 +1100,7 @@ func TestMetricsStalenessOnManagerShutdown(t *testing.T) {
 require.NoError(t, err)
 ruleManager.Stop()
 stopped = true
-require.True(t, time.Since(start) < 1*time.Second, "rule manager does not stop early")
+require.Less(t, time.Since(start), 1*time.Second, "rule manager does not stop early")
 time.Sleep(5 * time.Second)
 require.Equal(t, 0, countStaleNaN(t, storage), "invalid count of staleness markers after stopping the engine")
 }


@ -111,14 +111,14 @@ func TestDroppedTargetsList(t *testing.T) {
)
sp.Sync(tgs)
sp.Sync(tgs)
require.Equal(t, expectedLength, len(sp.droppedTargets))
require.Len(t, sp.droppedTargets, expectedLength)
require.Equal(t, expectedLength, sp.droppedTargetsCount)
require.Equal(t, expectedLabelSetString, sp.droppedTargets[0].DiscoveredLabels().String())
// Check that count is still correct when we don't retain all dropped targets.
sp.config.KeepDroppedTargets = 1
sp.Sync(tgs)
require.Equal(t, 1, len(sp.droppedTargets))
require.Len(t, sp.droppedTargets, 1)
require.Equal(t, expectedLength, sp.droppedTargetsCount)
}
@ -242,11 +242,11 @@ func TestScrapePoolStop(t *testing.T) {
}
mtx.Lock()
require.Equal(t, numTargets, len(stopped), "Unexpected number of stopped loops")
require.Len(t, stopped, numTargets, "Unexpected number of stopped loops")
mtx.Unlock()
require.Equal(t, 0, len(sp.activeTargets), "Targets were not cleared on stopping: %d left", len(sp.activeTargets))
require.Equal(t, 0, len(sp.loops), "Loops were not cleared on stopping: %d left", len(sp.loops))
require.Empty(t, sp.activeTargets, "Targets were not cleared on stopping: %d left", len(sp.activeTargets))
require.Empty(t, sp.loops, "Loops were not cleared on stopping: %d left", len(sp.loops))
}
func TestScrapePoolReload(t *testing.T) {
@ -333,11 +333,11 @@ func TestScrapePoolReload(t *testing.T) {
}
mtx.Lock()
require.Equal(t, numTargets, len(stopped), "Unexpected number of stopped loops")
require.Len(t, stopped, numTargets, "Unexpected number of stopped loops")
mtx.Unlock()
require.Equal(t, sp.activeTargets, beforeTargets, "Reloading affected target states unexpectedly")
require.Equal(t, numTargets, len(sp.loops), "Unexpected number of stopped loops after reload")
require.Len(t, sp.loops, numTargets, "Unexpected number of stopped loops after reload")
}
func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
@ -437,10 +437,10 @@ func TestScrapePoolTargetLimit(t *testing.T) {
for _, l := range sp.loops {
lerr := l.(*testLoop).getForcedError()
if shouldErr {
require.NotNil(t, lerr, "error was expected for %d targets with a limit of %d", targets, limit)
require.Error(t, lerr, "error was expected for %d targets with a limit of %d", targets, limit)
require.Equal(t, fmt.Sprintf("target_limit exceeded (number of targets: %d, limit: %d)", targets, limit), lerr.Error())
} else {
require.Equal(t, nil, lerr)
require.NoError(t, lerr)
}
}
}
@ -582,8 +582,8 @@ func TestScrapePoolRaces(t *testing.T) {
dropped := sp.DroppedTargets()
expectedActive, expectedDropped := len(tgts[0].Targets), 0
require.Equal(t, expectedActive, len(active), "Invalid number of active targets")
require.Equal(t, expectedDropped, len(dropped), "Invalid number of dropped targets")
require.Len(t, active, expectedActive, "Invalid number of active targets")
require.Len(t, dropped, expectedDropped, "Invalid number of dropped targets")
for i := 0; i < 20; i++ {
time.Sleep(10 * time.Millisecond)
@ -633,7 +633,7 @@ func TestScrapePoolScrapeLoopsStarted(t *testing.T) {
}))
sp.Sync(tgs)
require.Equal(t, 1, len(sp.loops))
require.Len(t, sp.loops, 1)
wg.Wait()
for _, l := range sp.loops {
@ -1123,7 +1123,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
// each scrape successful or not.
require.Equal(t, 27, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender)
require.Len(t, appender.resultFloats, 27, "Appended samples not as expected:\n%s", appender)
require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected")
require.True(t, value.IsStaleNaN(appender.resultFloats[6].f),
"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f))
@ -1170,7 +1170,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
// each scrape successful or not.
require.Equal(t, 17, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender)
require.Len(t, appender.resultFloats, 17, "Appended samples not as expected:\n%s", appender)
require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected")
require.True(t, value.IsStaleNaN(appender.resultFloats[6].f),
"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f))
@ -1237,7 +1237,7 @@ func TestScrapeLoopCache(t *testing.T) {
// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
// each scrape successful or not.
require.Equal(t, 26, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender)
require.Len(t, appender.resultFloats, 26, "Appended samples not as expected:\n%s", appender)
}
func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
@ -2529,7 +2529,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) {
require.NoError(t, err)
_, err = ts.readResponse(context.Background(), resp, &buf)
require.NoError(t, err)
require.Equal(t, len(responseBody), buf.Len())
require.Len(t, responseBody, buf.Len())
// Target response gzip compressed body, scrape without body size limit.
gzipResponse = true
buf.Reset()
@ -2537,7 +2537,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) {
require.NoError(t, err)
_, err = ts.readResponse(context.Background(), resp, &buf)
require.NoError(t, err)
require.Equal(t, len(responseBody), buf.Len())
require.Len(t, responseBody, buf.Len())
}
// testScraper implements the scraper interface and allows setting values
@ -2642,7 +2642,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
q, err := s.Querier(time.Time{}.UnixNano(), 0)
require.NoError(t, err)
series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
require.Equal(t, false, series.Next(), "series found in tsdb")
require.False(t, series.Next(), "series found in tsdb")
require.NoError(t, series.Err())
// We add a good metric to check that it is recorded.
@ -2654,9 +2654,9 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
q, err = s.Querier(time.Time{}.UnixNano(), 0)
require.NoError(t, err)
series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "le", "500"))
require.Equal(t, true, series.Next(), "series not found in tsdb")
require.True(t, series.Next(), "series not found in tsdb")
require.NoError(t, series.Err())
require.Equal(t, false, series.Next(), "more than one series found in tsdb")
require.False(t, series.Next(), "more than one series found in tsdb")
}
func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
@ -2684,7 +2684,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
q, err := s.Querier(time.Time{}.UnixNano(), 0)
require.NoError(t, err)
series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
require.Equal(t, false, series.Next(), "series found in tsdb")
require.False(t, series.Next(), "series found in tsdb")
require.NoError(t, series.Err())
}
@ -2744,14 +2744,14 @@ func TestReusableConfig(t *testing.T) {
}
for i, m := range match {
require.Equal(t, true, reusableCache(variants[m[0]], variants[m[1]]), "match test %d", i)
require.Equal(t, true, reusableCache(variants[m[1]], variants[m[0]]), "match test %d", i)
require.Equal(t, true, reusableCache(variants[m[1]], variants[m[1]]), "match test %d", i)
require.Equal(t, true, reusableCache(variants[m[0]], variants[m[0]]), "match test %d", i)
require.True(t, reusableCache(variants[m[0]], variants[m[1]]), "match test %d", i)
require.True(t, reusableCache(variants[m[1]], variants[m[0]]), "match test %d", i)
require.True(t, reusableCache(variants[m[1]], variants[m[1]]), "match test %d", i)
require.True(t, reusableCache(variants[m[0]], variants[m[0]]), "match test %d", i)
}
for i, m := range noMatch {
require.Equal(t, false, reusableCache(variants[m[0]], variants[m[1]]), "not match test %d", i)
require.Equal(t, false, reusableCache(variants[m[1]], variants[m[0]]), "not match test %d", i)
require.False(t, reusableCache(variants[m[0]], variants[m[1]]), "not match test %d", i)
require.False(t, reusableCache(variants[m[1]], variants[m[0]]), "not match test %d", i)
}
}
@ -3317,7 +3317,7 @@ test_summary_count 199
Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}},
},
})
require.Equal(t, 1, len(sp.ActiveTargets()))
require.Len(t, sp.ActiveTargets(), 1)
select {
case <-time.After(5 * time.Second):
@ -3394,7 +3394,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *
// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
// each scrape, successful or not.
require.Equal(t, 27, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender)
require.Len(t, appender.resultFloats, 27, "Appended samples not as expected:\n%s", appender)
require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected")
require.True(t, value.IsStaleNaN(appender.resultFloats[6].f),
"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f))
@ -3449,7 +3449,7 @@ func TestScrapeLoopCompression(t *testing.T) {
Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}},
},
})
require.Equal(t, 1, len(sp.ActiveTargets()))
require.Len(t, sp.ActiveTargets(), 1)
select {
case <-time.After(5 * time.Second):

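The conversions above are testifylint's `len` rule: length checks written as require.Equal(t, n, len(x)) become require.Len(t, x, n), which prints the collection contents on failure instead of two bare integers. A minimal sketch of the pattern, using a hypothetical `got` slice rather than the scrape appender above:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestLenRule(t *testing.T) {
	got := []string{"a", "b", "c"}

	// Before: a failure prints only the integers 3 and len(got).
	require.Equal(t, 3, len(got))

	// After: a failure prints the slice itself next to the expected length.
	require.Len(t, got, 3)
}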
View file

@ -100,7 +100,7 @@ func TestFanout_SelectSorted(t *testing.T) {
}
require.Equal(t, labelsResult, outputLabel)
require.Equal(t, inputTotalSize, len(result))
require.Len(t, result, inputTotalSize)
})
t.Run("chunk querier", func(t *testing.T) {
querier, err := fanoutStorage.ChunkQuerier(0, 8000)
@ -128,7 +128,7 @@ func TestFanout_SelectSorted(t *testing.T) {
require.NoError(t, seriesSet.Err())
require.Equal(t, labelsResult, outputLabel)
require.Equal(t, inputTotalSize, len(result))
require.Len(t, result, inputTotalSize)
})
}
@ -178,7 +178,7 @@ func TestFanoutErrors(t *testing.T) {
}
if tc.warning != nil {
require.Greater(t, len(ss.Warnings()), 0, "warnings expected")
require.NotEmpty(t, ss.Warnings(), "warnings expected")
w := ss.Warnings()
require.Error(t, w.AsErrors()[0])
require.Equal(t, tc.warning.Error(), w.AsStrings("", 0)[0])
@ -204,7 +204,7 @@ func TestFanoutErrors(t *testing.T) {
}
if tc.warning != nil {
require.Greater(t, len(ss.Warnings()), 0, "warnings expected")
require.NotEmpty(t, ss.Warnings(), "warnings expected")
w := ss.Warnings()
require.Error(t, w.AsErrors()[0])
require.Equal(t, tc.warning.Error(), w.AsStrings("", 0)[0])

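The warning checks above show testifylint's `empty` rule: require.Greater(t, len(x), 0) carries no useful failure output, while require.NotEmpty dumps the collection. A sketch with a hypothetical warnings slice:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestEmptyRule(t *testing.T) {
	warnings := []string{"remote querier failed"} // hypothetical warnings

	// Before: a failure only reports that 0 is not greater than 0.
	require.Greater(t, len(warnings), 0, "warnings expected")

	// After: a failure prints the (empty) collection.
	require.NotEmpty(t, warnings, "warnings expected")
}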
View file

@ -68,26 +68,26 @@ func TestMemoizedSeriesIterator(t *testing.T) {
fSample{t: 400, f: 12},
}), 2)
require.Equal(t, it.Seek(-123), chunkenc.ValFloat, "seek failed")
require.Equal(t, chunkenc.ValFloat, it.Seek(-123), "seek failed")
sampleEq(1, 2, nil)
prevSampleEq(0, 0, nil, false)
require.Equal(t, it.Seek(5), chunkenc.ValFloat, "seek failed")
require.Equal(t, chunkenc.ValFloat, it.Seek(5), "seek failed")
sampleEq(5, 6, nil)
prevSampleEq(4, 5, nil, true)
// Seek to a histogram sample with a previous float sample.
require.Equal(t, it.Seek(102), chunkenc.ValFloatHistogram, "seek failed")
require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(102), "seek failed")
sampleEq(102, 10, tsdbutil.GenerateTestFloatHistogram(0))
prevSampleEq(101, 10, nil, true)
// Attempt to seek backwards (no-op).
require.Equal(t, it.Seek(50), chunkenc.ValFloatHistogram, "seek failed")
require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(50), "seek failed")
sampleEq(102, 10, tsdbutil.GenerateTestFloatHistogram(0))
prevSampleEq(101, 10, nil, true)
// Seek to a float histogram sample with a previous histogram sample.
require.Equal(t, it.Seek(104), chunkenc.ValFloatHistogram, "seek failed")
require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(104), "seek failed")
sampleEq(104, 0, tsdbutil.GenerateTestFloatHistogram(2))
prevSampleEq(103, 0, tsdbutil.GenerateTestFloatHistogram(1), true)
@ -101,7 +101,7 @@ func TestMemoizedSeriesIterator(t *testing.T) {
sampleEq(400, 12, nil)
prevSampleEq(399, 0, tsdbutil.GenerateTestFloatHistogram(6), true)
require.Equal(t, it.Seek(1024), chunkenc.ValNone, "seek succeeded unexpectedly")
require.Equal(t, chunkenc.ValNone, it.Seek(1024), "seek succeeded unexpectedly")
}
func BenchmarkMemoizedSeriesIterator(b *testing.B) {

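The Seek assertions above are the `expected-actual` rule: require.Equal takes (t, expected, actual), so a computed value in the expected slot makes the failure output read backwards. A sketch with a hypothetical seek helper standing in for the iterator:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestExpectedActualRule(t *testing.T) {
	seek := func(ts int64) string { return "ValFloat" } // hypothetical stand-in

	// Before: the computed value sits in the expected position, so a
	// failure labels the two values the wrong way around.
	require.Equal(t, seek(-123), "ValFloat", "seek failed")

	// After: the constant comes first, the value under test second.
	require.Equal(t, "ValFloat", seek(-123), "seek failed")
}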
View file

@ -1547,7 +1547,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
}
require.Subset(t, tcase.expectedWarnings, res.Warnings())
require.Equal(t, tcase.expectedErrs[0], res.Err())
require.True(t, errors.Is(res.Err(), tcase.expectedErrs[0]), "expected error doesn't match")
require.ErrorIs(t, res.Err(), tcase.expectedErrs[0], "expected error doesn't match")
require.Equal(t, tcase.expectedSelectsSeries, lbls)
for _, qr := range q.queriers {
@ -1563,7 +1563,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
t.Run("LabelNames", func(t *testing.T) {
res, w, err := q.LabelNames(ctx)
require.Subset(t, tcase.expectedWarnings, w)
require.True(t, errors.Is(err, tcase.expectedErrs[1]), "expected error doesn't match")
require.ErrorIs(t, err, tcase.expectedErrs[1], "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res)
if err != nil {
@ -1578,7 +1578,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
t.Run("LabelValues", func(t *testing.T) {
res, w, err := q.LabelValues(ctx, "test")
require.Subset(t, tcase.expectedWarnings, w)
require.True(t, errors.Is(err, tcase.expectedErrs[2]), "expected error doesn't match")
require.ErrorIs(t, err, tcase.expectedErrs[2], "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res)
if err != nil {
@ -1594,7 +1594,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue")
res, w, err := q.LabelValues(ctx, "test2", matcher)
require.Subset(t, tcase.expectedWarnings, w)
require.True(t, errors.Is(err, tcase.expectedErrs[3]), "expected error doesn't match")
require.ErrorIs(t, err, tcase.expectedErrs[3], "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res)
if err != nil {

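The error checks above are the `error-is-as` rule: wrapping errors.Is in require.True reduces the failure output to a bare false, whereas require.ErrorIs prints both error chains. A sketch with a hypothetical sentinel error:

package example_test

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestErrorIsRule(t *testing.T) {
	errSentinel := errors.New("boom")                  // hypothetical sentinel
	err := fmt.Errorf("query failed: %w", errSentinel) // value under test

	// Before: a failure prints only "false".
	require.True(t, errors.Is(err, errSentinel), "expected error doesn't match")

	// After: a failure prints err and the target it was compared against.
	require.ErrorIs(t, err, errSentinel, "expected error doesn't match")
}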
View file

@ -100,7 +100,7 @@ func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() {
ad.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil)
tokenProvider, err := newTokenProvider(c.cfg, ad.mockCredential)
ad.Assert().NoError(err)
ad.Require().NoError(err)
rt := &azureADRoundTripper{
next: promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
@ -113,15 +113,15 @@ func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() {
cli := &http.Client{Transport: rt}
req, err := http.NewRequest(http.MethodPost, "https://example.com", strings.NewReader("Hello, world!"))
ad.Assert().NoError(err)
ad.Require().NoError(err)
_, err = cli.Do(req)
ad.Assert().NoError(err)
ad.Assert().NotNil(gotReq)
ad.Require().NoError(err)
ad.NotNil(gotReq)
origReq := gotReq
ad.Assert().NotEmpty(origReq.Header.Get("Authorization"))
ad.Assert().Equal("Bearer "+testTokenString, origReq.Header.Get("Authorization"))
ad.NotEmpty(origReq.Header.Get("Authorization"))
ad.Equal("Bearer "+testTokenString, origReq.Header.Get("Authorization"))
}
}
@ -258,9 +258,9 @@ func (s *TokenProviderTestSuite) TestNewTokenProvider() {
if c.err != "" {
actualTokenProvider, actualErr := newTokenProvider(c.cfg, s.mockCredential)
s.Assert().Nil(actualTokenProvider)
s.Assert().NotNil(actualErr)
s.Assert().ErrorContains(actualErr, c.err)
s.Nil(actualTokenProvider)
s.Require().Error(actualErr)
s.Require().ErrorContains(actualErr, c.err)
} else {
testToken := &azcore.AccessToken{
Token: testTokenString,
@ -272,21 +272,21 @@ func (s *TokenProviderTestSuite) TestNewTokenProvider() {
actualTokenProvider, actualErr := newTokenProvider(c.cfg, s.mockCredential)
s.Assert().NotNil(actualTokenProvider)
s.Assert().Nil(actualErr)
s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background()))
s.NotNil(actualTokenProvider)
s.Require().NoError(actualErr)
s.NotNil(actualTokenProvider.getAccessToken(context.Background()))
// Token set to refresh at half of the expiry time. The test tokens are set to expire in 5s.
// Hence, the 4-second wait to check if the token is refreshed.
time.Sleep(4 * time.Second)
s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background()))
s.NotNil(actualTokenProvider.getAccessToken(context.Background()))
s.mockCredential.AssertNumberOfCalls(s.T(), "GetToken", 2*mockGetTokenCallCounter)
mockGetTokenCallCounter++
accessToken, err := actualTokenProvider.getAccessToken(context.Background())
s.Assert().Nil(err)
s.Assert().NotEqual(accessToken, testTokenString)
s.Require().NoError(err)
s.NotEqual(testTokenString, accessToken)
}
}
}

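The azuread changes combine two suite-related rules: `require-error` moves error checks onto Require() so a failed check aborts the test instead of letting it run on with bad state, and `suite-extra-assert-call` drops the redundant Assert() receiver, since the suite embeds the assertion methods directly. A minimal sketch with a hypothetical suite:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

type DemoSuite struct {
	suite.Suite
}

func (s *DemoSuite) TestStyle() {
	value, err := "ok", error(nil) // hypothetical results under test

	// Error checks go through Require() so a failure stops the test here.
	s.Require().NoError(err)

	// Plain assertions call the methods embedded in suite.Suite directly,
	// with no s.Assert() indirection.
	s.Equal("ok", value)
}

func TestDemoSuite(t *testing.T) {
	suite.Run(t, new(DemoSuite))
}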
View file

@ -46,7 +46,7 @@ func TestChunkedReaderCanReadFromChunkedWriter(t *testing.T) {
for _, msg := range msgs {
n, err := w.Write(msg)
require.NoError(t, err)
require.Equal(t, len(msg), n)
require.Len(t, msg, n)
}
i := 0

View file

@ -755,7 +755,7 @@ func TestStreamResponse(t *testing.T) {
maxBytesInFrame,
&sync.Pool{})
require.Nil(t, warning)
require.Nil(t, err)
require.NoError(t, err)
expectData := []*prompb.ChunkedSeries{{
Labels: lbs1,
Chunks: []prompb.Chunk{chunk, chunk},

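The `error-nil` rule replaces nil checks on errors with the dedicated helpers: require.NoError prints err.Error() on failure and states the intent, where require.Nil treats the error as an arbitrary object. A sketch:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestErrorNilRule(t *testing.T) {
	var err error // hypothetical result of the call under test

	// Before: works, but reads as a generic object check.
	require.Nil(t, err)

	// After: states the intent and prints the error message on failure.
	require.NoError(t, err)
}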
View file

@ -32,7 +32,7 @@ func TestIntern(t *testing.T) {
interner.intern(testString)
interned, ok := interner.pool[testString]
require.Equal(t, true, ok)
require.True(t, ok)
require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
}
@ -43,13 +43,13 @@ func TestIntern_MultiRef(t *testing.T) {
interner.intern(testString)
interned, ok := interner.pool[testString]
require.Equal(t, true, ok)
require.True(t, ok)
require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
interner.intern(testString)
interned, ok = interner.pool[testString]
require.Equal(t, true, ok)
require.True(t, ok)
require.Equal(t, int64(2), interned.refs.Load(), fmt.Sprintf("expected refs to be 2 but it was %d", interned.refs.Load()))
}
@ -60,12 +60,12 @@ func TestIntern_DeleteRef(t *testing.T) {
interner.intern(testString)
interned, ok := interner.pool[testString]
require.Equal(t, true, ok)
require.True(t, ok)
require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
interner.release(testString)
_, ok = interner.pool[testString]
require.Equal(t, false, ok)
require.False(t, ok)
}
func TestIntern_MultiRef_Concurrent(t *testing.T) {
@ -74,7 +74,7 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) {
interner.intern(testString)
interned, ok := interner.pool[testString]
require.Equal(t, true, ok)
require.True(t, ok)
require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
go interner.release(testString)
@ -86,6 +86,6 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) {
interner.mtx.RLock()
interned, ok = interner.pool[testString]
interner.mtx.RUnlock()
require.Equal(t, true, ok)
require.True(t, ok)
require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
}

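The interner checks above are the `bool-compare` rule: comparing against a boolean literal with require.Equal(t, true, ok) is spelled require.True(t, ok), which reads as the sentence it asserts. A sketch with a hypothetical lookup:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestBoolCompareRule(t *testing.T) {
	pool := map[string]int{"foo": 1} // hypothetical intern pool
	_, ok := pool["foo"]

	// Before: an equality check against the literal true.
	require.Equal(t, true, ok)

	// After: the dedicated helper.
	require.True(t, ok)
}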
View file

@ -94,7 +94,7 @@ func TestWatchScrapeManager_NotReady(t *testing.T) {
}
mw := NewMetadataWatcher(nil, smm, "", wt, interval, deadline)
require.Equal(t, false, mw.ready())
require.False(t, mw.ready())
mw.collect()

View file

@ -188,7 +188,7 @@ func TestMetadataDelivery(t *testing.T) {
m.AppendMetadata(context.Background(), metadata)
require.Equal(t, numMetadata, len(c.receivedMetadata))
require.Len(t, c.receivedMetadata, numMetadata)
// One more write than the rounded quotient should be performed in order to get samples that didn't
// fit into MaxSamplesPerSend.
require.Equal(t, numMetadata/mcfg.MaxSamplesPerSend+1, c.writesReceived)
@ -318,9 +318,9 @@ func TestSeriesReset(t *testing.T) {
}
m.StoreSeries(series, i)
}
require.Equal(t, numSegments*numSeries, len(m.seriesLabels))
require.Len(t, m.seriesLabels, numSegments*numSeries)
m.SeriesReset(2)
require.Equal(t, numSegments*numSeries/2, len(m.seriesLabels))
require.Len(t, m.seriesLabels, numSegments*numSeries/2)
}
func TestReshard(t *testing.T) {
@ -1288,7 +1288,7 @@ func TestQueueManagerMetrics(t *testing.T) {
// Make sure metrics pass linting.
problems, err := client_testutil.GatherAndLint(reg)
require.NoError(t, err)
require.Equal(t, 0, len(problems), "Metric linting problems detected: %v", problems)
require.Empty(t, problems, "Metric linting problems detected: %v", problems)
// Make sure all metrics were unregistered. A failure here means you need to
// unregister a metric in `queueManagerMetrics.unregister()`.

View file

@ -97,7 +97,7 @@ func TestSampledReadEndpoint(t *testing.T) {
err = proto.Unmarshal(uncompressed, &resp)
require.NoError(t, err)
require.Equal(t, 2, len(resp.Results), "Expected 2 results.")
require.Len(t, resp.Results, 2, "Expected 2 results.")
require.Equal(t, &prompb.QueryResult{
Timeseries: []*prompb.TimeSeries{
@ -191,7 +191,7 @@ func BenchmarkStreamReadEndpoint(b *testing.B) {
results = append(results, res)
}
require.Equal(b, 6, len(results), "Expected 6 results.")
require.Len(b, results, 6, "Expected 6 results.")
}
}
@ -291,7 +291,7 @@ func TestStreamReadEndpoint(t *testing.T) {
results = append(results, res)
}
require.Equal(t, 6, len(results), "Expected 6 results.")
require.Len(t, results, 6, "Expected 6 results.")
require.Equal(t, []*prompb.ChunkedReadResponse{
{

View file

@ -186,7 +186,7 @@ func TestSeriesSetFilter(t *testing.T) {
filtered := newSeriesSetFilter(FromQueryResult(true, tc.in), tc.toRemove)
act, ws, err := ToQueryResult(filtered, 1e6)
require.NoError(t, err)
require.Equal(t, 0, len(ws))
require.Empty(t, ws)
require.Equal(t, tc.expected, act)
}
}

View file

@ -44,10 +44,10 @@ func TestStorageLifecycle(t *testing.T) {
require.NoError(t, s.ApplyConfig(conf))
// make sure remote write has a queue.
require.Equal(t, 1, len(s.rws.queues))
require.Len(t, s.rws.queues, 1)
// make sure remote read has a queryable.
require.Equal(t, 1, len(s.queryables))
require.Len(t, s.queryables, 1)
err := s.Close()
require.NoError(t, err)
@ -62,13 +62,13 @@ func TestUpdateRemoteReadConfigs(t *testing.T) {
GlobalConfig: config.GlobalConfig{},
}
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 0, len(s.queryables))
require.Empty(t, s.queryables)
conf.RemoteReadConfigs = []*config.RemoteReadConfig{
baseRemoteReadConfig("http://test-storage.com"),
}
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 1, len(s.queryables))
require.Len(t, s.queryables, 1)
err := s.Close()
require.NoError(t, err)
@ -85,14 +85,14 @@ func TestFilterExternalLabels(t *testing.T) {
},
}
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 0, len(s.queryables))
require.Empty(t, s.queryables)
conf.RemoteReadConfigs = []*config.RemoteReadConfig{
baseRemoteReadConfig("http://test-storage.com"),
}
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 1, len(s.queryables))
require.Len(t, s.queryables, 1)
require.Equal(t, 1, s.queryables[0].(*sampleAndChunkQueryableClient).externalLabels.Len())
err := s.Close()
@ -110,7 +110,7 @@ func TestIgnoreExternalLabels(t *testing.T) {
},
}
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 0, len(s.queryables))
require.Empty(t, s.queryables)
conf.RemoteReadConfigs = []*config.RemoteReadConfig{
baseRemoteReadConfig("http://test-storage.com"),
@ -119,7 +119,7 @@ func TestIgnoreExternalLabels(t *testing.T) {
conf.RemoteReadConfigs[0].FilterExternalLabels = false
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 1, len(s.queryables))
require.Len(t, s.queryables, 1)
require.Equal(t, 0, s.queryables[0].(*sampleAndChunkQueryableClient).externalLabels.Len())
err := s.Close()

View file

@ -228,7 +228,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) {
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
require.Equal(b, http.StatusNoContent, recorder.Code)
require.Equal(b, db.Head().NumSeries(), uint64(1000))
require.Equal(b, uint64(1000), db.Head().NumSeries())
var bufRequests [][]byte
for i := 0; i < 100; i++ {
@ -245,7 +245,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) {
recorder = httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
require.Equal(b, http.StatusNoContent, recorder.Code)
require.Equal(b, db.Head().NumSeries(), uint64(1000))
require.Equal(b, uint64(1000), db.Head().NumSeries())
}
}

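The NumSeries assertions above are again `expected-actual`, and the explicit uint64 conversion has to survive the swap: require.Equal compares values with their types, so an untyped 1000 (an int) would never equal a uint64. A sketch with a hypothetical accessor:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestTypedExpectedActual(t *testing.T) {
	numSeries := func() uint64 { return 1000 } // hypothetical accessor

	// Before: the computed value occupies the expected slot.
	require.Equal(t, numSeries(), uint64(1000))

	// After: the literal moves first and keeps its conversion, since
	// require.Equal(t, 1000, numSeries()) would fail on int vs uint64.
	require.Equal(t, uint64(1000), numSeries())
}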
View file

@ -212,7 +212,7 @@ func TestWriteStorageLifecycle(t *testing.T) {
},
}
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 1, len(s.queues))
require.Len(t, s.queues, 1)
err := s.Close()
require.NoError(t, err)
@ -233,14 +233,14 @@ func TestUpdateExternalLabels(t *testing.T) {
hash, err := toHash(conf.RemoteWriteConfigs[0])
require.NoError(t, err)
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 1, len(s.queues))
require.Equal(t, 0, len(s.queues[hash].externalLabels))
require.Len(t, s.queues, 1)
require.Empty(t, s.queues[hash].externalLabels)
conf.GlobalConfig.ExternalLabels = externalLabels
hash, err = toHash(conf.RemoteWriteConfigs[0])
require.NoError(t, err)
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 1, len(s.queues))
require.Len(t, s.queues, 1)
require.Equal(t, []labels.Label{{Name: "external", Value: "true"}}, s.queues[hash].externalLabels)
err = s.Close()
@ -262,10 +262,10 @@ func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
require.NoError(t, err)
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 1, len(s.queues))
require.Len(t, s.queues, 1)
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 1, len(s.queues))
require.Len(t, s.queues, 1)
_, hashExists := s.queues[hash]
require.True(t, hashExists, "Queue pointer should have remained the same")
@ -312,7 +312,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
}
}
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 3, len(s.queues))
require.Len(t, s.queues, 3)
hashes := make([]string, len(conf.RemoteWriteConfigs))
queues := make([]*QueueManager, len(conf.RemoteWriteConfigs))
@ -334,7 +334,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
RemoteWriteConfigs: []*config.RemoteWriteConfig{c0, c1, c2},
}
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 3, len(s.queues))
require.Len(t, s.queues, 3)
_, hashExists := s.queues[hashes[0]]
require.False(t, hashExists, "The queue for the first remote write configuration should have been restarted because the relabel configuration has changed.")
@ -350,7 +350,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
c1.HTTPClientConfig.BearerToken = "bar"
err := s.ApplyConfig(conf)
require.NoError(t, err)
require.Equal(t, 3, len(s.queues))
require.Len(t, s.queues, 3)
_, hashExists = s.queues[hashes[0]]
require.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
@ -367,7 +367,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
RemoteWriteConfigs: []*config.RemoteWriteConfig{c1, c2},
}
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 2, len(s.queues))
require.Len(t, s.queues, 2)
_, hashExists = s.queues[hashes[0]]
require.False(t, hashExists, "If a config is removed, the queue should be stopped and recreated.")
@ -399,9 +399,9 @@ func TestOTLPWriteHandler(t *testing.T) {
resp := recorder.Result()
require.Equal(t, http.StatusOK, resp.StatusCode)
require.Equal(t, 12, len(appendable.samples)) // 1 (counter) + 1 (gauge) + 1 (target_info) + 7 (hist_bucket) + 2 (hist_sum, hist_count)
require.Equal(t, 1, len(appendable.histograms)) // 1 (exponential histogram)
require.Equal(t, 1, len(appendable.exemplars)) // 1 (exemplar)
require.Len(t, appendable.samples, 12) // 1 (counter) + 1 (gauge) + 1 (target_info) + 7 (hist_bucket) + 2 (hist_sum, hist_count)
require.Len(t, appendable.histograms, 1) // 1 (exponential histogram)
require.Len(t, appendable.exemplars, 1) // 1 (exemplar)
}
func generateOTLPWriteRequest(t *testing.T) pmetricotlp.ExportRequest {

View file

@ -118,7 +118,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) {
j := 0
for iter.Next() == chunkenc.ValFloat {
ts, v := iter.At()
require.EqualValues(t, series[i].samples[j], fSample{t: ts, f: v})
require.EqualValues(t, fSample{t: ts, f: v}, series[i].samples[j])
j++
}
}

View file

@ -131,5 +131,5 @@ func TestTracerProviderShutdown(t *testing.T) {
// Check if we closed the done channel.
_, ok := <-m.done
require.Equal(t, ok, false)
require.False(t, ok)
}

View file

@ -59,14 +59,14 @@ func TestSetCompactionFailed(t *testing.T) {
blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 1))
b, err := OpenBlock(nil, blockDir, nil)
require.NoError(t, err)
require.Equal(t, false, b.meta.Compaction.Failed)
require.False(t, b.meta.Compaction.Failed)
require.NoError(t, b.setCompactionFailed())
require.Equal(t, true, b.meta.Compaction.Failed)
require.True(t, b.meta.Compaction.Failed)
require.NoError(t, b.Close())
b, err = OpenBlock(nil, blockDir, nil)
require.NoError(t, err)
require.Equal(t, true, b.meta.Compaction.Failed)
require.True(t, b.meta.Compaction.Failed)
require.NoError(t, b.Close())
}
@ -166,7 +166,7 @@ func TestCorruptedChunk(t *testing.T) {
require.NoError(t, err)
n, err := f.Write([]byte("x"))
require.NoError(t, err)
require.Equal(t, n, 1)
require.Equal(t, 1, n)
},
iterErr: errors.New("cannot populate chunk 8 from block 00000000000000000000000000: checksum mismatch expected:cfc0526c, actual:34815eae"),
},
@ -178,7 +178,7 @@ func TestCorruptedChunk(t *testing.T) {
blockDir := createBlock(t, tmpdir, []storage.Series{series})
files, err := sequenceFiles(chunkDir(blockDir))
require.NoError(t, err)
require.Greater(t, len(files), 0, "No chunk created.")
require.NotEmpty(t, files, "No chunk created.")
f, err := os.OpenFile(files[0], os.O_RDWR, 0o666)
require.NoError(t, err)
@ -224,7 +224,7 @@ func TestLabelValuesWithMatchers(t *testing.T) {
blockDir := createBlock(t, tmpdir, seriesEntries)
files, err := sequenceFiles(chunkDir(blockDir))
require.NoError(t, err)
require.Greater(t, len(files), 0, "No chunk created.")
require.NotEmpty(t, files, "No chunk created.")
// Check open err.
block, err := OpenBlock(nil, blockDir, nil)
@ -352,16 +352,14 @@ func TestReadIndexFormatV1(t *testing.T) {
q, err := NewBlockQuerier(block, 0, 1000)
require.NoError(t, err)
require.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")),
map[string][]chunks.Sample{`{foo="bar"}`: {sample{t: 1, f: 2}}})
require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: {sample{t: 1, f: 2}}}, query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")))
q, err = NewBlockQuerier(block, 0, 1000)
require.NoError(t, err)
require.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.?$")),
map[string][]chunks.Sample{
`{foo="bar"}`: {sample{t: 1, f: 2}},
`{foo="baz"}`: {sample{t: 3, f: 4}},
})
require.Equal(t, map[string][]chunks.Sample{
`{foo="bar"}`: {sample{t: 1, f: 2}},
`{foo="baz"}`: {sample{t: 3, f: 4}},
}, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.?$")))
}
func BenchmarkLabelValuesWithMatchers(b *testing.B) {
@ -383,7 +381,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) {
blockDir := createBlock(b, tmpdir, seriesEntries)
files, err := sequenceFiles(chunkDir(blockDir))
require.NoError(b, err)
require.Greater(b, len(files), 0, "No chunk created.")
require.NotEmpty(b, files, "No chunk created.")
// Check open err.
block, err := OpenBlock(nil, blockDir, nil)
@ -402,7 +400,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) {
for benchIdx := 0; benchIdx < b.N; benchIdx++ {
actualValues, err := indexReader.LabelValues(ctx, "b_tens", matchers...)
require.NoError(b, err)
require.Equal(b, 9, len(actualValues))
require.Len(b, actualValues, 9)
}
}
@ -436,7 +434,7 @@ func TestLabelNamesWithMatchers(t *testing.T) {
blockDir := createBlock(t, tmpdir, seriesEntries)
files, err := sequenceFiles(chunkDir(blockDir))
require.NoError(t, err)
require.Greater(t, len(files), 0, "No chunk created.")
require.NotEmpty(t, files, "No chunk created.")
// Check open err.
block, err := OpenBlock(nil, blockDir, nil)

View file

@ -246,8 +246,8 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) {
// This is how span changes will be handled.
hApp, _ := app.(*FloatHistogramAppender)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2.ToFloat())
require.Greater(t, len(posInterjections), 0)
require.Greater(t, len(negInterjections), 0)
require.NotEmpty(t, posInterjections)
require.NotEmpty(t, negInterjections)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans)
@ -348,8 +348,8 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1}
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Greater(t, len(posInterjections), 0)
require.Equal(t, 0, len(negInterjections))
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
@ -370,8 +370,8 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
h2.PositiveBuckets = []float64{6, 3, 2, 4, 5, 1}
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
@ -385,8 +385,8 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1}
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
@ -406,8 +406,8 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0}
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
@ -433,8 +433,8 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
h2.PositiveBuckets = []float64{1, 2, 5, 3, 3, 2, 4, 5, 1}
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
@ -810,10 +810,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1}
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.NotEmpty(t, pI)
require.Empty(t, nI)
require.Empty(t, pBackwardI)
require.Empty(t, nBackwardI)
require.True(t, ok)
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
@ -834,10 +834,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
h2.PositiveBuckets = []float64{6, 3, 3, 2, 5, 1}
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Len(t, pI, 0)
require.Len(t, nI, 0)
require.Greater(t, len(pBackwardI), 0)
require.Len(t, nBackwardI, 0)
require.Empty(t, pI)
require.Empty(t, nI)
require.NotEmpty(t, pBackwardI)
require.Empty(t, nBackwardI)
require.True(t, ok)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
@ -856,10 +856,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
h2.PositiveBuckets = []float64{6, 3, 2, 4, 5, 1}
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Greater(t, len(pBackwardI), 0)
require.Len(t, nI, 0)
require.Len(t, nBackwardI, 0)
require.NotEmpty(t, pI)
require.NotEmpty(t, pBackwardI)
require.Empty(t, nI)
require.Empty(t, nBackwardI)
require.True(t, ok)
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
@ -872,10 +872,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1}
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Len(t, pI, 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.Empty(t, pI)
require.Empty(t, nI)
require.Empty(t, pBackwardI)
require.Empty(t, nBackwardI)
require.True(t, ok)
assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
@ -894,10 +894,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0}
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.NotEmpty(t, pI)
require.Empty(t, nI)
require.Empty(t, pBackwardI)
require.Empty(t, nBackwardI)
require.True(t, ok)
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)
@ -920,10 +920,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) {
h2.PositiveBuckets = []float64{1, 2, 5, 3, 3, 2, 4, 5, 1}
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.NotEmpty(t, pI)
require.Empty(t, nI)
require.Empty(t, pBackwardI)
require.Empty(t, nBackwardI)
require.True(t, ok)
assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType)

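The histogram interjection checks above show both spellings the `empty` rule rewrites: require.Len(t, x, 0) becomes require.Empty and require.Greater(t, len(x), 0) becomes require.NotEmpty. A sketch with hypothetical interjection slices:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestEmptySpellings(t *testing.T) {
	var nI []int         // hypothetical: no negative interjections
	pI := []int{1, 0, 2} // hypothetical: some positive interjections

	// Before / after for zero-length checks.
	require.Len(t, nI, 0)
	require.Empty(t, nI)

	// Before / after for non-empty checks.
	require.Greater(t, len(pI), 0)
	require.NotEmpty(t, pI)
}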
View file

@ -257,8 +257,8 @@ func TestHistogramChunkBucketChanges(t *testing.T) {
// This is how span changes will be handled.
hApp, _ := app.(*HistogramAppender)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Greater(t, len(posInterjections), 0)
require.Greater(t, len(negInterjections), 0)
require.NotEmpty(t, posInterjections)
require.NotEmpty(t, negInterjections)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans)
@ -365,8 +365,8 @@ func TestHistogramChunkAppendable(t *testing.T) {
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Greater(t, len(posInterjections), 0)
require.Equal(t, 0, len(negInterjections))
require.NotEmpty(t, posInterjections)
require.Empty(t, negInterjections)
require.True(t, ok) // Only new buckets came in.
require.False(t, cr)
@ -386,8 +386,8 @@ func TestHistogramChunkAppendable(t *testing.T) {
h2.PositiveBuckets = []int64{6, -3, -1, 2, 1, -4} // counts: 6, 3, 2, 4, 5, 1 (total 21)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
@ -401,8 +401,8 @@ func TestHistogramChunkAppendable(t *testing.T) {
h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // counts: 6, 2, 3, 2, 4, 5, 1 (total 23)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
@ -425,8 +425,8 @@ func TestHistogramChunkAppendable(t *testing.T) {
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 0} // 7 5 1 3 1 0 2 5 5 0 0 (total 29)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
@ -455,8 +455,8 @@ func TestHistogramChunkAppendable(t *testing.T) {
h2.PositiveBuckets = []int64{1, 1, 3, -2, 0, -1, 2, 1, -4} // counts: 1, 2, 5, 3, 3, 2, 4, 5, 1 (total 26)
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
require.Equal(t, 0, len(posInterjections))
require.Equal(t, 0, len(negInterjections))
require.Empty(t, posInterjections)
require.Empty(t, negInterjections)
require.False(t, ok) // Need to cut a new chunk.
require.True(t, cr)
@ -987,10 +987,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // {7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1}
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.NotEmpty(t, pI)
require.Empty(t, nI)
require.Empty(t, pBackwardI)
require.Empty(t, nBackwardI)
require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
@ -1015,10 +1015,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
h2.PositiveBuckets = []int64{6, -3, 0, -1, 3, -4} // {6, 3, 3, 2, 5, 1}
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Len(t, pI, 0)
require.Len(t, nI, 0)
require.Greater(t, len(pBackwardI), 0)
require.Len(t, nBackwardI, 0)
require.Empty(t, pI)
require.Empty(t, nI)
require.NotEmpty(t, pBackwardI)
require.Empty(t, nBackwardI)
require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
@ -1041,10 +1041,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
h2.PositiveBuckets = []int64{6, -3, -1, 2, 1, -4} // {6, 3, 2, 4, 5, 1}
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Greater(t, len(pBackwardI), 0)
require.Len(t, nI, 0)
require.Len(t, nBackwardI, 0)
require.NotEmpty(t, pI)
require.NotEmpty(t, pBackwardI)
require.Empty(t, nI)
require.Empty(t, nBackwardI)
require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
@ -1061,10 +1061,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // {6, 2, 3, 2, 4, 5, 1}
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Len(t, pI, 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.Empty(t, pI)
require.Empty(t, nI)
require.Empty(t, pBackwardI)
require.Empty(t, nBackwardI)
require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
@ -1087,10 +1087,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 0} // {7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0}
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.NotEmpty(t, pI)
require.Empty(t, nI)
require.Empty(t, pBackwardI)
require.Empty(t, nBackwardI)
require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)
@ -1117,10 +1117,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) {
h2.PositiveBuckets = []int64{1, 1, 3, -2, 0, -1, 2, 1, -4} // {1, 2, 5, 3, 3, 2, 4, 5, 1}
pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2)
require.Greater(t, len(pI), 0)
require.Len(t, nI, 0)
require.Len(t, pBackwardI, 0)
require.Len(t, nBackwardI, 0)
require.NotEmpty(t, pI)
require.Empty(t, nI)
require.Empty(t, pBackwardI)
require.Empty(t, nBackwardI)
require.True(t, ok)
newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false)

View file

@ -128,7 +128,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
}
// Checking on-disk bytes for the first file.
require.Equal(t, 3, len(hrw.mmappedChunkFiles), "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles))
require.Len(t, hrw.mmappedChunkFiles, 3, "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles))
require.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers))
actualBytes, err := os.ReadFile(firstFileName)
@ -173,7 +173,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
idx++
return nil
}))
require.Equal(t, len(expectedData), idx)
require.Len(t, expectedData, idx)
}
// TestChunkDiskMapper_Truncate tests
@ -214,7 +214,7 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
for _, i := range remainingFiles {
_, ok := hrw.mmappedChunkFiles[i]
require.Equal(t, true, ok)
require.True(t, ok)
}
}
@ -471,7 +471,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
nonEmptyFile() // 2.
nonEmptyFile() // 3.
require.Equal(t, 3, len(hrw.mmappedChunkFiles))
require.Len(t, hrw.mmappedChunkFiles, 3)
lastFile := 0
for idx := range hrw.mmappedChunkFiles {
if idx > lastFile {
@ -500,7 +500,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
hrw = createChunkDiskMapper(t, dir)
// Removed from memory.
require.Equal(t, 3, len(hrw.mmappedChunkFiles))
require.Len(t, hrw.mmappedChunkFiles, 3)
for idx := range hrw.mmappedChunkFiles {
require.LessOrEqual(t, idx, lastFile, "file index is bigger than previous last file")
}
@ -508,7 +508,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
// Removed even from disk.
files, err := os.ReadDir(dir)
require.NoError(t, err)
require.Equal(t, 3, len(files))
require.Len(t, files, 3)
for _, fi := range files {
seq, err := strconv.ParseUint(fi.Name(), 10, 64)
require.NoError(t, err)

View file

@ -29,33 +29,33 @@ func (q *writeJobQueue) assertInvariants(t *testing.T) {
totalSize := 0
for s := q.first; s != nil; s = s.nextSegment {
require.True(t, s.segment != nil)
require.NotNil(t, s.segment)
// Next read index is less than or equal to the next write index (we cannot read past written jobs).
require.True(t, s.nextRead <= s.nextWrite)
require.LessOrEqual(t, s.nextRead, s.nextWrite)
// Number of unread elements in this segment.
totalSize += s.nextWrite - s.nextRead
// First segment can be partially read, other segments were not read yet.
if s == q.first {
require.True(t, s.nextRead >= 0)
require.GreaterOrEqual(t, s.nextRead, 0)
} else {
require.True(t, s.nextRead == 0)
require.Equal(t, 0, s.nextRead)
}
// If the first segment is empty (everything was read from it already), it must have extra capacity for
// additional elements, otherwise it would have been removed.
if s == q.first && s.nextRead == s.nextWrite {
require.True(t, s.nextWrite < len(s.segment))
require.Less(t, s.nextWrite, len(s.segment))
}
// Segments in the middle are full.
if s != q.first && s != q.last {
require.True(t, s.nextWrite == len(s.segment))
require.Len(t, s.segment, s.nextWrite)
}
// Last segment must have at least one element, or we wouldn't have created it.
require.True(t, s.nextWrite > 0)
require.Greater(t, s.nextWrite, 0)
}
require.Equal(t, q.size, totalSize)
@ -307,7 +307,7 @@ func TestQueuePushPopManyGoroutines(t *testing.T) {
readersWG.Wait()
// Check if we have all expected values
require.Equal(t, writeGoroutines*writes, len(refs))
require.Len(t, refs, writeGoroutines*writes)
}
func TestQueueSegmentIsKeptEvenIfEmpty(t *testing.T) {

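The queue invariants above exercise the `compares` rule: ordered comparisons wrapped in require.True lose both operands from the failure output, while require.Less, LessOrEqual, Greater and GreaterOrEqual print them. A sketch with hypothetical queue indices:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestComparesRule(t *testing.T) {
	nextRead, nextWrite := 2, 5 // hypothetical segment indices

	// Before: a failure prints only "false".
	require.True(t, nextRead <= nextWrite)

	// After: a failure prints both operands.
	require.LessOrEqual(t, nextRead, nextWrite)
}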
View file

@ -1222,7 +1222,7 @@ func TestDisableAutoCompactions(t *testing.T) {
}
require.Greater(t, prom_testutil.ToFloat64(db.metrics.compactionsSkipped), 0.0, "No compaction was skipped after the set timeout.")
require.Equal(t, 0, len(db.blocks))
require.Empty(t, db.blocks)
// Enable the compaction, trigger it and check that the block is persisted.
db.EnableCompactions()
@ -1236,7 +1236,7 @@ func TestDisableAutoCompactions(t *testing.T) {
}
time.Sleep(100 * time.Millisecond)
}
require.Greater(t, len(db.Blocks()), 0, "No block was persisted after the set timeout.")
require.NotEmpty(t, db.Blocks(), "No block was persisted after the set timeout.")
}
// TestCancelCompactions ensures that when the db is closed
@ -1259,7 +1259,7 @@ func TestCancelCompactions(t *testing.T) {
{
db, err := open(tmpdir, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil)
require.NoError(t, err)
require.Equal(t, 3, len(db.Blocks()), "initial block count mismatch")
require.Len(t, db.Blocks(), 3, "initial block count mismatch")
require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial compaction counter mismatch")
db.compactc <- struct{}{} // Trigger a compaction.
for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.PopulatingBlocks) <= 0 {
@ -1278,7 +1278,7 @@ func TestCancelCompactions(t *testing.T) {
{
db, err := open(tmpdirCopy, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil)
require.NoError(t, err)
require.Equal(t, 3, len(db.Blocks()), "initial block count mismatch")
require.Len(t, db.Blocks(), 3, "initial block count mismatch")
require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial compaction counter mismatch")
db.compactc <- struct{}{} // Trigger a compaction.
@ -1291,7 +1291,7 @@ func TestCancelCompactions(t *testing.T) {
actT := time.Since(start)
expT := timeCompactionUninterrupted / 2 // Closing the db in the middle of compaction should take less than half the time.
require.True(t, actT < expT, "closing the db took more than expected. exp: <%v, act: %v", expT, actT)
require.Less(t, actT, expT, "closing the db took more than expected. exp: <%v, act: %v", expT, actT)
// Make sure that no blocks were marked as compaction failed.
// This checks that the `context.Canceled` error is properly checked at all levels:

View file

@ -127,7 +127,7 @@ func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[str
result[name] = samples
}
require.NoError(t, ss.Err())
require.Equal(t, 0, len(ss.Warnings()))
require.Empty(t, ss.Warnings())
return result
}
@ -177,7 +177,7 @@ func queryChunks(t testing.TB, q storage.ChunkQuerier, matchers ...*labels.Match
result[name] = chks
}
require.NoError(t, ss.Err())
require.Equal(t, 0, len(ss.Warnings()))
require.Empty(t, ss.Warnings())
return result
}
@ -200,7 +200,7 @@ func TestDB_reloadOrder(t *testing.T) {
require.NoError(t, db.reloadBlocks())
blocks := db.Blocks()
require.Equal(t, 3, len(blocks))
require.Len(t, blocks, 3)
require.Equal(t, metas[1].MinTime, blocks[0].Meta().MinTime)
require.Equal(t, metas[1].MaxTime, blocks[0].Meta().MaxTime)
require.Equal(t, metas[0].MinTime, blocks[1].Meta().MinTime)
@ -471,7 +471,7 @@ Outer:
require.Equal(t, eok, rok)
if !eok {
require.Equal(t, 0, len(res.Warnings()))
require.Empty(t, res.Warnings())
continue Outer
}
sexp := expss.At()
@ -679,7 +679,7 @@ func TestDB_Snapshot(t *testing.T) {
require.NoError(t, series.Err())
}
require.NoError(t, seriesSet.Err())
require.Equal(t, 0, len(seriesSet.Warnings()))
require.Empty(t, seriesSet.Warnings())
require.Equal(t, 1000.0, sum)
}
@ -728,7 +728,7 @@ func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) {
require.NoError(t, series.Err())
}
require.NoError(t, seriesSet.Err())
require.Equal(t, 0, len(seriesSet.Warnings()))
require.Empty(t, seriesSet.Warnings())
// Since we snapshotted with MaxTime - 10, we expect 10 fewer samples.
require.Equal(t, 1000.0-10, sum)
@ -804,7 +804,7 @@ Outer:
require.Equal(t, eok, rok)
if !eok {
require.Equal(t, 0, len(res.Warnings()))
require.Empty(t, res.Warnings())
continue Outer
}
sexp := expss.At()
@ -972,7 +972,7 @@ func TestDB_e2e(t *testing.T) {
}
require.NoError(t, ss.Err())
require.Equal(t, 0, len(ss.Warnings()))
require.Empty(t, ss.Warnings())
require.Equal(t, expected, result)
q.Close()
@ -1004,7 +1004,7 @@ func TestWALFlushedOnDBClose(t *testing.T) {
values, ws, err := q.LabelValues(ctx, "labelname")
require.NoError(t, err)
require.Equal(t, 0, len(ws))
require.Empty(t, ws)
require.Equal(t, []string{"labelvalue"}, values)
}
@ -1041,7 +1041,7 @@ func TestWALSegmentSizeOptions(t *testing.T) {
files = append(files, fi)
}
}
require.Greater(t, len(files), 1, "current WALSegmentSize should result in more than a single WAL file.")
// All the full segment files (all but the last) should match the segment size option.
for _, f := range files[:len(files)-1] {
require.Equal(t, int64(segmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
@ -1254,7 +1254,7 @@ func TestTombstoneClean(t *testing.T) {
require.Equal(t, errExp, errRes)
require.Equal(t, smplExp, smplRes)
}
require.Equal(t, 0, len(res.Warnings()))
require.Empty(t, res.Warnings())
for _, b := range db.Blocks() {
require.Equal(t, tombstones.NewMemTombstones(), b.tombstones)
@ -1302,7 +1302,7 @@ func TestTombstoneCleanResultEmptyBlock(t *testing.T) {
// After cleaning tombstones that cover the entire block, no blocks should be left behind.
actualBlockDirs, err := blockDirs(db.dir)
require.NoError(t, err)
require.Equal(t, 0, len(actualBlockDirs))
require.Empty(t, actualBlockDirs)
}
// TestTombstoneCleanFail tests that a failing TombstoneClean doesn't leave any blocks behind.
@ -1348,7 +1348,7 @@ func TestTombstoneCleanFail(t *testing.T) {
require.NoError(t, err)
// Only one block should have been replaced by a new block.
require.Equal(t, len(oldBlockDirs), len(actualBlockDirs))
require.Equal(t, len(intersection(oldBlockDirs, actualBlockDirs)), len(actualBlockDirs)-1)
require.Len(t, intersection(oldBlockDirs, actualBlockDirs), len(actualBlockDirs)-1)
}
// TestTombstoneCleanRetentionLimitsRace tests that a CleanTombstones operation
@ -1640,9 +1640,9 @@ func TestSizeRetention(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 1, actRetentionCount, "metric retention count mismatch")
require.Equal(t, actSize, expSize, "metric db size doesn't match actual disk size")
require.Equal(t, expSize, actSize, "metric db size doesn't match actual disk size")
require.LessOrEqual(t, expSize, sizeLimit, "actual size (%v) is expected to be less than or equal to limit (%v)", expSize, sizeLimit)
require.Equal(t, len(blocks)-1, len(actBlocks), "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1)
require.Len(t, actBlocks, len(blocks)-1, "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1)
require.Equal(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime, "maxT mismatch of the first block")
require.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime, "maxT mismatch of the last block")
}
@ -1666,7 +1666,7 @@ func TestSizeRetentionMetric(t *testing.T) {
}()
actMaxBytes := int64(prom_testutil.ToFloat64(db.metrics.maxBytes))
require.Equal(t, actMaxBytes, c.expMaxBytes, "metric retention limit bytes mismatch")
require.Equal(t, c.expMaxBytes, actMaxBytes, "metric retention limit bytes mismatch")
}
}
@ -1739,7 +1739,7 @@ func TestNotMatcherSelectsLabelsUnsetSeries(t *testing.T) {
ss := q.Select(ctx, false, nil, c.selector...)
lres, _, ws, err := expandSeriesSet(ss)
require.NoError(t, err)
require.Equal(t, 0, len(ws))
require.Empty(t, ws)
require.Equal(t, c.series, lres)
}
}
@ -1772,7 +1772,7 @@ func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) {
metas[i] = BlockMeta{MinTime: int64(i * 10), MaxTime: int64((i + 1) * 10)}
}
require.Equal(t, 0, len(OverlappingBlocks(metas)), "we found unexpected overlaps")
require.Empty(t, OverlappingBlocks(metas), "we found unexpected overlaps")
// Add overlapping blocks. We have to establish order again since we aren't interested
// in trivial overlaps caused by unorderedness.
@ -2071,7 +2071,7 @@ func TestNoEmptyBlocks(t *testing.T) {
actBlocks, err := blockDirs(db.Dir())
require.NoError(t, err)
require.Equal(t, len(db.Blocks()), len(actBlocks))
require.Equal(t, 0, len(actBlocks))
require.Empty(t, actBlocks)
require.Equal(t, 0, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "no compaction should be triggered here")
})
@ -2091,7 +2091,7 @@ func TestNoEmptyBlocks(t *testing.T) {
actBlocks, err := blockDirs(db.Dir())
require.NoError(t, err)
require.Equal(t, len(db.Blocks()), len(actBlocks))
require.Equal(t, 0, len(actBlocks))
require.Empty(t, actBlocks)
app = db.Appender(ctx)
_, err = app.Append(0, defaultLabel, 1, 0)
@ -2112,7 +2112,7 @@ func TestNoEmptyBlocks(t *testing.T) {
actBlocks, err = blockDirs(db.Dir())
require.NoError(t, err)
require.Equal(t, len(db.Blocks()), len(actBlocks))
require.Equal(t, 1, len(actBlocks), "No blocks created when compacting with >0 samples")
require.Len(t, actBlocks, 1, "No blocks created when compacting with >0 samples")
})
t.Run(`When no new block is created from head, and there are some blocks on disk
@ -2144,8 +2144,8 @@ func TestNoEmptyBlocks(t *testing.T) {
}
oldBlocks := db.Blocks()
require.NoError(t, db.reloadBlocks()) // Reload the db to register the new blocks.
require.Equal(t, len(blocks)+len(oldBlocks), len(db.Blocks())) // Ensure all blocks are registered.
require.NoError(t, db.reloadBlocks()) // Reload the db to register the new blocks.
require.Len(t, db.Blocks(), len(blocks)+len(oldBlocks)) // Ensure all blocks are registered.
require.NoError(t, db.Delete(ctx, math.MinInt64, math.MaxInt64, defaultMatcher))
require.NoError(t, db.Compact(ctx))
require.Equal(t, 5, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "compaction should have been triggered here once for each block that have tombstones")
@ -2153,7 +2153,7 @@ func TestNoEmptyBlocks(t *testing.T) {
actBlocks, err := blockDirs(db.Dir())
require.NoError(t, err)
require.Equal(t, len(db.Blocks()), len(actBlocks))
require.Equal(t, 1, len(actBlocks), "All samples are deleted. Only the most recent block should remain after compaction.")
require.Len(t, actBlocks, 1, "All samples are deleted. Only the most recent block should remain after compaction.")
})
}
@ -2254,7 +2254,7 @@ func TestDB_LabelNames(t *testing.T) {
var ws annotations.Annotations
labelNames, ws, err = q.LabelNames(ctx)
require.NoError(t, err)
require.Equal(t, 0, len(ws))
require.Empty(t, ws)
require.NoError(t, q.Close())
require.Equal(t, tst.exp2, labelNames)
}
@ -2283,7 +2283,7 @@ func TestCorrectNumTombstones(t *testing.T) {
err := db.Compact(ctx)
require.NoError(t, err)
require.Equal(t, 1, len(db.blocks))
require.Len(t, db.blocks, 1)
require.NoError(t, db.Delete(ctx, 0, 1, defaultMatcher))
require.Equal(t, uint64(1), db.blocks[0].meta.Stats.NumTombstones)
@ -2346,7 +2346,7 @@ func TestBlockRanges(t *testing.T) {
}
time.Sleep(100 * time.Millisecond)
}
require.Equal(t, 2, len(db.Blocks()), "no new block created after the set timeout")
require.Len(t, db.Blocks(), 2, "no new block created after the set timeout")
if db.Blocks()[0].Meta().MaxTime > db.Blocks()[1].Meta().MinTime {
t.Fatalf("new block overlaps old:%v,new:%v", db.Blocks()[0].Meta(), db.Blocks()[1].Meta())
@ -2374,7 +2374,7 @@ func TestBlockRanges(t *testing.T) {
require.NoError(t, err)
defer db.Close()
require.Equal(t, 3, len(db.Blocks()), "db doesn't include expected number of blocks")
require.Len(t, db.Blocks(), 3, "db doesn't include expected number of blocks")
require.Equal(t, db.Blocks()[2].Meta().MaxTime, thirdBlockMaxt, "unexpected maxt of the last block")
app = db.Appender(ctx)
@ -2388,7 +2388,7 @@ func TestBlockRanges(t *testing.T) {
time.Sleep(100 * time.Millisecond)
}
require.Equal(t, 4, len(db.Blocks()), "no new block created after the set timeout")
require.Len(t, db.Blocks(), 4, "no new block created after the set timeout")
if db.Blocks()[2].Meta().MaxTime > db.Blocks()[3].Meta().MinTime {
t.Fatalf("new block overlaps old:%v,new:%v", db.Blocks()[2].Meta(), db.Blocks()[3].Meta())
@ -2569,7 +2569,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
defer func() { require.NoError(t, db.Close()) }()
blocks, err := db.Blocks()
require.NoError(t, err)
require.Equal(t, len(blocks), 1)
require.Len(t, blocks, 1)
querier, err := db.Querier(0, int64(maxt)-1)
require.NoError(t, err)
@ -2589,7 +2589,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
require.NoError(t, series.Err())
}
require.NoError(t, seriesSet.Err())
require.Equal(t, 0, len(seriesSet.Warnings()))
require.Empty(t, seriesSet.Warnings())
require.Equal(t, 1000.0, sum)
}
@ -2648,7 +2648,7 @@ func TestDBCannotSeePartialCommits(t *testing.T) {
ss := querier.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
_, seriesSet, ws, err := expandSeriesSet(ss)
require.NoError(t, err)
require.Equal(t, 0, len(ws))
require.Empty(t, ws)
values := map[float64]struct{}{}
for _, series := range seriesSet {
@ -2692,13 +2692,13 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) {
ss := querierBeforeAdd.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
_, seriesSet, ws, err := expandSeriesSet(ss)
require.NoError(t, err)
require.Equal(t, 0, len(ws))
require.Empty(t, ws)
require.Equal(t, map[string][]sample{}, seriesSet)
ss = querierAfterAddButBeforeCommit.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
_, seriesSet, ws, err = expandSeriesSet(ss)
require.NoError(t, err)
require.Equal(t, 0, len(ws))
require.Empty(t, ws)
require.Equal(t, map[string][]sample{}, seriesSet)
// This commit is after the queriers are created, so should not be returned.
@ -2709,14 +2709,14 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) {
ss = querierBeforeAdd.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
_, seriesSet, ws, err = expandSeriesSet(ss)
require.NoError(t, err)
require.Equal(t, 0, len(ws))
require.Empty(t, ws)
require.Equal(t, map[string][]sample{}, seriesSet)
// Series exists but has no samples for querier created after Add.
ss = querierAfterAddButBeforeCommit.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
_, seriesSet, ws, err = expandSeriesSet(ss)
require.NoError(t, err)
require.Equal(t, 0, len(ws))
require.Empty(t, ws)
require.Equal(t, map[string][]sample{`{foo="bar"}`: {}}, seriesSet)
querierAfterCommit, err := db.Querier(0, 1000000)
@ -2727,7 +2727,7 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) {
ss = querierAfterCommit.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
_, seriesSet, ws, err = expandSeriesSet(ss)
require.NoError(t, err)
require.Equal(t, 0, len(ws))
require.Empty(t, ws)
require.Equal(t, map[string][]sample{`{foo="bar"}`: {{t: 0, f: 0}}}, seriesSet)
}
@ -2874,7 +2874,7 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) {
files, err := os.ReadDir(tempDir)
require.NoError(t, err)
require.Equal(t, test.expSegmentsCount, len(files), "expected segments count mismatch")
require.Len(t, files, test.expSegmentsCount, "expected segments count mismatch")
// Verify that all data is written to the segments.
sizeExp := 0
@ -3024,7 +3024,7 @@ func TestCompactHead(t *testing.T) {
require.NoError(t, deleteNonBlocks(db.Dir()))
db, err = Open(dbDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil)
require.NoError(t, err)
require.Equal(t, 1, len(db.Blocks()))
require.Len(t, db.Blocks(), 1)
require.Equal(t, int64(maxt), db.Head().MinTime())
defer func() { require.NoError(t, db.Close()) }()
querier, err := db.Querier(0, int64(maxt)-1)
@ -3186,7 +3186,7 @@ func TestOpen_VariousBlockStates(t *testing.T) {
}
loaded++
}
require.Equal(t, len(expectedLoadedDirs), loaded)
require.Len(t, expectedLoadedDirs, loaded)
require.NoError(t, db.Close())
files, err := os.ReadDir(tmpDir)
@ -3201,7 +3201,7 @@ func TestOpen_VariousBlockStates(t *testing.T) {
ignored++
}
}
require.Equal(t, len(expectedIgnoredDirs), ignored)
require.Len(t, expectedIgnoredDirs, ignored)
_, err = os.Stat(tmpCheckpointDir)
require.True(t, os.IsNotExist(err))
_, err = os.Stat(tmpChunkSnapshotDir)
@ -3254,7 +3254,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) {
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.checkpointCreationTotal))
// As the data spans 59 blocks, 58 go to disk and 1 remains in the Head.
require.Equal(t, 58, len(db.Blocks()))
require.Len(t, db.Blocks(), 58)
// Though WAL was truncated only once, head should be truncated after each compaction.
require.Equal(t, 58.0, prom_testutil.ToFloat64(db.head.metrics.headTruncateTotal))
@ -3287,7 +3287,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) {
db.DisableCompactions()
// 1 block more.
require.Equal(t, 59, len(db.Blocks()))
require.Len(t, db.Blocks(), 59)
// No series in Head because of this new block.
require.Equal(t, 0, int(db.head.NumSeries()))
@ -3312,7 +3312,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) {
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.checkpointCreationTotal))
// No new blocks should be created as there was no data between the new samples and the blocks.
require.Equal(t, 59, len(db.Blocks()))
require.Len(t, db.Blocks(), 59)
// The compaction should have only truncated first 2/3 of WAL (while also rotating the files).
first, last, err = wlog.Segments(db.head.wal.Dir())
@ -3449,7 +3449,7 @@ func testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t
iterators = append(iterators, it)
}
require.NoError(t, seriesSet.Err())
require.Equal(t, actualSeries, numSeries)
require.Equal(t, numSeries, actualSeries)
// Compact the TSDB head again.
require.NoError(t, db.Compact(ctx))
@ -3583,7 +3583,7 @@ func testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChun
}
}
require.NoError(t, seriesSet.Err())
require.Equal(t, actualSeries, numSeries)
require.Equal(t, numSeries, actualSeries)
// Compact the TSDB head again.
require.NoError(t, db.Compact(ctx))
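The two hunks above fix argument order flagged by testifylint's expected-actual rule: require.Equal takes the expected value first, so failure output labels the two sides correctly. A hedged sketch of the pattern (the helper function is hypothetical):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func countSeries() int { return 5 } // hypothetical stand-in for a query result

func TestExpectedActualRule(t *testing.T) {
	numSeries := 5
	actualSeries := countSeries()
	// Before: require.Equal(t, actualSeries, numSeries), which would label the
	// actual value as "expected" in a failure message.
	require.Equal(t, numSeries, actualSeries)
}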
@ -4106,9 +4106,9 @@ func TestDBPanicOnMmappingHeadChunk(t *testing.T) {
numSamples := int(170*time.Minute/time.Millisecond) / int(itvl)
addSamples(numSamples)
require.Len(t, db.Blocks(), 0)
require.Empty(t, db.Blocks())
require.NoError(t, db.Compact(ctx))
require.Len(t, db.Blocks(), 0)
require.Empty(t, db.Blocks())
// Restarting.
require.NoError(t, db.Close())
@ -4121,7 +4121,7 @@ func TestDBPanicOnMmappingHeadChunk(t *testing.T) {
numSamples = int(20*time.Minute/time.Millisecond) / int(itvl)
addSamples(numSamples)
require.Len(t, db.Blocks(), 0)
require.Empty(t, db.Blocks())
require.NoError(t, db.Compact(ctx))
require.Len(t, db.Blocks(), 1)
@ -4467,13 +4467,13 @@ func TestOOOCompaction(t *testing.T) {
require.NoError(t, err)
require.False(t, created)
require.Greater(t, ms.ooo.oooHeadChunk.chunk.NumSamples(), 0)
require.Equal(t, 14, len(ms.ooo.oooMmappedChunks)) // 7 original, 7 duplicate.
require.Len(t, ms.ooo.oooMmappedChunks, 14) // 7 original, 7 duplicate.
}
checkNonEmptyOOOChunk(series1)
checkNonEmptyOOOChunk(series2)
// No blocks before compaction.
require.Equal(t, len(db.Blocks()), 0)
require.Empty(t, db.Blocks())
// There is a 0th WBL file.
require.NoError(t, db.head.wbl.Sync()) // syncing to make sure wbl is flushed in windows
@ -4489,7 +4489,7 @@ func TestOOOCompaction(t *testing.T) {
require.NoError(t, db.CompactOOOHead(ctx))
// 3 blocks exist now. [0, 120), [120, 240), [240, 360)
require.Equal(t, len(db.Blocks()), 3)
require.Len(t, db.Blocks(), 3)
verifyDBSamples() // Blocks created out of OOO head now.
@ -4541,7 +4541,7 @@ func TestOOOCompaction(t *testing.T) {
// Since this is a forced compaction, this block is not aligned with 2h.
err = db.CompactHead(NewRangeHead(db.head, 250*time.Minute.Milliseconds(), 350*time.Minute.Milliseconds()))
require.NoError(t, err)
require.Equal(t, len(db.Blocks()), 4) // [0, 120), [120, 240), [240, 360), [250, 351)
require.Len(t, db.Blocks(), 4) // [0, 120), [120, 240), [240, 360), [250, 351)
verifySamples(db.Blocks()[3], 250, 350)
verifyDBSamples() // Blocks created out of normal and OOO head now. But not merged.
@ -4556,7 +4556,7 @@ func TestOOOCompaction(t *testing.T) {
// This will merge overlapping block.
require.NoError(t, db.Compact(ctx))
require.Equal(t, len(db.Blocks()), 3) // [0, 120), [120, 240), [240, 360)
require.Len(t, db.Blocks(), 3) // [0, 120), [120, 240), [240, 360)
verifySamples(db.Blocks()[0], 90, 119)
verifySamples(db.Blocks()[1], 120, 239)
verifySamples(db.Blocks()[2], 240, 350) // Merged block.
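Here the len rule rewrites equality checks on len(...) into require.Len, regardless of which side the length literal sat on. A minimal sketch, with an illustrative slice standing in for db.Blocks():

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestLenRule(t *testing.T) {
	blocks := []string{"b0", "b1", "b2"}
	// Before: require.Equal(t, len(blocks), 3) or require.Equal(t, 3, len(blocks))
	// After: Len dumps the slice itself when the length is wrong.
	require.Len(t, blocks, 3)
}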
@ -4612,19 +4612,19 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) {
// If the normal Head is not compacted, the OOO head compaction does not take place.
require.NoError(t, db.Compact(ctx))
require.Equal(t, len(db.Blocks()), 0)
require.Empty(t, db.Blocks())
// Add more in-order samples in future that would trigger the compaction.
addSamples(400, 450)
// No blocks before compaction.
require.Equal(t, len(db.Blocks()), 0)
require.Empty(t, db.Blocks())
// Compacts normal and OOO head.
require.NoError(t, db.Compact(ctx))
// 2 blocks exist now. [0, 120), [250, 360)
require.Equal(t, len(db.Blocks()), 2)
require.Len(t, db.Blocks(), 2)
require.Equal(t, int64(0), db.Blocks()[0].MinTime())
require.Equal(t, 120*time.Minute.Milliseconds(), db.Blocks()[0].MaxTime())
require.Equal(t, 250*time.Minute.Milliseconds(), db.Blocks()[1].MinTime())
@ -4713,19 +4713,19 @@ func TestOOOCompactionWithDisabledWriteLog(t *testing.T) {
// If the normal Head is not compacted, the OOO head compaction does not take place.
require.NoError(t, db.Compact(ctx))
require.Equal(t, len(db.Blocks()), 0)
require.Empty(t, db.Blocks())
// Add more in-order samples in future that would trigger the compaction.
addSamples(400, 450)
// No blocks before compaction.
require.Equal(t, len(db.Blocks()), 0)
require.Empty(t, db.Blocks())
// Compacts normal and OOO head.
require.NoError(t, db.Compact(ctx))
// 2 blocks exist now. [0, 120), [250, 360)
require.Equal(t, len(db.Blocks()), 2)
require.Len(t, db.Blocks(), 2)
require.Equal(t, int64(0), db.Blocks()[0].MinTime())
require.Equal(t, 120*time.Minute.Milliseconds(), db.Blocks()[0].MaxTime())
require.Equal(t, 250*time.Minute.Milliseconds(), db.Blocks()[1].MinTime())
@ -4809,7 +4809,7 @@ func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) {
ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
require.NoError(t, err)
require.False(t, created)
require.Equal(t, 2, len(ms.ooo.oooMmappedChunks))
require.Len(t, ms.ooo.oooMmappedChunks, 2)
require.NotNil(t, ms.ooo.oooHeadChunk)
}
@ -4828,7 +4828,7 @@ func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) {
ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
require.NoError(t, err)
require.False(t, created)
require.Equal(t, 2, len(ms.ooo.oooMmappedChunks))
require.Len(t, ms.ooo.oooMmappedChunks, 2)
require.Equal(t, 109*time.Minute.Milliseconds(), ms.ooo.oooMmappedChunks[1].maxTime)
require.Nil(t, ms.ooo.oooHeadChunk) // Because of missing wbl.
}
@ -4857,9 +4857,9 @@ func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) {
verifySamples(90, 109)
// Compaction should also work fine.
require.Equal(t, len(db.Blocks()), 0)
require.Empty(t, db.Blocks())
require.NoError(t, db.CompactOOOHead(ctx))
require.Equal(t, len(db.Blocks()), 1) // One block from OOO data.
require.Len(t, db.Blocks(), 1) // One block from OOO data.
require.Equal(t, int64(0), db.Blocks()[0].MinTime())
require.Equal(t, 120*time.Minute.Milliseconds(), db.Blocks()[0].MaxTime())
@ -4952,7 +4952,7 @@ func Test_Querier_OOOQuery(t *testing.T) {
seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1"))
require.NotNil(t, seriesSet[series1.String()])
require.Equal(t, 1, len(seriesSet))
require.Len(t, seriesSet, 1)
require.Equal(t, expSamples, seriesSet[series1.String()])
require.GreaterOrEqual(t, float64(oooSamples), prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended), "number of ooo appended samples mismatch")
})
@ -5037,7 +5037,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
chks := queryChunks(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1"))
require.NotNil(t, chks[series1.String()])
require.Equal(t, 1, len(chks))
require.Len(t, chks, 1)
require.Equal(t, float64(oooSamples), prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended), "number of ooo appended samples mismatch")
var gotSamples []chunks.Sample
for _, chunk := range chks[series1.String()] {
@ -5312,7 +5312,7 @@ func TestWBLAndMmapReplay(t *testing.T) {
s1MmapSamples = append(s1MmapSamples, sample{t: ts, f: val})
}
}
require.Greater(t, len(s1MmapSamples), 0)
require.NotEmpty(t, s1MmapSamples)
require.NoError(t, db.Close())
@ -5461,7 +5461,7 @@ func TestOOOCompactionFailure(t *testing.T) {
addSample(90, 310)
// No blocks before compaction.
require.Equal(t, len(db.Blocks()), 0)
require.Empty(t, db.Blocks())
// There is a 0th WBL file.
verifyFirstWBLFileIs0 := func(count int) {
@ -5494,7 +5494,7 @@ func TestOOOCompactionFailure(t *testing.T) {
for i := 0; i < 5; i++ {
require.Error(t, db.CompactOOOHead(ctx))
}
require.Equal(t, len(db.Blocks()), 0)
require.Empty(t, db.Blocks())
// M-map files don't change after failed compaction.
verifyMmapFiles("000001")
@ -5505,7 +5505,7 @@ func TestOOOCompactionFailure(t *testing.T) {
db.compactor = originalCompactor
require.NoError(t, db.CompactOOOHead(ctx))
oldBlocks := db.Blocks()
require.Equal(t, len(db.Blocks()), 3)
require.Len(t, db.Blocks(), 3)
// Check that the ooo chunks were removed.
ms, created, err := db.head.getOrCreate(series1.Hash(), series1)
@ -5516,7 +5516,7 @@ func TestOOOCompactionFailure(t *testing.T) {
// The failed compaction should not have left the ooo Head corrupted.
// Hence, expect no new blocks with another OOO compaction call.
require.NoError(t, db.CompactOOOHead(ctx))
require.Equal(t, len(db.Blocks()), 3)
require.Len(t, db.Blocks(), 3)
require.Equal(t, oldBlocks, db.Blocks())
// There should be a single m-map file
@ -5558,7 +5558,7 @@ func TestOOOCompactionFailure(t *testing.T) {
// Since this is a forced compaction, this block is not aligned with 2h.
err = db.CompactHead(NewRangeHead(db.head, 250*time.Minute.Milliseconds(), 350*time.Minute.Milliseconds()))
require.NoError(t, err)
require.Equal(t, len(db.Blocks()), 4) // [0, 120), [120, 240), [240, 360), [250, 351)
require.Len(t, db.Blocks(), 4) // [0, 120), [120, 240), [240, 360), [250, 351)
verifySamples(db.Blocks()[3], 250, 350)
// The compaction also clears out the old m-map files. Including
@ -5903,9 +5903,9 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
require.NoError(t, err)
require.Greater(t, size, int64(0))
require.Len(t, db.Blocks(), 0)
require.Empty(t, db.Blocks())
require.NoError(t, db.compactOOOHead(ctx))
require.Greater(t, len(db.Blocks()), 0)
require.NotEmpty(t, db.Blocks())
// WBL is empty.
size, err = db.head.wbl.Size()
@ -5925,7 +5925,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
// OOO of 59m old fails.
s := addSamples(t, db, 251, 260, false, nil)
require.Len(t, s, 0)
require.Empty(t, s)
verifySamples(t, db, allSamples)
oldWblPtr := fmt.Sprintf("%p", db.head.wbl)
@ -5960,7 +5960,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
// OOO of 49m old fails.
s := addSamples(t, db, 261, 270, false, nil)
require.Len(t, s, 0)
require.Empty(t, s)
// WBL does not change.
newWblPtr := fmt.Sprintf("%p", db.head.wbl)
@ -5991,7 +5991,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
// OOO fails.
s := addSamples(t, db, 251, 260, false, nil)
require.Len(t, s, 0)
require.Empty(t, s)
verifySamples(t, db, allSamples)
require.Nil(t, db.head.wbl)
@ -6028,7 +6028,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
// OOO within old time window fails.
s := addSamples(t, db, 290, 309, false, nil)
require.Len(t, s, 0)
require.Empty(t, s)
// WBL does not change and is not removed.
newWblPtr := fmt.Sprintf("%p", db.head.wbl)
@ -6050,7 +6050,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
// OOO fails.
s := addSamples(t, db, 290, 309, false, nil)
require.Len(t, s, 0)
require.Empty(t, s)
verifySamples(t, db, allSamples)
require.Nil(t, db.head.wbl)
@ -6060,7 +6060,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
// OOO still fails.
s = addSamples(t, db, 290, 309, false, nil)
require.Len(t, s, 0)
require.Empty(t, s)
verifySamples(t, db, allSamples)
require.Nil(t, db.head.wbl)
})
@ -6320,7 +6320,7 @@ func TestDiskFillingUpAfterDisablingOOO(t *testing.T) {
db.DisableCompactions()
ms := db.head.series.getByHash(series1.Hash(), series1)
require.Greater(t, len(ms.ooo.oooMmappedChunks), 0, "OOO mmap chunk was not replayed")
require.NotEmpty(t, ms.ooo.oooMmappedChunks, "OOO mmap chunk was not replayed")
checkMmapFileContents := func(contains, notContains []string) {
mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot)
@ -6684,7 +6684,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) {
}
}
require.Len(t, db.Blocks(), 0)
require.Empty(t, db.Blocks())
require.NoError(t, db.reload())
require.Len(t, db.Blocks(), len(blockSeries))
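The remaining db_test.go hunks replace require.Greater(t, len(x), 0) and require.Len(t, x, 0) with require.NotEmpty and require.Empty. A small sketch covering both directions (names are illustrative):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestEmptyAndNotEmpty(t *testing.T) {
	var failed []error
	mmapped := []int{1, 2}
	// Before: require.Len(t, failed, 0) and require.Greater(t, len(mmapped), 0)
	require.Empty(t, failed)
	require.NotEmpty(t, mmapped)
}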

View file

@ -88,7 +88,7 @@ func TestAddExemplar(t *testing.T) {
}
require.NoError(t, es.AddExemplar(l, e))
require.Equal(t, es.index[string(l.Bytes(nil))].newest, 0, "exemplar was not stored correctly")
require.Equal(t, 0, es.index[string(l.Bytes(nil))].newest, "exemplar was not stored correctly")
e2 := exemplar.Exemplar{
Labels: labels.FromStrings("traceID", "zxcvb"),
@ -97,7 +97,7 @@ func TestAddExemplar(t *testing.T) {
}
require.NoError(t, es.AddExemplar(l, e2))
require.Equal(t, es.index[string(l.Bytes(nil))].newest, 1, "exemplar was not stored correctly, location of newest exemplar for series in index did not update")
require.Equal(t, 1, es.index[string(l.Bytes(nil))].newest, "exemplar was not stored correctly, location of newest exemplar for series in index did not update")
require.True(t, es.exemplars[es.index[string(l.Bytes(nil))].newest].exemplar.Equals(e2), "exemplar was not stored correctly, expected %+v got: %+v", e2, es.exemplars[es.index[string(l.Bytes(nil))].newest].exemplar)
require.NoError(t, es.AddExemplar(l, e2), "no error is expected attempting to add duplicate exemplar")
@ -145,7 +145,7 @@ func TestStorageOverflow(t *testing.T) {
require.NoError(t, err, "error creating label matcher for exemplar query")
ret, err := es.Select(100, 110, []*labels.Matcher{m})
require.NoError(t, err)
require.True(t, len(ret) == 1, "select should have returned samples for a single series only")
require.Len(t, ret, 1, "select should have returned samples for a single series only")
require.True(t, reflect.DeepEqual(eList[1:], ret[0].Exemplars), "select did not return expected exemplars\n\texpected: %+v\n\tactual: %+v\n", eList[1:], ret[0].Exemplars)
}
@ -171,7 +171,7 @@ func TestSelectExemplar(t *testing.T) {
require.NoError(t, err, "error creating label matcher for exemplar query")
ret, err := es.Select(0, 100, []*labels.Matcher{m})
require.NoError(t, err)
require.True(t, len(ret) == 1, "select should have returned samples for a single series only")
require.Len(t, ret, 1, "select should have returned samples for a single series only")
expectedResult := []exemplar.Exemplar{e}
require.True(t, reflect.DeepEqual(expectedResult, ret[0].Exemplars), "select did not return expected exemplars\n\texpected: %+v\n\tactual: %+v\n", expectedResult, ret[0].Exemplars)
@ -209,15 +209,15 @@ func TestSelectExemplar_MultiSeries(t *testing.T) {
require.NoError(t, err, "error creating label matcher for exemplar query")
ret, err := es.Select(100, 200, []*labels.Matcher{m})
require.NoError(t, err)
require.True(t, len(ret) == 1, "select should have returned samples for a single series only")
require.True(t, len(ret[0].Exemplars) == 3, "didn't get expected 8 exemplars, got %d", len(ret[0].Exemplars))
require.Len(t, ret, 1, "select should have returned samples for a single series only")
require.Len(t, ret[0].Exemplars, 3, "didn't get expected 3 exemplars, got %d", len(ret[0].Exemplars))
m, err = labels.NewMatcher(labels.MatchEqual, labels.MetricName, l1Name)
require.NoError(t, err, "error creating label matcher for exemplar query")
ret, err = es.Select(100, 200, []*labels.Matcher{m})
require.NoError(t, err)
require.True(t, len(ret) == 1, "select should have returned samples for a single series only")
require.True(t, len(ret[0].Exemplars) == 2, "didn't get expected 8 exemplars, got %d", len(ret[0].Exemplars))
require.Len(t, ret, 1, "select should have returned samples for a single series only")
require.Len(t, ret[0].Exemplars, 2, "didn't get expected 2 exemplars, got %d", len(ret[0].Exemplars))
}
func TestSelectExemplar_TimeRange(t *testing.T) {
@ -243,8 +243,8 @@ func TestSelectExemplar_TimeRange(t *testing.T) {
require.NoError(t, err, "error creating label matcher for exemplar query")
ret, err := es.Select(102, 104, []*labels.Matcher{m})
require.NoError(t, err)
require.True(t, len(ret) == 1, "select should have returned samples for a single series only")
require.True(t, len(ret[0].Exemplars) == 3, "didn't get expected two exemplars %d, %+v", len(ret[0].Exemplars), ret)
require.Len(t, ret, 1, "select should have returned samples for a single series only")
require.Len(t, ret[0].Exemplars, 3, "didn't get expected three exemplars %d, %+v", len(ret[0].Exemplars), ret)
}
// Test to ensure that even though a series matches more than one matcher from the
@ -281,7 +281,7 @@ func TestSelectExemplar_DuplicateSeries(t *testing.T) {
ret, err := es.Select(0, 100, m...)
require.NoError(t, err)
require.True(t, len(ret) == 1, "select should have returned samples for a single series only")
require.Len(t, ret, 1, "select should have returned samples for a single series only")
}
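In this file the len rule also covers boolean comparisons such as require.True(t, len(ret) == 1, ...). A sketch that keeps the message argument, as the real hunks do (values illustrative):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestLenFromBoolCompare(t *testing.T) {
	ret := []string{"series-1"}
	// Before: require.True(t, len(ret) == 1, "select should have returned samples for a single series only")
	// After: a failure reports the actual length instead of a bare "false".
	require.Len(t, ret, 1, "select should have returned samples for a single series only")
}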
func TestIndexOverwrite(t *testing.T) {

View file

@ -152,7 +152,7 @@ func TestBoundedChunk(t *testing.T) {
// it.Next() should keep returning no value.
for i := 0; i < 10; i++ {
require.True(t, it.Next() == chunkenc.ValNone)
require.Equal(t, chunkenc.ValNone, it.Next())
}
require.Equal(t, tc.expSamples, samples)
@ -216,8 +216,8 @@ func TestMemSeries_chunk(t *testing.T) {
name: "firstChunkID > ix",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange, cdm)
require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
s.firstChunkID = 5
@ -229,8 +229,8 @@ func TestMemSeries_chunk(t *testing.T) {
name: "call ix=0 on memSeries with no mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange, cdm)
require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
@ -241,8 +241,8 @@ func TestMemSeries_chunk(t *testing.T) {
name: "call ix=1 on memSeries with no mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange, cdm)
require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
@ -253,8 +253,8 @@ func TestMemSeries_chunk(t *testing.T) {
name: "call ix=10 on memSeries with no mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange, cdm)
require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
@ -267,7 +267,7 @@ func TestMemSeries_chunk(t *testing.T) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
@ -280,7 +280,7 @@ func TestMemSeries_chunk(t *testing.T) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
@ -293,7 +293,7 @@ func TestMemSeries_chunk(t *testing.T) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
@ -306,7 +306,7 @@ func TestMemSeries_chunk(t *testing.T) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
s.headChunks = nil
@ -320,7 +320,7 @@ func TestMemSeries_chunk(t *testing.T) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
s.headChunks = nil
@ -334,7 +334,7 @@ func TestMemSeries_chunk(t *testing.T) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
s.headChunks = nil
@ -348,7 +348,7 @@ func TestMemSeries_chunk(t *testing.T) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
cdm.Close()
@ -362,7 +362,7 @@ func TestMemSeries_chunk(t *testing.T) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
cdm.Close()
@ -374,8 +374,8 @@ func TestMemSeries_chunk(t *testing.T) {
name: "call ix=0 on memSeries with 3 head chunks and no mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*3, cdm)
require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks")
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
@ -386,8 +386,8 @@ func TestMemSeries_chunk(t *testing.T) {
name: "call ix=1 on memSeries with 3 head chunks and no mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*3, cdm)
require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks")
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
@ -398,8 +398,8 @@ func TestMemSeries_chunk(t *testing.T) {
name: "call ix=10 on memSeries with 3 head chunks and no mmapped chunks",
setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) {
appendSamples(t, s, 0, chunkRange*3, cdm)
require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks")
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
},
@ -412,10 +412,10 @@ func TestMemSeries_chunk(t *testing.T) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
@ -429,10 +429,10 @@ func TestMemSeries_chunk(t *testing.T) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
@ -446,10 +446,10 @@ func TestMemSeries_chunk(t *testing.T) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
@ -463,10 +463,10 @@ func TestMemSeries_chunk(t *testing.T) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
@ -480,10 +480,10 @@ func TestMemSeries_chunk(t *testing.T) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
@ -498,10 +498,10 @@ func TestMemSeries_chunk(t *testing.T) {
appendSamples(t, s, 0, chunkRange*4, cdm)
s.mmapChunks(cdm)
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks")
require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks")
appendSamples(t, s, chunkRange*4, chunkRange*6, cdm)
require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks")
require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks")
require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks")
require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element")
require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element")
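This file mostly swaps expected and actual operands, plus one compares-rule rewrite of require.True(t, a == b) into require.Equal. A sketch with a stand-in sentinel value instead of chunkenc.ValNone:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestComparesRule(t *testing.T) {
	const valNone = 0 // illustrative stand-in for chunkenc.ValNone
	next := func() int { return valNone }
	// Before: require.True(t, next() == valNone)
	// After: Equal prints both operands on failure.
	require.Equal(t, valNone, next())
}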

View file

@ -697,7 +697,7 @@ func TestHead_ReadWAL(t *testing.T) {
require.NoError(t, err)
e, err := q.Select(0, 1000, []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "1")})
require.NoError(t, err)
require.Equal(t, e[0].Exemplars[0], exemplar.Exemplar{Ts: 100, Value: 1, Labels: labels.FromStrings("traceID", "asdf")})
require.Equal(t, exemplar.Exemplar{Ts: 100, Value: 1, Labels: labels.FromStrings("traceID", "asdf")}, e[0].Exemplars[0])
})
}
}
@ -1086,12 +1086,12 @@ func TestMemSeries_truncateChunks_scenarios(t *testing.T) {
} else {
require.Nil(t, series.headChunks, "head chunk is present")
}
require.Equal(t, tc.mmappedChunks, len(series.mmappedChunks), "wrong number of mmapped chunks")
require.Len(t, series.mmappedChunks, tc.mmappedChunks, "wrong number of mmapped chunks")
truncated := series.truncateChunksBefore(tc.truncateBefore, 0)
require.Equal(t, tc.expectedTruncated, truncated, "wrong number of truncated chunks returned")
require.Equal(t, tc.expectedMmap, len(series.mmappedChunks), "wrong number of mmappedChunks after truncation")
require.Len(t, series.mmappedChunks, tc.expectedMmap, "wrong number of mmappedChunks after truncation")
if tc.expectedHead > 0 {
require.NotNil(t, series.headChunks, "headChunks should not be nil after truncation")
@ -1256,7 +1256,7 @@ func TestHeadDeleteSimple(t *testing.T) {
if !eok {
require.NoError(t, h.Close())
require.NoError(t, actSeriesSet.Err())
require.Equal(t, 0, len(actSeriesSet.Warnings()))
require.Empty(t, actSeriesSet.Warnings())
continue Outer
}
expSeries := expSeriesSet.At()
@ -1304,7 +1304,7 @@ func TestDeleteUntilCurMax(t *testing.T) {
for res.Next() {
}
require.NoError(t, res.Err())
require.Equal(t, 0, len(res.Warnings()))
require.Empty(t, res.Warnings())
// Add again and test for presence.
app = hb.Appender(context.Background())
@ -1323,7 +1323,7 @@ func TestDeleteUntilCurMax(t *testing.T) {
for res.Next() {
}
require.NoError(t, res.Err())
require.Equal(t, 0, len(res.Warnings()))
require.Empty(t, res.Warnings())
}
func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) {
@ -1524,7 +1524,7 @@ func TestDelete_e2e(t *testing.T) {
require.Equal(t, smplExp, smplRes)
}
require.NoError(t, ss.Err())
require.Equal(t, 0, len(ss.Warnings()))
require.Empty(t, ss.Warnings())
}
}
}
@ -1654,7 +1654,7 @@ func TestMemSeries_append(t *testing.T) {
require.False(t, chunkCreated, "second sample should use same chunk")
s.mmapChunks(chunkDiskMapper)
require.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk")
require.Len(t, s.mmappedChunks, 1, "there should be only 1 mmapped chunk")
require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range")
require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range")
require.Equal(t, int64(1000), s.headChunks.minTime, "wrong chunk range")
@ -1721,7 +1721,7 @@ func TestMemSeries_appendHistogram(t *testing.T) {
require.False(t, chunkCreated, "second sample should use same chunk")
s.mmapChunks(chunkDiskMapper)
require.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk")
require.Len(t, s.mmappedChunks, 1, "there should be only 1 mmapped chunk")
require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range")
require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range")
require.Equal(t, int64(1000), s.headChunks.minTime, "wrong chunk range")
@ -1732,7 +1732,7 @@ func TestMemSeries_appendHistogram(t *testing.T) {
require.False(t, chunkCreated, "third sample should trigger a re-encoded chunk")
s.mmapChunks(chunkDiskMapper)
require.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk")
require.Len(t, s.mmappedChunks, 1, "there should be only 1 mmapped chunk")
require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range")
require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range")
require.Equal(t, int64(1000), s.headChunks.minTime, "wrong chunk range")
@ -1831,7 +1831,7 @@ func TestGCChunkAccess(t *testing.T) {
require.NoError(t, idx.Series(1, &builder, &chunks))
require.Equal(t, labels.FromStrings("a", "1"), builder.Labels())
require.Equal(t, 2, len(chunks))
require.Len(t, chunks, 2)
cr, err := h.chunksRange(0, 1500, nil)
require.NoError(t, err)
@ -1890,7 +1890,7 @@ func TestGCSeriesAccess(t *testing.T) {
require.NoError(t, idx.Series(1, &builder, &chunks))
require.Equal(t, labels.FromStrings("a", "1"), builder.Labels())
require.Equal(t, 2, len(chunks))
require.Len(t, chunks, 2)
cr, err := h.chunksRange(0, 2000, nil)
require.NoError(t, err)
@ -1932,11 +1932,11 @@ func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) {
defer q.Close()
ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
require.Equal(t, true, ss.Next())
require.True(t, ss.Next())
for ss.Next() {
}
require.NoError(t, ss.Err())
require.Equal(t, 0, len(ss.Warnings()))
require.Empty(t, ss.Warnings())
}
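The bool-compare rule turns require.Equal(t, true, ...) and require.Equal(t, false, ...) into require.True and require.False. A minimal sketch (the iterator is a stand-in):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestBoolCompareRule(t *testing.T) {
	next := func() bool { return true }
	// Before: require.Equal(t, true, next())
	// After: True reads naturally and fails with a clear message.
	require.True(t, next())
}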
func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) {
@ -1961,8 +1961,8 @@ func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) {
require.NoError(t, err)
ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
require.Equal(t, false, ss.Next())
require.Equal(t, 0, len(ss.Warnings()))
require.False(t, ss.Next())
require.Empty(t, ss.Warnings())
require.NoError(t, q.Close())
// Truncate again, this time the series should be deleted
@ -1985,7 +1985,7 @@ func TestHead_LogRollback(t *testing.T) {
require.NoError(t, app.Rollback())
recs := readTestWAL(t, w.Dir())
require.Equal(t, 1, len(recs))
require.Len(t, recs, 1)
series, ok := recs[0].([]record.RefSeries)
require.True(t, ok, "expected series record but got %+v", recs[0])
@ -2209,7 +2209,7 @@ func TestHeadReadWriterRepair(t *testing.T) {
// take effect without another chunk being written.
files, err := os.ReadDir(mmappedChunksDir(dir))
require.NoError(t, err)
require.Equal(t, 6, len(files))
require.Len(t, files, 6)
// Corrupt the 4th file by writing a random byte to series ref.
f, err := os.OpenFile(filepath.Join(mmappedChunksDir(dir), files[3].Name()), os.O_WRONLY, 0o666)
@ -2235,7 +2235,7 @@ func TestHeadReadWriterRepair(t *testing.T) {
{
files, err := os.ReadDir(mmappedChunksDir(dir))
require.NoError(t, err)
require.Equal(t, 3, len(files))
require.Len(t, files, 3)
}
}
@ -2321,7 +2321,7 @@ func TestMemSeriesIsolation(t *testing.T) {
ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
_, seriesSet, ws, err := expandSeriesSet(ss)
require.NoError(t, err)
require.Equal(t, 0, len(ws))
require.Empty(t, ws)
for _, series := range seriesSet {
return int(series[len(series)-1].f)
@ -2725,8 +2725,8 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) {
require.NoError(t, err)
}
require.NoError(t, app.Commit())
require.Equal(t, head.MinTime(), firstSeriesTimestamp)
require.Equal(t, head.MaxTime(), lastSeriesTimestamp)
require.Equal(t, firstSeriesTimestamp, head.MinTime())
require.Equal(t, lastSeriesTimestamp, head.MaxTime())
testCases := []struct {
name string
@ -3007,7 +3007,7 @@ func BenchmarkHeadLabelValuesWithMatchers(b *testing.B) {
for benchIdx := 0; benchIdx < b.N; benchIdx++ {
actualValues, err := headIdxReader.LabelValues(ctx, "b_tens", matchers...)
require.NoError(b, err)
require.Equal(b, 9, len(actualValues))
require.Len(b, actualValues, 9)
}
}
@ -3115,7 +3115,7 @@ func TestChunkNotFoundHeadGCRace(t *testing.T) {
defer wg.Done()
// Compacting head while the querier spans the compaction time.
require.NoError(t, db.Compact(ctx))
require.Greater(t, len(db.Blocks()), 0)
require.NotEmpty(t, db.Blocks())
}()
// Give enough time for compaction to finish.
@ -3178,7 +3178,7 @@ func TestDataMissingOnQueryDuringCompaction(t *testing.T) {
defer wg.Done()
// Compacting head while the querier spans the compaction time.
require.NoError(t, db.Compact(ctx))
require.Greater(t, len(db.Blocks()), 0)
require.NotEmpty(t, db.Blocks())
}()
// Give enough time for compaction to finish.
@ -3907,7 +3907,7 @@ func TestSnapshotError(t *testing.T) {
require.NotNil(t, head.series.getByHash(lbls.Hash(), lbls))
tm, err := head.tombstones.Get(1)
require.NoError(t, err)
require.NotEqual(t, 0, len(tm))
require.NotEmpty(t, tm)
head.opts.EnableMemorySnapshotOnShutdown = true
require.NoError(t, head.Close()) // This will create a snapshot.
@ -3939,7 +3939,7 @@ func TestSnapshotError(t *testing.T) {
require.Nil(t, head.series.getByHash(lbls.Hash(), lbls))
tm, err = head.tombstones.Get(1)
require.NoError(t, err)
require.Equal(t, 0, len(tm))
require.Empty(t, tm)
}
func TestHistogramMetrics(t *testing.T) {
@ -4104,8 +4104,8 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
s := head.series.getByHash(l.Hash(), l)
require.NotNil(t, s)
require.NotNil(t, s.headChunks)
require.Equal(t, s.headChunks.len(), 1)
require.Equal(t, 0, len(s.mmappedChunks))
require.Equal(t, 1, s.headChunks.len())
require.Empty(t, s.mmappedChunks)
testQuery(1)
// Adding stale in different appender and continuing series after a stale sample.
@ -4139,8 +4139,8 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
s = head.series.getByHash(l.Hash(), l)
require.NotNil(t, s)
require.NotNil(t, s.headChunks)
require.Equal(t, s.headChunks.len(), 1)
require.Equal(t, 1, len(s.mmappedChunks))
require.Equal(t, 1, s.headChunks.len())
require.Len(t, s.mmappedChunks, 1)
testQuery(2)
}
@ -4480,7 +4480,7 @@ func TestChunkSnapshotReplayBug(t *testing.T) {
labels.MustNewMatcher(labels.MatchEqual, "__name__", "request_duration"),
labels.MustNewMatcher(labels.MatchNotEqual, "status_code", "200"),
)
require.Len(t, series, 0, "there should be no series found")
require.Empty(t, series, "there should be no series found")
}
func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) {
@ -4514,7 +4514,7 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) {
// Verify the snapshot.
name, idx, offset, err := LastChunkSnapshot(dir)
require.NoError(t, err)
require.True(t, name != "")
require.NotEqual(t, "", name)
require.Equal(t, 0, idx)
require.Greater(t, offset, 0)
}
@ -4873,7 +4873,7 @@ func TestReplayAfterMmapReplayError(t *testing.T) {
}
files, err := os.ReadDir(filepath.Join(dir, "chunks_head"))
require.Equal(t, 5, len(files))
require.Len(t, files, 5)
// Corrupt an m-map file.
mmapFilePath := filepath.Join(dir, "chunks_head", "000002")
@ -4888,7 +4888,7 @@ func TestReplayAfterMmapReplayError(t *testing.T) {
// There should be less m-map files due to corruption.
files, err = os.ReadDir(filepath.Join(dir, "chunks_head"))
require.Equal(t, 2, len(files))
require.Len(t, files, 2)
// Querying should not panic.
q, err := NewBlockQuerier(h, 0, lastTs)

View file

@ -205,7 +205,7 @@ func TestIndexRW_Postings(t *testing.T) {
err := ir.Series(p.At(), &builder, &c)
require.NoError(t, err)
require.Equal(t, 0, len(c))
require.Empty(t, c)
require.Equal(t, series[i], builder.Labels())
}
require.NoError(t, p.Err())

View file

@ -977,7 +977,7 @@ func TestMemPostings_Delete(t *testing.T) {
deleted := p.Get("lbl1", "b")
expanded, err = ExpandPostings(deleted)
require.NoError(t, err)
require.Equal(t, 0, len(expanded), "expected empty postings, got %v", expanded)
require.Empty(t, expanded, "expected empty postings, got %v", expanded)
}
func TestFindIntersectingPostings(t *testing.T) {

View file

@ -33,7 +33,7 @@ func TestPostingsStats(t *testing.T) {
stats.push(Stat{Name: "Stuff", Count: 3000000})
data := stats.get()
require.Equal(t, 10, len(data))
require.Len(t, data, 10)
for i := 0; i < heapLength; i++ {
require.Equal(t, uint64(max-i), data[i].Count)
}
@ -51,7 +51,7 @@ func TestPostingsStats2(t *testing.T) {
data := stats.get()
require.Equal(t, 4, len(data))
require.Len(t, data, 4)
require.Equal(t, uint64(11), data[0].Count)
}

View file

@ -491,7 +491,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
})
require.Nil(t, iterable)
require.Equal(t, err, fmt.Errorf("not found"))
require.Equal(t, c, nil)
require.Nil(t, c)
})
tests := []struct {

View file

@ -213,7 +213,7 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C
require.Equal(t, eok, rok)
if !eok {
require.Equal(t, 0, len(res.Warnings()))
require.Empty(t, res.Warnings())
break
}
sexp := c.exp.At()
@ -248,7 +248,7 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C
require.Equal(t, eok, rok)
if !eok {
require.Equal(t, 0, len(res.Warnings()))
require.Empty(t, res.Warnings())
break
}
sexpChks := c.expChks.At()
@ -2068,7 +2068,7 @@ func BenchmarkMergedSeriesSet(b *testing.B) {
i++
}
require.NoError(b, ms.Err())
require.Equal(b, len(lbls), i)
require.Len(b, lbls, i)
}
})
}
@ -2503,7 +2503,7 @@ func BenchmarkQuerySeek(b *testing.B) {
require.NoError(b, it.Err())
}
require.NoError(b, ss.Err())
require.Equal(b, 0, len(ss.Warnings()))
require.Empty(b, ss.Warnings())
})
}
}
@ -2631,7 +2631,7 @@ func BenchmarkSetMatcher(b *testing.B) {
for ss.Next() {
}
require.NoError(b, ss.Err())
require.Equal(b, 0, len(ss.Warnings()))
require.Empty(b, ss.Warnings())
}
})
}
@ -3233,7 +3233,7 @@ func benchQuery(b *testing.B, expExpansions int, q storage.Querier, selectors la
actualExpansions++
}
require.NoError(b, ss.Err())
require.Equal(b, 0, len(ss.Warnings()))
require.Empty(b, ss.Warnings())
require.Equal(b, expExpansions, actualExpansions)
require.NoError(b, ss.Err())
}
@ -3415,7 +3415,7 @@ func TestBlockBaseSeriesSet(t *testing.T) {
i++
}
require.Equal(t, len(tc.expIdxs), i)
require.Len(t, tc.expIdxs, i)
require.NoError(t, bcs.Err())
}
}
@ -3654,7 +3654,7 @@ func TestQueryWithOneChunkCompletelyDeleted(t *testing.T) {
chk := it.At()
cit := chk.Chunk.Iterator(nil)
for vt := cit.Next(); vt != chunkenc.ValNone; vt = cit.Next() {
require.Equal(t, vt, chunkenc.ValFloatHistogram, "Only float histograms expected, other sample types should have been deleted.")
require.Equal(t, chunkenc.ValFloatHistogram, vt, "Only float histograms expected, other sample types should have been deleted.")
sampleCount++
}
}

View file

@ -15,7 +15,6 @@
package record
import (
"errors"
"math/rand"
"testing"
@ -209,7 +208,7 @@ func TestRecord_Corrupted(t *testing.T) {
corrupted := enc.Samples(samples, nil)[:8]
_, err := dec.Samples(corrupted, nil)
require.True(t, errors.Is(err, encoding.ErrInvalidSize))
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted tombstone record", func(t *testing.T) {
@ -232,7 +231,7 @@ func TestRecord_Corrupted(t *testing.T) {
corrupted := enc.Exemplars(exemplars, nil)[:8]
_, err := dec.Exemplars(corrupted, nil)
require.True(t, errors.Is(err, encoding.ErrInvalidSize))
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted metadata record", func(t *testing.T) {
@ -242,7 +241,7 @@ func TestRecord_Corrupted(t *testing.T) {
corrupted := enc.Metadata(meta, nil)[:8]
_, err := dec.Metadata(corrupted, nil)
require.True(t, errors.Is(err, encoding.ErrInvalidSize))
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted histogram record", func(t *testing.T) {
@ -267,7 +266,7 @@ func TestRecord_Corrupted(t *testing.T) {
corrupted := enc.HistogramSamples(histograms, nil)[:8]
_, err := dec.HistogramSamples(corrupted, nil)
require.True(t, errors.Is(err, encoding.ErrInvalidSize))
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
}
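The error-is-as rule replaces require.True(t, errors.Is(err, target)) with require.ErrorIs, which prints both error chains on failure. A standalone sketch (the sentinel error is illustrative, standing in for encoding.ErrInvalidSize):

package example

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

var errInvalidSize = errors.New("invalid size") // illustrative sentinel

func TestErrorIsRule(t *testing.T) {
	err := fmt.Errorf("decode: %w", errInvalidSize)
	// Before: require.True(t, errors.Is(err, errInvalidSize))
	require.ErrorIs(t, err, errInvalidSize)
}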

View file

@ -49,7 +49,7 @@ func TestSegmentWAL_cut(t *testing.T) {
require.NoError(t, w.cut())
// Cutting creates a new file.
require.Equal(t, 2, len(w.files))
require.Len(t, w.files, 2)
require.NoError(t, w.write(WALEntrySeries, 1, []byte("Hello World!!")))
@ -409,7 +409,7 @@ func TestWALRestoreCorrupted(t *testing.T) {
r := w2.Reader()
serf := func(l []record.RefSeries) {
require.Equal(t, 0, len(l))
require.Empty(t, l)
}
// Weird hack to check order of reads.

View file

@ -229,7 +229,7 @@ func TestCheckpoint(t *testing.T) {
// Only the new checkpoint should be left.
files, err := os.ReadDir(dir)
require.NoError(t, err)
require.Equal(t, 1, len(files))
require.Len(t, files, 1)
require.Equal(t, "checkpoint.00000106", files[0].Name())
sr, err := NewSegmentsReader(filepath.Join(dir, "checkpoint.00000106"))

View file

@ -344,7 +344,7 @@ func TestReaderFuzz(t *testing.T) {
r := reader.Record()
// Expected value may come as nil or empty slice, so it requires special comparison.
if len(expected) == 0 {
require.Len(t, r, 0)
require.Empty(t, r)
} else {
require.Equal(t, expected, r, "read wrong record")
}
@ -395,7 +395,7 @@ func TestReaderFuzz_Live(t *testing.T) {
require.True(t, ok, "unexpected record")
// Expected value may come as nil or empty slice, so it requires special comparison.
if len(expected) == 0 {
require.Len(t, rec, 0)
require.Empty(t, rec)
} else {
require.Equal(t, expected, rec, "record does not match expected")
}

View file

@ -189,7 +189,7 @@ func TestWALRepair_ReadingError(t *testing.T) {
result = append(result, append(b, r.Record()...))
}
require.NoError(t, r.Err())
require.Equal(t, test.intactRecs, len(result), "Wrong number of intact records")
require.Len(t, result, test.intactRecs, "Wrong number of intact records")
for i, r := range result {
if !bytes.Equal(records[i], r) {
@ -283,7 +283,7 @@ func TestCorruptAndCarryOn(t *testing.T) {
reader := NewReader(sr)
i := 0
for ; i < 4 && reader.Next(); i++ {
require.Equal(t, recordSize, len(reader.Record()))
require.Len(t, reader.Record(), recordSize)
}
require.Equal(t, 4, i, "not enough records")
require.False(t, reader.Next(), "unexpected record")
@ -301,8 +301,8 @@ func TestCorruptAndCarryOn(t *testing.T) {
require.NoError(t, err)
// Ensure that we have a completely clean slate after repairing.
require.Equal(t, w.segment.Index(), 1) // We corrupted segment 0.
require.Equal(t, w.donePages, 0)
require.Equal(t, 1, w.segment.Index()) // We corrupted segment 0.
require.Equal(t, 0, w.donePages)
for i := 0; i < 5; i++ {
buf := make([]byte, recordSize)
@ -325,11 +325,11 @@ func TestCorruptAndCarryOn(t *testing.T) {
reader := NewReader(sr)
i := 0
for ; i < 9 && reader.Next(); i++ {
require.Equal(t, recordSize, len(reader.Record()))
require.Len(t, reader.Record(), recordSize)
}
require.Equal(t, 9, i, "wrong number of records")
require.False(t, reader.Next(), "unexpected record")
require.Equal(t, nil, reader.Err())
require.NoError(t, reader.Err())
sr.Close()
}
}
@ -456,7 +456,7 @@ func TestLogPartialWrite(t *testing.T) {
for i := 1; i <= testData.numRecords; i++ {
if err := w.Log(record); i == testData.faultyRecord {
require.Error(t, io.ErrShortWrite, err)
require.ErrorIs(t, err, io.ErrShortWrite)
} else {
require.NoError(t, err)
}
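Two things happen in this file: require.Equal(t, nil, reader.Err()) becomes require.NoError, and the ErrorIs call takes the error under test first and the target second. A sketch of both (names illustrative):

package example

import (
	"io"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestErrorNilAndErrorIs(t *testing.T) {
	var readErr error
	// Before: require.Equal(t, nil, readErr)
	// After: NoError prints the error text on failure.
	require.NoError(t, readErr)

	writeErr := io.ErrShortWrite
	// ErrorIs takes (t, err, target): the error under test comes first.
	require.ErrorIs(t, writeErr, io.ErrShortWrite)
}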

View file

@ -204,7 +204,7 @@ func TestParseAndPushMetricsTextAndFormat(t *testing.T) {
expected, err := MetricTextToWriteRequest(input, labels)
require.NoError(t, err)
require.Equal(t, writeRequestFixture, expected)
require.Equal(t, expected, writeRequestFixture)
}
func TestMetricTextToWriteRequestErrorParsingFloatValue(t *testing.T) {
@ -217,7 +217,7 @@ func TestMetricTextToWriteRequestErrorParsingFloatValue(t *testing.T) {
labels := map[string]string{"job": "promtool"}
_, err := MetricTextToWriteRequest(input, labels)
require.Equal(t, err.Error(), "text format parsing error in line 4: expected float as value, got \"1027Error\"")
require.Equal(t, "text format parsing error in line 4: expected float as value, got \"1027Error\"", err.Error())
}
func TestMetricTextToWriteRequestErrorParsingMetricType(t *testing.T) {
@ -229,5 +229,5 @@ func TestMetricTextToWriteRequestErrorParsingMetricType(t *testing.T) {
labels := map[string]string{"job": "promtool"}
_, err := MetricTextToWriteRequest(input, labels)
require.Equal(t, err.Error(), "text format parsing error in line 3: unknown metric type \"info\"")
require.Equal(t, "text format parsing error in line 3: unknown metric type \"info\"", err.Error())
}

View file

@ -28,19 +28,19 @@ func TestPool(t *testing.T) {
t.Run("provides correct values", func(t *testing.T) {
pool := zeropool.New(func() []byte { return make([]byte, 1024) })
item1 := pool.Get()
require.Equal(t, 1024, len(item1))
require.Len(t, item1, 1024)
item2 := pool.Get()
require.Equal(t, 1024, len(item2))
require.Len(t, item2, 1024)
pool.Put(item1)
pool.Put(item2)
item1 = pool.Get()
require.Equal(t, 1024, len(item1))
require.Len(t, item1, 1024)
item2 = pool.Get()
require.Equal(t, 1024, len(item2))
require.Len(t, item2, 1024)
})
t.Run("is not racy", func(t *testing.T) {

View file

@ -872,7 +872,7 @@ func TestStats(t *testing.T) {
name: "stats is blank",
param: "",
expected: func(t *testing.T, i interface{}) {
require.IsType(t, i, &QueryData{})
require.IsType(t, &QueryData{}, i)
qd := i.(*QueryData)
require.Nil(t, qd.Stats)
},
@ -881,7 +881,7 @@ func TestStats(t *testing.T) {
name: "stats is true",
param: "true",
expected: func(t *testing.T, i interface{}) {
require.IsType(t, i, &QueryData{})
require.IsType(t, &QueryData{}, i)
qd := i.(*QueryData)
require.NotNil(t, qd.Stats)
qs := qd.Stats.Builtin()
@ -896,7 +896,7 @@ func TestStats(t *testing.T) {
name: "stats is all",
param: "all",
expected: func(t *testing.T, i interface{}) {
require.IsType(t, i, &QueryData{})
require.IsType(t, &QueryData{}, i)
qd := i.(*QueryData)
require.NotNil(t, qd.Stats)
qs := qd.Stats.Builtin()
@ -917,12 +917,12 @@ func TestStats(t *testing.T) {
},
param: "known",
expected: func(t *testing.T, i interface{}) {
require.IsType(t, i, &QueryData{})
require.IsType(t, &QueryData{}, i)
qd := i.(*QueryData)
require.NotNil(t, qd.Stats)
j, err := json.Marshal(qd.Stats)
require.NoError(t, err)
require.JSONEq(t, string(j), `{"custom":"Custom Value"}`)
require.JSONEq(t, `{"custom":"Custom Value"}`, string(j))
},
},
} {
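The final hunks apply expected-actual to require.IsType and require.JSONEq as well: the expected type or JSON document comes first. A closing sketch (the payload and buffer type are illustrative):

package example

import (
	"bytes"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestExpectedFirst(t *testing.T) {
	var i interface{} = &bytes.Buffer{}
	// Before: require.IsType(t, i, &bytes.Buffer{})
	require.IsType(t, &bytes.Buffer{}, i)

	j := []byte(`{"custom":"Custom Value"}`)
	// Before: require.JSONEq(t, string(j), `{"custom":"Custom Value"}`)
	require.JSONEq(t, `{"custom":"Custom Value"}`, string(j))
}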