Testify: move to require (#8122)

* Testify: move to require

Move testify assertions from the assert package to require, so that a failed check aborts the test immediately instead of letting it continue in a broken state (see the sketch after the commit metadata below).

Signed-off-by: Julien Pivotto <roidelapluie@inuits.eu>

* Move the remaining assert call sites to require

Signed-off-by: Julien Pivotto <roidelapluie@inuits.eu>
Author: Julien Pivotto
Date:   2020-10-29 10:43:23 +01:00 (committed by GitHub)
Commit: 6c56a1faaa (parent: 3d8826a3d4)
98 changed files with 6275 additions and 3282 deletions
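
For context, testify's assert functions record a failure and let the test continue, while the require variants record it and then call t.FailNow(), aborting the test at the first broken assumption. A minimal sketch of the difference (illustrative only, not part of this diff; "missing.yml" is a placeholder path):

package example

import (
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// With assert, the failure is recorded but execution continues, so later
// checks run against invalid state and pile follow-on errors into the log.
func TestOpenWithAssert(t *testing.T) {
	f, err := os.Open("missing.yml")
	assert.NoError(t, err) // failure recorded, test keeps running
	assert.NotNil(t, f)    // fails again: f is nil after the error above
}

// With require, the first failed check stops the test immediately.
func TestOpenWithRequire(t *testing.T) {
	f, err := os.Open("missing.yml")
	require.NoError(t, err) // t.FailNow() on error: nothing below runs
	defer f.Close()         // only reached when the file really opened
}

Because require mirrors assert's API one-to-one, the migration below is mechanical: swap the import and replace assert. with require. at each call site.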

@@ -28,7 +28,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/pkg/labels"
@@ -98,9 +98,9 @@ func TestComputeExternalURL(t *testing.T) {
for _, test := range tests {
_, err := computeExternalURL(test.input, "0.0.0.0:9090")
if test.valid {
assert.NoError(t, err)
require.NoError(t, err)
} else {
assert.Error(t, err, "input=%q", test.input)
require.Error(t, err, "input=%q", test.input)
}
}
}
@@ -116,11 +116,11 @@ func TestFailedStartupExitCode(t *testing.T) {
prom := exec.Command(promPath, "-test.main", "--config.file="+fakeInputFile)
err := prom.Run()
assert.Error(t, err)
require.Error(t, err)
if exitError, ok := err.(*exec.ExitError); ok {
status := exitError.Sys().(syscall.WaitStatus)
assert.Equal(t, expectedExitStatus, status.ExitStatus())
require.Equal(t, expectedExitStatus, status.ExitStatus())
} else {
t.Errorf("unable to retrieve the exit status for prometheus: %v", err)
}
@@ -189,7 +189,7 @@ func TestSendAlerts(t *testing.T) {
if len(tc.in) == 0 {
t.Fatalf("sender called with 0 alert")
}
assert.Equal(t, tc.exp, alerts)
require.Equal(t, tc.exp, alerts)
})
sendAlerts(senderFunc, "http://localhost:9090")(context.TODO(), "up", tc.in...)
})
@@ -206,14 +206,14 @@ func TestWALSegmentSizeBounds(t *testing.T) {
// Log stderr in case of failure.
stderr, err := prom.StderrPipe()
assert.NoError(t, err)
require.NoError(t, err)
go func() {
slurp, _ := ioutil.ReadAll(stderr)
t.Log(string(slurp))
}()
err = prom.Start()
assert.NoError(t, err)
require.NoError(t, err)
if expectedExitStatus == 0 {
done := make(chan error, 1)
@@ -228,10 +228,10 @@ func TestWALSegmentSizeBounds(t *testing.T) {
}
err = prom.Wait()
assert.Error(t, err)
require.Error(t, err)
if exitError, ok := err.(*exec.ExitError); ok {
status := exitError.Sys().(syscall.WaitStatus)
assert.Equal(t, expectedExitStatus, status.ExitStatus())
require.Equal(t, expectedExitStatus, status.ExitStatus())
} else {
t.Errorf("unable to retrieve the exit status for prometheus: %v", err)
}
@@ -240,21 +240,21 @@ func TestWALSegmentSizeBounds(t *testing.T) {
func TestTimeMetrics(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "time_metrics_e2e")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(tmpDir))
require.NoError(t, os.RemoveAll(tmpDir))
}()
reg := prometheus.NewRegistry()
db, err := openDBWithMetrics(tmpDir, log.NewNopLogger(), reg, nil)
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, db.Close())
require.NoError(t, db.Close())
}()
// Check initial values.
assert.Equal(t, map[string]float64{
require.Equal(t, map[string]float64{
"prometheus_tsdb_lowest_timestamp_seconds": float64(math.MaxInt64) / 1000,
"prometheus_tsdb_head_min_time_seconds": float64(math.MaxInt64) / 1000,
"prometheus_tsdb_head_max_time_seconds": float64(math.MinInt64) / 1000,
@@ -266,14 +266,14 @@ func TestTimeMetrics(t *testing.T) {
app := db.Appender(context.Background())
_, err = app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 1000, 1)
assert.NoError(t, err)
require.NoError(t, err)
_, err = app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 2000, 1)
assert.NoError(t, err)
require.NoError(t, err)
_, err = app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 3000, 1)
assert.NoError(t, err)
assert.NoError(t, app.Commit())
require.NoError(t, err)
require.NoError(t, app.Commit())
assert.Equal(t, map[string]float64{
require.Equal(t, map[string]float64{
"prometheus_tsdb_lowest_timestamp_seconds": 1.0,
"prometheus_tsdb_head_min_time_seconds": 1.0,
"prometheus_tsdb_head_max_time_seconds": 3.0,
@@ -286,7 +286,7 @@ func TestTimeMetrics(t *testing.T) {
func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames ...string) map[string]float64 {
f, err := reg.Gather()
assert.NoError(t, err)
require.NoError(t, err)
res := make(map[string]float64, len(metricNames))
for _, g := range f {
@@ -295,7 +295,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
continue
}
assert.Equal(t, 1, len(g.GetMetric()))
require.Equal(t, 1, len(g.GetMetric()))
if _, ok := res[m]; ok {
t.Error("expected only one metric family for", m)
t.FailNow()

@@ -30,7 +30,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type origin int
@@ -82,22 +82,22 @@ func (p *queryLogTest) waitForPrometheus() error {
// then reloads the configuration if needed.
func (p *queryLogTest) setQueryLog(t *testing.T, queryLogFile string) {
err := p.configFile.Truncate(0)
assert.NoError(t, err)
require.NoError(t, err)
_, err = p.configFile.Seek(0, 0)
assert.NoError(t, err)
require.NoError(t, err)
if queryLogFile != "" {
_, err = p.configFile.Write([]byte(fmt.Sprintf("global:\n query_log_file: %s\n", queryLogFile)))
assert.NoError(t, err)
require.NoError(t, err)
}
_, err = p.configFile.Write([]byte(p.configuration()))
assert.NoError(t, err)
require.NoError(t, err)
}
// reloadConfig reloads the configuration using POST.
func (p *queryLogTest) reloadConfig(t *testing.T) {
r, err := http.Post(fmt.Sprintf("http://%s:%d%s/-/reload", p.host, p.port, p.prefix), "text/plain", nil)
assert.NoError(t, err)
assert.Equal(t, 200, r.StatusCode)
require.NoError(t, err)
require.Equal(t, 200, r.StatusCode)
}
// query runs a query according to the test origin.
@@ -111,8 +111,8 @@ func (p *queryLogTest) query(t *testing.T) {
p.prefix,
url.QueryEscape("query_with_api"),
))
assert.NoError(t, err)
assert.Equal(t, 200, r.StatusCode)
require.NoError(t, err)
require.Equal(t, 200, r.StatusCode)
case consoleOrigin:
r, err := http.Get(fmt.Sprintf(
"http://%s:%d%s/consoles/test.html",
@@ -120,8 +120,8 @@ func (p *queryLogTest) query(t *testing.T) {
p.port,
p.prefix,
))
assert.NoError(t, err)
assert.Equal(t, 200, r.StatusCode)
require.NoError(t, err)
require.Equal(t, 200, r.StatusCode)
case ruleOrigin:
time.Sleep(2 * time.Second)
default:
@@ -147,15 +147,15 @@ func (p *queryLogTest) queryString() string {
// test parameters.
func (p *queryLogTest) validateLastQuery(t *testing.T, ql []queryLogLine) {
q := ql[len(ql)-1]
assert.Equal(t, p.queryString(), q.Params.Query)
require.Equal(t, p.queryString(), q.Params.Query)
switch p.origin {
case apiOrigin:
assert.Equal(t, 5, q.Params.Step)
assert.Equal(t, "1970-01-01T00:00:00.000Z", q.Params.Start)
assert.Equal(t, "1970-01-01T01:00:00.000Z", q.Params.End)
require.Equal(t, 5, q.Params.Step)
require.Equal(t, "1970-01-01T00:00:00.000Z", q.Params.Start)
require.Equal(t, "1970-01-01T01:00:00.000Z", q.Params.End)
default:
assert.Equal(t, 0, q.Params.Step)
require.Equal(t, 0, q.Params.Step)
}
if p.origin != ruleOrigin {
@@ -163,17 +163,17 @@ func (p *queryLogTest) validateLastQuery(t *testing.T, ql []queryLogLine) {
if host == "[::1]" {
host = "::1"
}
assert.Equal(t, host, q.Request.ClientIP)
require.Equal(t, host, q.Request.ClientIP)
}
switch p.origin {
case apiOrigin:
assert.Equal(t, p.prefix+"/api/v1/query_range", q.Request.Path)
require.Equal(t, p.prefix+"/api/v1/query_range", q.Request.Path)
case consoleOrigin:
assert.Equal(t, p.prefix+"/consoles/test.html", q.Request.Path)
require.Equal(t, p.prefix+"/consoles/test.html", q.Request.Path)
case ruleOrigin:
assert.Equal(t, "querylogtest", q.RuleGroup.Name)
assert.Equal(t, filepath.Join(p.cwd, "testdata", "rules", "test.yml"), q.RuleGroup.File)
require.Equal(t, "querylogtest", q.RuleGroup.Name)
require.Equal(t, filepath.Join(p.cwd, "testdata", "rules", "test.yml"), q.RuleGroup.File)
default:
panic("unknown origin")
}
@@ -234,10 +234,10 @@ func (p *queryLogTest) run(t *testing.T) {
// Setup temporary files for this test.
queryLogFile, err := ioutil.TempFile("", "query")
assert.NoError(t, err)
require.NoError(t, err)
defer os.Remove(queryLogFile.Name())
p.configFile, err = ioutil.TempFile("", "config")
assert.NoError(t, err)
require.NoError(t, err)
defer os.Remove(p.configFile.Name())
if p.enabledAtStart {
@@ -247,9 +247,9 @@ func (p *queryLogTest) run(t *testing.T) {
}
dir, err := ioutil.TempDir("", "query_log_test")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
params := append([]string{
@@ -264,7 +264,7 @@ func (p *queryLogTest) run(t *testing.T) {
// Log stderr in case of failure.
stderr, err := prom.StderrPipe()
assert.NoError(t, err)
require.NoError(t, err)
// We use a WaitGroup to avoid calling t.Log after the test is done.
var wg sync.WaitGroup
@@ -276,17 +276,17 @@ func (p *queryLogTest) run(t *testing.T) {
wg.Done()
}()
assert.NoError(t, prom.Start())
require.NoError(t, prom.Start())
defer func() {
prom.Process.Kill()
prom.Wait()
}()
assert.NoError(t, p.waitForPrometheus())
require.NoError(t, p.waitForPrometheus())
if !p.enabledAtStart {
p.query(t)
assert.Equal(t, 0, len(readQueryLog(t, queryLogFile.Name())))
require.Equal(t, 0, len(readQueryLog(t, queryLogFile.Name())))
p.setQueryLog(t, queryLogFile.Name())
p.reloadConfig(t)
}
@@ -296,9 +296,9 @@ func (p *queryLogTest) run(t *testing.T) {
ql := readQueryLog(t, queryLogFile.Name())
qc := len(ql)
if p.exactQueryCount() {
assert.Equal(t, 1, qc)
require.Equal(t, 1, qc)
} else {
assert.Greater(t, qc, 0, "no queries logged")
require.Greater(t, qc, 0, "no queries logged")
}
p.validateLastQuery(t, ql)
@@ -311,7 +311,7 @@ func (p *queryLogTest) run(t *testing.T) {
p.query(t)
ql = readQueryLog(t, queryLogFile.Name())
assert.Equal(t, qc, len(ql))
require.Equal(t, qc, len(ql))
qc = len(ql)
p.setQueryLog(t, queryLogFile.Name())
@@ -322,9 +322,9 @@ func (p *queryLogTest) run(t *testing.T) {
ql = readQueryLog(t, queryLogFile.Name())
if p.exactQueryCount() {
assert.Equal(t, qc, len(ql))
require.Equal(t, qc, len(ql))
} else {
assert.Greater(t, len(ql), qc, "no queries logged")
require.Greater(t, len(ql), qc, "no queries logged")
}
p.validateLastQuery(t, ql)
qc = len(ql)
@@ -336,13 +336,13 @@ func (p *queryLogTest) run(t *testing.T) {
}
// Move the file, Prometheus should still write to the old file.
newFile, err := ioutil.TempFile("", "newLoc")
assert.NoError(t, err)
assert.NoError(t, newFile.Close())
require.NoError(t, err)
require.NoError(t, newFile.Close())
defer os.Remove(newFile.Name())
assert.NoError(t, os.Rename(queryLogFile.Name(), newFile.Name()))
require.NoError(t, os.Rename(queryLogFile.Name(), newFile.Name()))
ql = readQueryLog(t, newFile.Name())
if p.exactQueryCount() {
assert.Equal(t, qc, len(ql))
require.Equal(t, qc, len(ql))
}
p.validateLastQuery(t, ql)
qc = len(ql)
@@ -353,9 +353,9 @@ func (p *queryLogTest) run(t *testing.T) {
ql = readQueryLog(t, newFile.Name())
if p.exactQueryCount() {
assert.Equal(t, qc, len(ql))
require.Equal(t, qc, len(ql))
} else {
assert.Greater(t, len(ql), qc, "no queries logged")
require.Greater(t, len(ql), qc, "no queries logged")
}
p.validateLastQuery(t, ql)
@@ -366,9 +366,9 @@ func (p *queryLogTest) run(t *testing.T) {
ql = readQueryLog(t, queryLogFile.Name())
qc = len(ql)
if p.exactQueryCount() {
assert.Equal(t, 1, qc)
require.Equal(t, 1, qc)
} else {
assert.Greater(t, qc, 0, "no queries logged")
require.Greater(t, qc, 0, "no queries logged")
}
}
@@ -393,12 +393,12 @@ type queryLogLine struct {
func readQueryLog(t *testing.T, path string) []queryLogLine {
ql := []queryLogLine{}
file, err := os.Open(path)
assert.NoError(t, err)
require.NoError(t, err)
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
var q queryLogLine
assert.NoError(t, json.Unmarshal(scanner.Bytes(), &q))
require.NoError(t, json.Unmarshal(scanner.Bytes(), &q))
ql = append(ql, q)
}
return ql
@@ -410,7 +410,7 @@ func TestQueryLog(t *testing.T) {
}
cwd, err := os.Getwd()
assert.NoError(t, err)
require.NoError(t, err)
port := 15000
for _, host := range []string{"127.0.0.1", "[::1]"} {

@@ -20,7 +20,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestQueryRange(t *testing.T) {
@@ -29,18 +29,18 @@ func TestQueryRange(t *testing.T) {
p := &promqlPrinter{}
exitCode := QueryRange(s.URL, map[string]string{}, "up", "0", "300", 0, p)
assert.Equal(t, "/api/v1/query_range", getRequest().URL.Path)
require.Equal(t, "/api/v1/query_range", getRequest().URL.Path)
form := getRequest().Form
assert.Equal(t, "up", form.Get("query"))
assert.Equal(t, "1", form.Get("step"))
assert.Equal(t, 0, exitCode)
require.Equal(t, "up", form.Get("query"))
require.Equal(t, "1", form.Get("step"))
require.Equal(t, 0, exitCode)
exitCode = QueryRange(s.URL, map[string]string{}, "up", "0", "300", 10*time.Millisecond, p)
assert.Equal(t, "/api/v1/query_range", getRequest().URL.Path)
require.Equal(t, "/api/v1/query_range", getRequest().URL.Path)
form = getRequest().Form
assert.Equal(t, "up", form.Get("query"))
assert.Equal(t, "0.01", form.Get("step"))
assert.Equal(t, 0, exitCode)
require.Equal(t, "up", form.Get("query"))
require.Equal(t, "0.01", form.Get("step"))
require.Equal(t, 0, exitCode)
}
func TestQueryInstant(t *testing.T) {
@@ -49,11 +49,11 @@ func TestQueryInstant(t *testing.T) {
p := &promqlPrinter{}
exitCode := QueryInstant(s.URL, "up", "300", p)
assert.Equal(t, "/api/v1/query", getRequest().URL.Path)
require.Equal(t, "/api/v1/query", getRequest().URL.Path)
form := getRequest().Form
assert.Equal(t, "up", form.Get("query"))
assert.Equal(t, "300", form.Get("time"))
assert.Equal(t, 0, exitCode)
require.Equal(t, "up", form.Get("query"))
require.Equal(t, "300", form.Get("time"))
require.Equal(t, 0, exitCode)
}
func mockServer(code int, body string) (*httptest.Server, func() *http.Request) {

@@ -24,7 +24,7 @@ import (
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/discovery"
@@ -719,77 +719,77 @@ var expectedConf = &Config{
func TestYAMLRoundtrip(t *testing.T) {
want, err := LoadFile("testdata/roundtrip.good.yml")
assert.NoError(t, err)
require.NoError(t, err)
out, err := yaml.Marshal(want)
assert.NoError(t, err)
require.NoError(t, err)
got := &Config{}
assert.NoError(t, yaml.UnmarshalStrict(out, got))
require.NoError(t, yaml.UnmarshalStrict(out, got))
assert.Equal(t, want, got)
require.Equal(t, want, got)
}
func TestLoadConfig(t *testing.T) {
// Parse a valid file that sets a global scrape timeout. This tests whether parsing
// an overwritten default field in the global config permanently changes the default.
_, err := LoadFile("testdata/global_timeout.good.yml")
assert.NoError(t, err)
require.NoError(t, err)
c, err := LoadFile("testdata/conf.good.yml")
assert.NoError(t, err)
assert.Equal(t, expectedConf, c)
require.NoError(t, err)
require.Equal(t, expectedConf, c)
}
func TestScrapeIntervalLarger(t *testing.T) {
c, err := LoadFile("testdata/scrape_interval_larger.good.yml")
assert.NoError(t, err)
assert.Equal(t, 1, len(c.ScrapeConfigs))
require.NoError(t, err)
require.Equal(t, 1, len(c.ScrapeConfigs))
for _, sc := range c.ScrapeConfigs {
assert.Equal(t, true, sc.ScrapeInterval >= sc.ScrapeTimeout)
require.Equal(t, true, sc.ScrapeInterval >= sc.ScrapeTimeout)
}
}
// YAML marshaling must not reveal authentication credentials.
func TestElideSecrets(t *testing.T) {
c, err := LoadFile("testdata/conf.good.yml")
assert.NoError(t, err)
require.NoError(t, err)
secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`)
config, err := yaml.Marshal(c)
assert.NoError(t, err)
require.NoError(t, err)
yamlConfig := string(config)
matches := secretRe.FindAllStringIndex(yamlConfig, -1)
assert.Equal(t, 10, len(matches), "wrong number of secret matches found")
assert.NotContains(t, yamlConfig, "mysecret",
require.Equal(t, 10, len(matches), "wrong number of secret matches found")
require.NotContains(t, yamlConfig, "mysecret",
"yaml marshal reveals authentication credentials.")
}
func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) {
// Parse a valid file that sets a rule files with an absolute path
c, err := LoadFile(ruleFilesConfigFile)
assert.NoError(t, err)
assert.Equal(t, ruleFilesExpectedConf, c)
require.NoError(t, err)
require.Equal(t, ruleFilesExpectedConf, c)
}
func TestKubernetesEmptyAPIServer(t *testing.T) {
_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml")
assert.NoError(t, err)
require.NoError(t, err)
}
func TestKubernetesSelectors(t *testing.T) {
_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml")
assert.NoError(t, err)
require.NoError(t, err)
_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml")
assert.NoError(t, err)
require.NoError(t, err)
_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml")
assert.NoError(t, err)
require.NoError(t, err)
_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml")
assert.NoError(t, err)
require.NoError(t, err)
_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml")
assert.NoError(t, err)
require.NoError(t, err)
}
var expectedErrors = []struct {
@@ -1025,40 +1025,40 @@ var expectedErrors = []struct {
func TestBadConfigs(t *testing.T) {
for _, ee := range expectedErrors {
_, err := LoadFile("testdata/" + ee.filename)
assert.Error(t, err, "%s", ee.filename)
assert.Contains(t, err.Error(), ee.errMsg,
require.Error(t, err, "%s", ee.filename)
require.Contains(t, err.Error(), ee.errMsg,
"Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
}
}
func TestBadStaticConfigsJSON(t *testing.T) {
content, err := ioutil.ReadFile("testdata/static_config.bad.json")
assert.NoError(t, err)
require.NoError(t, err)
var tg targetgroup.Group
err = json.Unmarshal(content, &tg)
assert.Error(t, err)
require.Error(t, err)
}
func TestBadStaticConfigsYML(t *testing.T) {
content, err := ioutil.ReadFile("testdata/static_config.bad.yml")
assert.NoError(t, err)
require.NoError(t, err)
var tg targetgroup.Group
err = yaml.UnmarshalStrict(content, &tg)
assert.Error(t, err)
require.Error(t, err)
}
func TestEmptyConfig(t *testing.T) {
c, err := Load("")
assert.NoError(t, err)
require.NoError(t, err)
exp := DefaultConfig
assert.Equal(t, exp, *c)
require.Equal(t, exp, *c)
}
func TestEmptyGlobalBlock(t *testing.T) {
c, err := Load("global:\n")
assert.NoError(t, err)
require.NoError(t, err)
exp := DefaultConfig
assert.Equal(t, exp, *c)
require.Equal(t, exp, *c)
}
func kubernetesSDHostURL() config.URL {

@@ -17,7 +17,7 @@ import (
"testing"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
)
@@ -63,7 +63,7 @@ func TestMapFromVMWithEmptyTags(t *testing.T) {
actualVM := mapFromVM(testVM)
assert.Equal(t, expectedVM, actualVM)
require.Equal(t, expectedVM, actualVM)
}
func TestMapFromVMWithTags(t *testing.T) {
@@ -107,7 +107,7 @@ func TestMapFromVMWithTags(t *testing.T) {
actualVM := mapFromVM(testVM)
assert.Equal(t, expectedVM, actualVM)
require.Equal(t, expectedVM, actualVM)
}
func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
@@ -150,7 +150,7 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
actualVM := mapFromVMScaleSetVM(testVM, scaleSet)
assert.Equal(t, expectedVM, actualVM)
require.Equal(t, expectedVM, actualVM)
}
func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
@@ -196,7 +196,7 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
actualVM := mapFromVMScaleSetVM(testVM, scaleSet)
assert.Equal(t, expectedVM, actualVM)
require.Equal(t, expectedVM, actualVM)
}
func TestNewAzureResourceFromID(t *testing.T) {
@@ -214,6 +214,6 @@ func TestNewAzureResourceFromID(t *testing.T) {
},
} {
actual, _ := newAzureResourceFromID(tc.id, nil)
assert.Equal(t, tc.expected, actual)
require.Equal(t, tc.expected, actual)
}
}

@@ -23,7 +23,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -251,7 +251,7 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {
w.Write([]byte(response))
}))
stuburl, err := url.Parse(stub.URL)
assert.NoError(t, err)
require.NoError(t, err)
config := &SDConfig{
Server: stuburl.Host,
@@ -264,18 +264,18 @@ func newDiscovery(t *testing.T, config *SDConfig) *Discovery {
func newDiscovery(t *testing.T, config *SDConfig) *Discovery {
logger := log.NewNopLogger()
d, err := NewDiscovery(config, logger)
assert.NoError(t, err)
require.NoError(t, err)
return d
}
func checkOneTarget(t *testing.T, tg []*targetgroup.Group) {
assert.Equal(t, 1, len(tg))
require.Equal(t, 1, len(tg))
target := tg[0]
assert.Equal(t, "test-dc", string(target.Labels["__meta_consul_dc"]))
assert.Equal(t, target.Source, string(target.Labels["__meta_consul_service"]))
require.Equal(t, "test-dc", string(target.Labels["__meta_consul_dc"]))
require.Equal(t, target.Source, string(target.Labels["__meta_consul_service"]))
if target.Source == "test" {
// test service should have one node.
assert.Greater(t, len(target.Targets), 0, "Test service should have one node")
require.Greater(t, len(target.Targets), 0, "Test service should have one node")
}
}
@@ -359,7 +359,7 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
} {
stub := httptest.NewServer(http.HandlerFunc(tc.handler))
stuburl, err := url.Parse(stub.URL)
assert.NoError(t, err)
require.NoError(t, err)
config := &SDConfig{
Server: stuburl.Host,
@@ -370,13 +370,13 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
d := newDiscovery(t, config)
// Should be empty if not initialized.
assert.Equal(t, "", d.clientDatacenter)
require.Equal(t, "", d.clientDatacenter)
err = d.getDatacenter()
// An error should be returned.
assert.Equal(t, tc.errMessage, err.Error())
require.Equal(t, tc.errMessage, err.Error())
// Should still be empty.
assert.Equal(t, "", d.clientDatacenter)
require.Equal(t, "", d.clientDatacenter)
}
}

@@ -21,7 +21,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type DigitalOceanSDTestSuite struct {
@@ -47,21 +47,21 @@ func TestDigitalOceanSDRefresh(t *testing.T) {
cfg := DefaultSDConfig
cfg.HTTPClientConfig.BearerToken = tokenID
d, err := NewDiscovery(&cfg, log.NewNopLogger())
assert.NoError(t, err)
require.NoError(t, err)
endpoint, err := url.Parse(sdmock.Mock.Endpoint())
assert.NoError(t, err)
require.NoError(t, err)
d.client.BaseURL = endpoint
ctx := context.Background()
tgs, err := d.refresh(ctx)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, 1, len(tgs))
require.Equal(t, 1, len(tgs))
tg := tgs[0]
assert.NotNil(t, tg)
assert.NotNil(t, tg.Targets)
assert.Equal(t, 4, len(tg.Targets))
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
require.Equal(t, 4, len(tg.Targets))
for i, lbls := range []model.LabelSet{
{
@@ -119,7 +119,7 @@ func TestDigitalOceanSDRefresh(t *testing.T) {
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
assert.Equal(t, lbls, tg.Targets[i])
require.Equal(t, lbls, tg.Targets[i])
})
}
}

@@ -23,7 +23,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/miekg/dns"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
"gopkg.in/yaml.v2"
@@ -205,8 +205,8 @@ func TestDNS(t *testing.T) {
sd.lookupFn = tc.lookup
tgs, err := sd.refresh(context.Background())
assert.NoError(t, err)
assert.Equal(t, tc.expected, tgs)
require.NoError(t, err)
require.Equal(t, tc.expected, tgs)
})
}
}
@@ -296,7 +296,7 @@ func TestSDConfigUnmarshalYAML(t *testing.T) {
var config SDConfig
d := marshal(c.input)
err := config.UnmarshalYAML(unmarshal(d))
assert.Equal(t, c.expectErr, err != nil)
require.Equal(t, c.expectErr, err != nil)
})
}
}

@@ -23,7 +23,7 @@ import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/util/strutil"
@@ -64,7 +64,7 @@ func (m *SDMock) Setup() {
func (m *SDMock) SetupHandlers() {
headers := make(map[string]string)
rawHeaders, err := ioutil.ReadFile(filepath.Join("testdata", m.directory, "headers.yml"))
assert.NoError(m.t, err)
require.NoError(m.t, err)
yaml.Unmarshal(rawHeaders, &headers)
prefix := "/"

@@ -20,7 +20,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
)
@@ -36,21 +36,21 @@ role: nodes
host: %s
`, url)
var cfg SDConfig
assert.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
d, err := NewDiscovery(&cfg, log.NewNopLogger())
assert.NoError(t, err)
require.NoError(t, err)
ctx := context.Background()
tgs, err := d.refresh(ctx)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, 1, len(tgs))
require.Equal(t, 1, len(tgs))
tg := tgs[0]
assert.NotNil(t, tg)
assert.NotNil(t, tg.Targets)
assert.Equal(t, 5, len(tg.Targets))
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
require.Equal(t, 5, len(tg.Targets))
for i, lbls := range []model.LabelSet{
{
@@ -124,7 +124,7 @@ host: %s
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
assert.Equal(t, lbls, tg.Targets[i])
require.Equal(t, lbls, tg.Targets[i])
})
}
}

@@ -20,7 +20,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
)
@@ -36,21 +36,21 @@ role: services
host: %s
`, url)
var cfg SDConfig
assert.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
d, err := NewDiscovery(&cfg, log.NewNopLogger())
assert.NoError(t, err)
require.NoError(t, err)
ctx := context.Background()
tgs, err := d.refresh(ctx)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, 1, len(tgs))
require.Equal(t, 1, len(tgs))
tg := tgs[0]
assert.NotNil(t, tg)
assert.NotNil(t, tg.Targets)
assert.Equal(t, 15, len(tg.Targets))
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
require.Equal(t, 15, len(tg.Targets))
for i, lbls := range []model.LabelSet{
{
@@ -310,7 +310,7 @@ host: %s
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
assert.Equal(t, lbls, tg.Targets[i])
require.Equal(t, lbls, tg.Targets[i])
})
}
}

@@ -20,7 +20,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
)
@@ -36,21 +36,21 @@ role: tasks
host: %s
`, url)
var cfg SDConfig
assert.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
d, err := NewDiscovery(&cfg, log.NewNopLogger())
assert.NoError(t, err)
require.NoError(t, err)
ctx := context.Background()
tgs, err := d.refresh(ctx)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, 1, len(tgs))
require.Equal(t, 1, len(tgs))
tg := tgs[0]
assert.NotNil(t, tg)
assert.NotNil(t, tg.Targets)
assert.Equal(t, 27, len(tg.Targets))
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
require.Equal(t, 27, len(tg.Targets))
for i, lbls := range []model.LabelSet{
{
@@ -787,7 +787,7 @@ host: %s
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
assert.Equal(t, lbls, tg.Targets[i])
require.Equal(t, lbls, tg.Targets[i])
})
}
}

@@ -20,7 +20,7 @@ import (
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestFetchApps(t *testing.T) {
@@ -182,19 +182,19 @@ func TestFetchApps(t *testing.T) {
defer ts.Close()
apps, err := fetchApps(context.TODO(), ts.URL, &http.Client{})
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, len(apps.Applications), 2)
assert.Equal(t, apps.Applications[0].Name, "CONFIG-SERVICE")
assert.Equal(t, apps.Applications[1].Name, "META-SERVICE")
require.Equal(t, len(apps.Applications), 2)
require.Equal(t, apps.Applications[0].Name, "CONFIG-SERVICE")
require.Equal(t, apps.Applications[1].Name, "META-SERVICE")
assert.Equal(t, len(apps.Applications[1].Instances), 2)
assert.Equal(t, apps.Applications[1].Instances[0].InstanceID, "meta-service002.test.com:meta-service:8080")
assert.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[0].XMLName.Local, "project")
assert.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[0].Content, "meta-service")
assert.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[1].XMLName.Local, "management.port")
assert.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[1].Content, "8090")
assert.Equal(t, apps.Applications[1].Instances[1].InstanceID, "meta-service001.test.com:meta-service:8080")
require.Equal(t, len(apps.Applications[1].Instances), 2)
require.Equal(t, apps.Applications[1].Instances[0].InstanceID, "meta-service002.test.com:meta-service:8080")
require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[0].XMLName.Local, "project")
require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[0].Content, "meta-service")
require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[1].XMLName.Local, "management.port")
require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[1].Content, "8090")
require.Equal(t, apps.Applications[1].Instances[1].InstanceID, "meta-service001.test.com:meta-service:8080")
}
func Test500ErrorHttpResponse(t *testing.T) {
@@ -209,5 +209,5 @@ func Test500ErrorHttpResponse(t *testing.T) {
defer ts.Close()
_, err := fetchApps(context.TODO(), ts.URL, &http.Client{})
assert.Error(t, err, "5xx HTTP response")
require.Error(t, err, "5xx HTTP response")
}

@@ -21,7 +21,7 @@ import (
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
@@ -54,8 +54,8 @@ func TestEurekaSDHandleError(t *testing.T) {
)
tgs, err := testUpdateServices(respHandler)
assert.EqualError(t, err, errTesting)
assert.Equal(t, len(tgs), 0)
require.EqualError(t, err, errTesting)
require.Equal(t, len(tgs), 0)
}
func TestEurekaSDEmptyList(t *testing.T) {
@@ -71,8 +71,8 @@ func TestEurekaSDEmptyList(t *testing.T) {
}
)
tgs, err := testUpdateServices(respHandler)
assert.NoError(t, err)
assert.Equal(t, len(tgs), 1)
require.NoError(t, err)
require.Equal(t, len(tgs), 1)
}
func TestEurekaSDSendGroup(t *testing.T) {
@@ -231,16 +231,16 @@ func TestEurekaSDSendGroup(t *testing.T) {
)
tgs, err := testUpdateServices(respHandler)
assert.NoError(t, err)
assert.Equal(t, len(tgs), 1)
require.NoError(t, err)
require.Equal(t, len(tgs), 1)
tg := tgs[0]
assert.Equal(t, tg.Source, "eureka")
assert.Equal(t, len(tg.Targets), 4)
require.Equal(t, tg.Source, "eureka")
require.Equal(t, len(tg.Targets), 4)
tgt := tg.Targets[0]
assert.Equal(t, tgt[model.AddressLabel], model.LabelValue("config-service001.test.com:8080"))
require.Equal(t, tgt[model.AddressLabel], model.LabelValue("config-service001.test.com:8080"))
tgt = tg.Targets[2]
assert.Equal(t, tgt[model.AddressLabel], model.LabelValue("meta-service002.test.com:8080"))
require.Equal(t, tgt[model.AddressLabel], model.LabelValue("meta-service002.test.com:8080"))
}

@@ -26,7 +26,7 @@ import (
"time"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -54,7 +54,7 @@ func newTestRunner(t *testing.T) *testRunner {
t.Helper()
tmpDir, err := ioutil.TempDir("", "prometheus-file-sd")
assert.NoError(t, err)
require.NoError(t, err)
return &testRunner{
T: t,
@@ -77,19 +77,19 @@ func (t *testRunner) copyFileTo(src string, name string) string {
t.Helper()
newf, err := ioutil.TempFile(t.dir, "")
assert.NoError(t, err)
require.NoError(t, err)
f, err := os.Open(src)
assert.NoError(t, err)
require.NoError(t, err)
_, err = io.Copy(newf, f)
assert.NoError(t, err)
assert.NoError(t, f.Close())
assert.NoError(t, newf.Close())
require.NoError(t, err)
require.NoError(t, f.Close())
require.NoError(t, newf.Close())
dst := filepath.Join(t.dir, name)
err = os.Rename(newf.Name(), dst)
assert.NoError(t, err)
require.NoError(t, err)
return dst
}
@@ -99,14 +99,14 @@ func (t *testRunner) writeString(file string, data string) {
t.Helper()
newf, err := ioutil.TempFile(t.dir, "")
assert.NoError(t, err)
require.NoError(t, err)
_, err = newf.WriteString(data)
assert.NoError(t, err)
assert.NoError(t, newf.Close())
require.NoError(t, err)
require.NoError(t, newf.Close())
err = os.Rename(newf.Name(), file)
assert.NoError(t, err)
require.NoError(t, err)
}
// appendString appends a string to a file.
@@ -114,11 +114,11 @@ func (t *testRunner) appendString(file, data string) {
t.Helper()
f, err := os.OpenFile(file, os.O_WRONLY|os.O_APPEND, 0)
assert.NoError(t, err)
require.NoError(t, err)
defer f.Close()
_, err = f.WriteString(data)
assert.NoError(t, err)
require.NoError(t, err)
}
// run starts the file SD and the loop receiving target groups updates.
@@ -230,7 +230,7 @@ func (t *testRunner) requireTargetGroups(expected, got []*targetgroup.Group) {
panic(err)
}
assert.Equal(t, string(b1), string(b2))
require.Equal(t, string(b1), string(b2))
}
// validTg() maps to fixtures/valid.{json,yml}.
@@ -468,7 +468,7 @@ func TestRemoveFile(t *testing.T) {
// Verify that we receive the update about the target groups being removed.
ref := runner.lastReceive()
assert.NoError(t, os.Remove(sdFile))
require.NoError(t, os.Remove(sdFile))
runner.requireUpdate(
ref,
[]*targetgroup.Group{

@@ -20,7 +20,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type hcloudSDTestSuite struct {
@@ -44,16 +44,16 @@ func TestHCloudSDRefresh(t *testing.T) {
cfg.hcloudEndpoint = suite.Mock.Endpoint()
d, err := newHcloudDiscovery(&cfg, log.NewNopLogger())
assert.NoError(t, err)
require.NoError(t, err)
targetGroups, err := d.refresh(context.Background())
assert.NoError(t, err)
assert.Equal(t, 1, len(targetGroups))
require.NoError(t, err)
require.Equal(t, 1, len(targetGroups))
targetGroup := targetGroups[0]
assert.NotNil(t, targetGroup, "targetGroup should not be nil")
assert.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil")
assert.Equal(t, 3, len(targetGroup.Targets))
require.NotNil(t, targetGroup, "targetGroup should not be nil")
require.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil")
require.Equal(t, 3, len(targetGroup.Targets))
for i, labelSet := range []model.LabelSet{
{
@@ -119,7 +119,7 @@ func TestHCloudSDRefresh(t *testing.T) {
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
assert.Equal(t, labelSet, targetGroup.Targets[i])
require.Equal(t, labelSet, targetGroup.Targets[i])
})
}
}

@@ -21,7 +21,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type robotSDTestSuite struct {
@@ -43,16 +43,16 @@ func TestRobotSDRefresh(t *testing.T) {
cfg.robotEndpoint = suite.Mock.Endpoint()
d, err := newRobotDiscovery(&cfg, log.NewNopLogger())
assert.NoError(t, err)
require.NoError(t, err)
targetGroups, err := d.refresh(context.Background())
assert.NoError(t, err)
assert.Equal(t, 1, len(targetGroups))
require.NoError(t, err)
require.Equal(t, 1, len(targetGroups))
targetGroup := targetGroups[0]
assert.NotNil(t, targetGroup, "targetGroup should not be nil")
assert.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil")
assert.Equal(t, 2, len(targetGroup.Targets))
require.NotNil(t, targetGroup, "targetGroup should not be nil")
require.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil")
require.Equal(t, 2, len(targetGroup.Targets))
for i, labelSet := range []model.LabelSet{
{
@@ -80,7 +80,7 @@ func TestRobotSDRefresh(t *testing.T) {
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
assert.Equal(t, labelSet, targetGroup.Targets[i])
require.Equal(t, labelSet, targetGroup.Targets[i])
})
}
}
@@ -92,11 +92,11 @@ func TestRobotSDRefreshHandleError(t *testing.T) {
cfg.robotEndpoint = suite.Mock.Endpoint()
d, err := newRobotDiscovery(&cfg, log.NewNopLogger())
assert.NoError(t, err)
require.NoError(t, err)
targetGroups, err := d.refresh(context.Background())
assert.Error(t, err)
assert.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error())
require.Error(t, err)
require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error())
assert.Equal(t, 0, len(targetGroups))
require.Equal(t, 0, len(targetGroups))
}

@@ -20,7 +20,7 @@ import (
"time"
"github.com/go-kit/kit/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
@@ -151,7 +151,7 @@ func requireTargetGroups(t *testing.T, expected, res map[string]*targetgroup.Gro
panic(err)
}
assert.Equal(t, string(b1), string(b2))
require.Equal(t, string(b1), string(b2))
}
type hasSynced interface {

@@ -24,7 +24,7 @@ import (
"github.com/go-kit/kit/log"
client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/testutil"
@@ -702,7 +702,7 @@ func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg fun
sort.Sort(byGroupSource(got))
sort.Sort(byGroupSource(expected))
assert.Equal(t, expected, got)
require.Equal(t, expected, got)
}
func staticConfig(addrs ...string) StaticConfig {
@@ -882,7 +882,7 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
<-discoveryManager.SyncCh()
for _, cfg := range cfgs {
assert.Equal(t, originalConfig, cfg)
require.Equal(t, originalConfig, cfg)
}
}

@@ -18,7 +18,7 @@ import (
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type OpenstackSDHypervisorTestSuite struct {
@@ -54,12 +54,12 @@ func TestOpenstackSDHypervisorRefresh(t *testing.T) {
hypervisor, _ := mock.openstackAuthSuccess()
ctx := context.Background()
tgs, err := hypervisor.refresh(ctx)
assert.Equal(t, 1, len(tgs))
require.Equal(t, 1, len(tgs))
tg := tgs[0]
assert.NoError(t, err)
assert.NotNil(t, tg)
assert.NotNil(t, tg.Targets)
assert.Equal(t, 2, len(tg.Targets))
require.NoError(t, err)
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
require.Equal(t, 2, len(tg.Targets))
for l, v := range map[string]string{
"__address__": "172.16.70.14:0",
@@ -70,7 +70,7 @@ func TestOpenstackSDHypervisorRefresh(t *testing.T) {
"__meta_openstack_hypervisor_status": "enabled",
"__meta_openstack_hypervisor_id": "1",
} {
assert.Equal(t, model.LabelValue(v), tg.Targets[0][model.LabelName(l)])
require.Equal(t, model.LabelValue(v), tg.Targets[0][model.LabelName(l)])
}
for l, v := range map[string]string{
@@ -82,7 +82,7 @@ func TestOpenstackSDHypervisorRefresh(t *testing.T) {
"__meta_openstack_hypervisor_status": "enabled",
"__meta_openstack_hypervisor_id": "721",
} {
assert.Equal(t, model.LabelValue(v), tg.Targets[1][model.LabelName(l)])
require.Equal(t, model.LabelValue(v), tg.Targets[1][model.LabelName(l)])
}
}
@@ -94,6 +94,6 @@ func TestOpenstackSDHypervisorRefreshWithDoneContext(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
_, err := hypervisor.refresh(ctx)
assert.Error(t, err)
assert.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
require.Error(t, err)
require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
}

@@ -19,7 +19,7 @@ import (
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type OpenstackSDInstanceTestSuite struct {
@@ -56,18 +56,18 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
mock.SetupTest(t)
instance, err := mock.openstackAuthSuccess()
assert.NoError(t, err)
require.NoError(t, err)
ctx := context.Background()
tgs, err := instance.refresh(ctx)
assert.NoError(t, err)
assert.Equal(t, 1, len(tgs))
require.NoError(t, err)
require.Equal(t, 1, len(tgs))
tg := tgs[0]
assert.NotNil(t, tg)
assert.NotNil(t, tg.Targets)
assert.Equal(t, 4, len(tg.Targets))
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
require.Equal(t, 4, len(tg.Targets))
for i, lbls := range []model.LabelSet{
{
@@ -120,7 +120,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
assert.Equal(t, lbls, tg.Targets[i])
require.Equal(t, lbls, tg.Targets[i])
})
}
}
@@ -133,6 +133,6 @@ func TestOpenstackSDInstanceRefreshWithDoneContext(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
_, err := hypervisor.refresh(ctx)
assert.Error(t, err)
assert.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
require.Error(t, err)
require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
}

@@ -20,7 +20,7 @@ import (
"time"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -73,10 +73,10 @@ func TestRefresh(t *testing.T) {
go d.Run(ctx, ch)
tg := <-ch
assert.Equal(t, tg1, tg)
require.Equal(t, tg1, tg)
tg = <-ch
assert.Equal(t, tg2, tg)
require.Equal(t, tg2, tg)
tick := time.NewTicker(2 * interval)
defer tick.Stop()

@@ -18,7 +18,7 @@ import (
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
)
@@ -53,8 +53,8 @@ func TestTargetGroupStrictJsonUnmarshal(t *testing.T) {
for _, test := range tests {
tg := Group{}
actual := tg.UnmarshalJSON([]byte(test.json))
assert.Equal(t, test.expectedReply, actual)
assert.Equal(t, test.expectedGroup, tg)
require.Equal(t, test.expectedReply, actual)
require.Equal(t, test.expectedGroup, tg)
}
}
@@ -92,8 +92,8 @@ func TestTargetGroupYamlMarshal(t *testing.T) {
for _, test := range tests {
actual, err := test.group.MarshalYAML()
assert.Equal(t, test.expectedErr, err)
assert.Equal(t, test.expectedYaml, string(marshal(actual)))
require.Equal(t, test.expectedErr, err)
require.Equal(t, test.expectedYaml, string(marshal(actual)))
}
}
@@ -132,8 +132,8 @@ func TestTargetGroupYamlUnmarshal(t *testing.T) {
for _, test := range tests {
tg := Group{}
actual := tg.UnmarshalYAML(unmarshal([]byte(test.yaml)))
assert.Equal(t, test.expectedReply, actual)
assert.Equal(t, test.expectedGroup, tg)
require.Equal(t, test.expectedReply, actual)
require.Equal(t, test.expectedGroup, tg)
}
}
@@ -150,7 +150,7 @@ func TestString(t *testing.T) {
Group{Targets: []model.LabelSet{},
Source: "<source>",
Labels: model.LabelSet{}}
assert.Equal(t, "<source>", group1.String())
assert.Equal(t, "<source>", group2.String())
assert.Equal(t, group1.String(), group2.String())
require.Equal(t, "<source>", group1.String())
require.Equal(t, "<source>", group2.String())
require.Equal(t, group1.String(), group2.String())
}

@@ -26,7 +26,7 @@ import (
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
@@ -84,54 +84,54 @@ func newTritonDiscovery(c SDConfig) (*Discovery, error) {
func TestTritonSDNew(t *testing.T) {
td, err := newTritonDiscovery(conf)
assert.NoError(t, err)
assert.NotNil(t, td)
assert.NotNil(t, td.client)
assert.NotZero(t, td.interval)
assert.NotNil(t, td.sdConfig)
assert.Equal(t, conf.Account, td.sdConfig.Account)
assert.Equal(t, conf.DNSSuffix, td.sdConfig.DNSSuffix)
assert.Equal(t, conf.Endpoint, td.sdConfig.Endpoint)
assert.Equal(t, conf.Port, td.sdConfig.Port)
require.NoError(t, err)
require.NotNil(t, td)
require.NotNil(t, td.client)
require.NotZero(t, td.interval)
require.NotNil(t, td.sdConfig)
require.Equal(t, conf.Account, td.sdConfig.Account)
require.Equal(t, conf.DNSSuffix, td.sdConfig.DNSSuffix)
require.Equal(t, conf.Endpoint, td.sdConfig.Endpoint)
require.Equal(t, conf.Port, td.sdConfig.Port)
}
func TestTritonSDNewBadConfig(t *testing.T) {
td, err := newTritonDiscovery(badconf)
assert.Error(t, err)
assert.Nil(t, td)
require.Error(t, err)
require.Nil(t, td)
}
func TestTritonSDNewGroupsConfig(t *testing.T) {
td, err := newTritonDiscovery(groupsconf)
assert.NoError(t, err)
assert.NotNil(t, td)
assert.NotNil(t, td.client)
assert.NotZero(t, td.interval)
assert.NotNil(t, td.sdConfig)
assert.Equal(t, groupsconf.Account, td.sdConfig.Account)
assert.Equal(t, groupsconf.DNSSuffix, td.sdConfig.DNSSuffix)
assert.Equal(t, groupsconf.Endpoint, td.sdConfig.Endpoint)
assert.Equal(t, groupsconf.Groups, td.sdConfig.Groups)
assert.Equal(t, groupsconf.Port, td.sdConfig.Port)
require.NoError(t, err)
require.NotNil(t, td)
require.NotNil(t, td.client)
require.NotZero(t, td.interval)
require.NotNil(t, td.sdConfig)
require.Equal(t, groupsconf.Account, td.sdConfig.Account)
require.Equal(t, groupsconf.DNSSuffix, td.sdConfig.DNSSuffix)
require.Equal(t, groupsconf.Endpoint, td.sdConfig.Endpoint)
require.Equal(t, groupsconf.Groups, td.sdConfig.Groups)
require.Equal(t, groupsconf.Port, td.sdConfig.Port)
}
func TestTritonSDNewCNConfig(t *testing.T) {
td, err := newTritonDiscovery(cnconf)
assert.NoError(t, err)
assert.NotNil(t, td)
assert.NotNil(t, td.client)
assert.NotZero(t, td.interval)
assert.NotZero(t, td.sdConfig)
assert.Equal(t, cnconf.Role, td.sdConfig.Role)
assert.Equal(t, cnconf.Account, td.sdConfig.Account)
assert.Equal(t, cnconf.DNSSuffix, td.sdConfig.DNSSuffix)
assert.Equal(t, cnconf.Endpoint, td.sdConfig.Endpoint)
assert.Equal(t, cnconf.Port, td.sdConfig.Port)
require.NoError(t, err)
require.NotNil(t, td)
require.NotNil(t, td.client)
require.NotZero(t, td.interval)
require.NotZero(t, td.sdConfig)
require.Equal(t, cnconf.Role, td.sdConfig.Role)
require.Equal(t, cnconf.Account, td.sdConfig.Account)
require.Equal(t, cnconf.DNSSuffix, td.sdConfig.DNSSuffix)
require.Equal(t, cnconf.Endpoint, td.sdConfig.Endpoint)
require.Equal(t, cnconf.Port, td.sdConfig.Port)
}
func TestTritonSDRefreshNoTargets(t *testing.T) {
tgts := testTritonSDRefresh(t, conf, "{\"containers\":[]}")
assert.Nil(t, tgts)
require.Nil(t, tgts)
}
func TestTritonSDRefreshMultipleTargets(t *testing.T) {
@@ -156,8 +156,8 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) {
)
tgts := testTritonSDRefresh(t, conf, dstr)
assert.NotNil(t, tgts)
assert.Equal(t, 2, len(tgts))
require.NotNil(t, tgts)
require.Equal(t, 2, len(tgts))
}
func TestTritonSDRefreshNoServer(t *testing.T) {
@@ -166,8 +166,8 @@ func TestTritonSDRefreshNoServer(t *testing.T) {
)
_, err := td.refresh(context.Background())
assert.Error(t, err)
assert.Equal(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint"), true)
require.Error(t, err)
require.Equal(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint"), true)
}
func TestTritonSDRefreshCancelled(t *testing.T) {
@@ -178,8 +178,8 @@ func TestTritonSDRefreshCancelled(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
_, err := td.refresh(ctx)
assert.Error(t, err)
assert.Equal(t, strings.Contains(err.Error(), context.Canceled.Error()), true)
require.Error(t, err)
require.Equal(t, strings.Contains(err.Error(), context.Canceled.Error()), true)
}
func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
@@ -195,8 +195,8 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
)
tgts := testTritonSDRefresh(t, cnconf, dstr)
assert.NotNil(t, tgts)
assert.Equal(t, 2, len(tgts))
require.NotNil(t, tgts)
require.Equal(t, 2, len(tgts))
}
func TestTritonSDRefreshCNsWithHostname(t *testing.T) {
@@ -214,8 +214,8 @@ func TestTritonSDRefreshCNsWithHostname(t *testing.T) {
)
tgts := testTritonSDRefresh(t, cnconf, dstr)
assert.NotNil(t, tgts)
assert.Equal(t, 2, len(tgts))
require.NotNil(t, tgts)
require.Equal(t, 2, len(tgts))
}
func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet {
@@ -229,25 +229,25 @@ func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet
defer s.Close()
u, err := url.Parse(s.URL)
assert.NoError(t, err)
assert.NotNil(t, u)
require.NoError(t, err)
require.NotNil(t, u)
host, strport, err := net.SplitHostPort(u.Host)
assert.NoError(t, err)
assert.NotEmpty(t, host)
assert.NotEmpty(t, strport)
require.NoError(t, err)
require.NotEmpty(t, host)
require.NotEmpty(t, strport)
port, err := strconv.Atoi(strport)
assert.NoError(t, err)
assert.NotZero(t, port)
require.NoError(t, err)
require.NotZero(t, port)
td.sdConfig.Port = port
tgs, err := td.refresh(context.Background())
assert.NoError(t, err)
assert.Equal(t, 1, len(tgs))
require.NoError(t, err)
require.Equal(t, 1, len(tgs))
tg := tgs[0]
assert.NotNil(t, tg)
require.NotNil(t, tg)
return tg.Targets
}

@@ -21,7 +21,7 @@ import (
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
@@ -232,9 +232,9 @@ func TestGenerateTargetGroups(t *testing.T) {
func TestWriteOutput(t *testing.T) {
ctx := context.Background()
tmpfile, err := ioutil.TempFile("", "sd_adapter_test")
assert.NoError(t, err)
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
tmpfile.Close()
adapter := NewAdapter(ctx, tmpfile.Name(), "test_sd", nil, nil)
assert.NoError(t, adapter.writeOutput())
require.NoError(t, adapter.writeOutput())
}

@@ -29,7 +29,7 @@ import (
"github.com/prometheus/alertmanager/api/v2/models"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
yaml "gopkg.in/yaml.v2"
@@ -65,7 +65,7 @@ func TestPostPath(t *testing.T) {
},
}
for _, c := range cases {
assert.Equal(t, c.out, postPath(c.in, config.AlertmanagerAPIVersionV1))
require.Equal(t, c.out, postPath(c.in, config.AlertmanagerAPIVersionV1))
}
}
@@ -80,10 +80,10 @@ func TestHandlerNextBatch(t *testing.T) {
expected := append([]*Alert{}, h.queue...)
assert.NoError(t, alertsEqual(expected[0:maxBatchSize], h.nextBatch()))
assert.NoError(t, alertsEqual(expected[maxBatchSize:2*maxBatchSize], h.nextBatch()))
assert.NoError(t, alertsEqual(expected[2*maxBatchSize:], h.nextBatch()))
assert.Equal(t, 0, len(h.queue), "Expected queue to be empty but got %d alerts", len(h.queue))
require.NoError(t, alertsEqual(expected[0:maxBatchSize], h.nextBatch()))
require.NoError(t, alertsEqual(expected[maxBatchSize:2*maxBatchSize], h.nextBatch()))
require.NoError(t, alertsEqual(expected[2*maxBatchSize:], h.nextBatch()))
require.Equal(t, 0, len(h.queue), "Expected queue to be empty but got %d alerts", len(h.queue))
}
func alertsEqual(a, b []*Alert) error {
@@ -188,20 +188,20 @@ func TestHandlerSendAll(t *testing.T) {
t.Helper()
select {
case err := <-errc:
assert.NoError(t, err)
require.NoError(t, err)
default:
}
}
assert.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
checkNoErr()
status1.Store(int32(http.StatusNotFound))
assert.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
checkNoErr()
status2.Store(int32(http.StatusInternalServerError))
assert.False(t, h.sendAll(h.queue...), "all sends succeeded unexpectedly")
require.False(t, h.sendAll(h.queue...), "all sends succeeded unexpectedly")
checkNoErr()
}
@@ -215,11 +215,11 @@ func TestCustomDo(t *testing.T) {
received = true
body, err := ioutil.ReadAll(req.Body)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, testBody, string(body))
require.Equal(t, testBody, string(body))
assert.Equal(t, testURL, req.URL.String())
require.Equal(t, testURL, req.URL.String())
return &http.Response{
Body: ioutil.NopCloser(bytes.NewBuffer(nil)),
@@ -229,7 +229,7 @@
h.sendOne(context.Background(), nil, testURL, []byte(testBody))
assert.True(t, received, "Expected to receive an alert, but didn't")
require.True(t, received, "Expected to receive an alert, but didn't")
}
func TestExternalLabels(t *testing.T) {
@@ -263,7 +263,7 @@ func TestExternalLabels(t *testing.T) {
{Labels: labels.FromStrings("alertname", "externalrelabelthis", "a", "c")},
}
assert.NoError(t, alertsEqual(expected, h.queue))
require.NoError(t, alertsEqual(expected, h.queue))
}
func TestHandlerRelabel(t *testing.T) {
@@ -299,7 +299,7 @@ func TestHandlerRelabel(t *testing.T) {
{Labels: labels.FromStrings("alertname", "renamed")},
}
assert.NoError(t, alertsEqual(expected, h.queue))
require.NoError(t, alertsEqual(expected, h.queue))
}
func TestHandlerQueuing(t *testing.T) {
@@ -375,7 +375,7 @@ func TestHandlerQueuing(t *testing.T) {
case <-called:
expectedc <- expected
case err := <-errc:
assert.NoError(t, err)
require.NoError(t, err)
return
case <-time.After(5 * time.Second):
t.Fatalf("Alerts were not pushed")
@@ -408,7 +408,7 @@ func TestHandlerQueuing(t *testing.T) {
expectedc <- alerts[:maxBatchSize]
select {
case err := <-errc:
assert.NoError(t, err)
require.NoError(t, err)
case <-time.After(5 * time.Second):
t.Fatalf("Alerts were not pushed")
}
@@ -435,10 +435,10 @@ func TestLabelSetNotReused(t *testing.T) {
tg := makeInputTargetGroup()
_, _, err := alertmanagerFromGroup(tg, &config.AlertmanagerConfig{})
assert.NoError(t, err)
require.NoError(t, err)
// Target modified during alertmanager extraction
assert.Equal(t, tg, makeInputTargetGroup())
require.Equal(t, tg, makeInputTargetGroup())
}
func TestReload(t *testing.T) {
@@ -469,7 +469,7 @@ alerting:
if err := yaml.UnmarshalStrict([]byte(s), cfg); err != nil {
t.Fatalf("Unable to load YAML config: %s", err)
}
assert.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs))
require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs))
if err := n.ApplyConfig(cfg); err != nil {
t.Fatalf("Error Applying the config:%v", err)
@@ -486,7 +486,7 @@ alerting:
n.reload(tgs)
res := n.Alertmanagers()[0].String()
assert.Equal(t, tt.out, res)
require.Equal(t, tt.out, res)
}
}
@@ -523,7 +523,7 @@ alerting:
if err := yaml.UnmarshalStrict([]byte(s), cfg); err != nil {
t.Fatalf("Unable to load YAML config: %s", err)
}
assert.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs))
require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs))
if err := n.ApplyConfig(cfg); err != nil {
t.Fatalf("Error Applying the config:%v", err)
@@ -541,7 +541,7 @@ alerting:
n.reload(tgs)
res := n.DroppedAlertmanagers()[0].String()
assert.Equal(t, res, tt.out)
require.Equal(t, res, tt.out)
}
}
@@ -561,5 +561,5 @@ func makeInputTargetGroup() *targetgroup.Group {
}
func TestLabelsToOpenAPILabelSet(t *testing.T) {
assert.Equal(t, models.LabelSet{"aaa": "111", "bbb": "222"}, labelsToOpenAPILabelSet(labels.Labels{{Name: "aaa", Value: "111"}, {Name: "bbb", Value: "222"}}))
require.Equal(t, models.LabelSet{"aaa": "111", "bbb": "222"}, labelsToOpenAPILabelSet(labels.Labels{{Name: "aaa", Value: "111"}, {Name: "bbb", Value: "222"}}))
}

@@ -18,7 +18,7 @@ import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestLabels_String(t *testing.T) {
@@ -50,7 +50,7 @@ func TestLabels_String(t *testing.T) {
}
for _, c := range cases {
str := c.lables.String()
assert.Equal(t, c.expected, str)
require.Equal(t, c.expected, str)
}
}
@@ -181,7 +181,7 @@ func TestLabels_MatchLabels(t *testing.T) {
for i, test := range tests {
got := labels.MatchLabels(test.on, test.providedNames...)
assert.Equal(t, test.expected, got, "unexpected labelset for test case %d", i)
require.Equal(t, test.expected, got, "unexpected labelset for test case %d", i)
}
}
@ -206,8 +206,8 @@ func TestLabels_HasDuplicateLabelNames(t *testing.T) {
for i, c := range cases {
l, d := c.Input.HasDuplicateLabelNames()
assert.Equal(t, c.Duplicate, d, "test %d: incorrect duplicate bool", i)
assert.Equal(t, c.LabelName, l, "test %d: incorrect label name", i)
require.Equal(t, c.Duplicate, d, "test %d: incorrect duplicate bool", i)
require.Equal(t, c.LabelName, l, "test %d: incorrect label name", i)
}
}
@ -287,7 +287,7 @@ func TestLabels_WithoutEmpty(t *testing.T) {
},
} {
t.Run("", func(t *testing.T) {
assert.Equal(t, test.expected, test.input.WithoutEmpty())
require.Equal(t, test.expected, test.input.WithoutEmpty())
})
}
}
@ -368,7 +368,7 @@ func TestLabels_Equal(t *testing.T) {
for i, test := range tests {
got := Equal(labels, test.compared)
assert.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i)
require.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i)
}
}
@ -385,9 +385,9 @@ func TestLabels_FromStrings(t *testing.T) {
},
}
assert.Equal(t, expected, labels, "unexpected labelset")
require.Equal(t, expected, labels, "unexpected labelset")
assert.Panics(t, func() { FromStrings("aaa", "111", "bbb") })
require.Panics(t, func() { FromStrings("aaa", "111", "bbb") })
}
func TestLabels_Compare(t *testing.T) {
@ -505,7 +505,7 @@ func TestLabels_Compare(t *testing.T) {
for i, test := range tests {
got := Compare(labels, test.compared)
assert.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i)
require.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i)
}
}
@ -537,34 +537,34 @@ func TestLabels_Has(t *testing.T) {
for i, test := range tests {
got := labelsSet.Has(test.input)
assert.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i)
require.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i)
}
}
func TestLabels_Get(t *testing.T) {
assert.Equal(t, "", Labels{{"aaa", "111"}, {"bbb", "222"}}.Get("foo"))
assert.Equal(t, "111", Labels{{"aaa", "111"}, {"bbb", "222"}}.Get("aaa"))
require.Equal(t, "", Labels{{"aaa", "111"}, {"bbb", "222"}}.Get("foo"))
require.Equal(t, "111", Labels{{"aaa", "111"}, {"bbb", "222"}}.Get("aaa"))
}
func TestLabels_Copy(t *testing.T) {
assert.Equal(t, Labels{{"aaa", "111"}, {"bbb", "222"}}, Labels{{"aaa", "111"}, {"bbb", "222"}}.Copy())
require.Equal(t, Labels{{"aaa", "111"}, {"bbb", "222"}}, Labels{{"aaa", "111"}, {"bbb", "222"}}.Copy())
}
func TestLabels_Map(t *testing.T) {
assert.Equal(t, map[string]string{"aaa": "111", "bbb": "222"}, Labels{{"aaa", "111"}, {"bbb", "222"}}.Map())
require.Equal(t, map[string]string{"aaa": "111", "bbb": "222"}, Labels{{"aaa", "111"}, {"bbb", "222"}}.Map())
}
func TestLabels_WithLabels(t *testing.T) {
assert.Equal(t, Labels{{"aaa", "111"}, {"bbb", "222"}}, Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}.WithLabels("aaa", "bbb"))
require.Equal(t, Labels{{"aaa", "111"}, {"bbb", "222"}}, Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}.WithLabels("aaa", "bbb"))
}
func TestLabels_WithoutLabels(t *testing.T) {
assert.Equal(t, Labels{{"aaa", "111"}}, Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}.WithoutLabels("bbb", "ccc"))
assert.Equal(t, Labels{{"aaa", "111"}}, Labels{{"aaa", "111"}, {"bbb", "222"}, {MetricName, "333"}}.WithoutLabels("bbb"))
require.Equal(t, Labels{{"aaa", "111"}}, Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}.WithoutLabels("bbb", "ccc"))
require.Equal(t, Labels{{"aaa", "111"}}, Labels{{"aaa", "111"}, {"bbb", "222"}, {MetricName, "333"}}.WithoutLabels("bbb"))
}
func TestBulider_NewBulider(t *testing.T) {
assert.Equal(
require.Equal(
t,
&Builder{
base: Labels{{"aaa", "111"}},
@ -576,7 +576,7 @@ func TestBulider_NewBulider(t *testing.T) {
}
func TestBuilder_Del(t *testing.T) {
assert.Equal(
require.Equal(
t,
&Builder{
del: []string{"bbb"},
@ -590,7 +590,7 @@ func TestBuilder_Del(t *testing.T) {
}
func TestBuilder_Set(t *testing.T) {
assert.Equal(
require.Equal(
t,
&Builder{
base: Labels{{"aaa", "111"}},
@ -604,7 +604,7 @@ func TestBuilder_Set(t *testing.T) {
}).Set("bbb", "222"),
)
assert.Equal(
require.Equal(
t,
&Builder{
base: Labels{{"aaa", "111"}},
@ -620,7 +620,7 @@ func TestBuilder_Set(t *testing.T) {
}
func TestBuilder_Labels(t *testing.T) {
assert.Equal(
require.Equal(
t,
Labels{{"aaa", "111"}, {"ccc", "333"}, {"ddd", "444"}},
(&Builder{
@ -636,9 +636,9 @@ func TestLabels_Hash(t *testing.T) {
{Name: "foo", Value: "bar"},
{Name: "baz", Value: "qux"},
}
assert.Equal(t, lbls.Hash(), lbls.Hash())
assert.NotEqual(t, lbls.Hash(), Labels{lbls[1], lbls[0]}.Hash(), "unordered labels match.")
assert.NotEqual(t, lbls.Hash(), Labels{lbls[0]}.Hash(), "different labels match.")
require.Equal(t, lbls.Hash(), lbls.Hash())
require.NotEqual(t, lbls.Hash(), Labels{lbls[1], lbls[0]}.Hash(), "unordered labels match.")
require.NotEqual(t, lbls.Hash(), Labels{lbls[0]}.Hash(), "different labels match.")
}
var benchmarkLabelsResult uint64

@ -16,12 +16,12 @@ package labels
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func mustNewMatcher(t *testing.T, mType MatchType, value string) *Matcher {
m, err := NewMatcher(mType, "", value)
assert.NoError(t, err)
require.NoError(t, err)
return m
}
@ -84,7 +84,7 @@ func TestMatcher(t *testing.T) {
}
for _, test := range tests {
assert.Equal(t, test.matcher.Matches(test.value), test.match)
require.Equal(t, test.matcher.Matches(test.value), test.match)
}
}
@ -113,7 +113,7 @@ func TestInverse(t *testing.T) {
for _, test := range tests {
result, err := test.matcher.Inverse()
assert.NoError(t, err)
assert.Equal(t, test.expected.Type, result.Type)
require.NoError(t, err)
require.Equal(t, test.expected.Type, result.Type)
}
}

@ -17,7 +17,7 @@ import (
"regexp/syntax"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewFastRegexMatcher(t *testing.T) {
@ -54,8 +54,8 @@ func TestNewFastRegexMatcher(t *testing.T) {
for _, c := range cases {
m, err := NewFastRegexMatcher(c.regex)
assert.NoError(t, err)
assert.Equal(t, c.expected, m.MatchString(c.value))
require.NoError(t, err)
require.Equal(t, c.expected, m.MatchString(c.value))
}
}
@ -88,11 +88,11 @@ func TestOptimizeConcatRegex(t *testing.T) {
for _, c := range cases {
parsed, err := syntax.Parse(c.regex, syntax.Perl)
assert.NoError(t, err)
require.NoError(t, err)
prefix, suffix, contains := optimizeConcatRegex(parsed)
assert.Equal(t, c.prefix, prefix)
assert.Equal(t, c.suffix, suffix)
assert.Equal(t, c.contains, contains)
require.Equal(t, c.prefix, prefix)
require.Equal(t, c.suffix, suffix)
require.Equal(t, c.contains, contains)
}
}

@ -17,7 +17,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type counter int
@ -35,13 +35,13 @@ func TestDedupe(t *testing.T) {
// Log 10 times quickly, ensure they are deduped.
for i := 0; i < 10; i++ {
err := d.Log("msg", "hello")
assert.NoError(t, err)
require.NoError(t, err)
}
assert.Equal(t, 1, int(c))
require.Equal(t, 1, int(c))
// Wait, then log again, make sure it is logged.
time.Sleep(200 * time.Millisecond)
err := d.Log("msg", "hello")
assert.NoError(t, err)
assert.Equal(t, 2, int(c))
require.NoError(t, err)
require.Equal(t, 2, int(c))
}

@ -20,71 +20,71 @@ import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestJSONFileLogger_basic(t *testing.T) {
f, err := ioutil.TempFile("", "logging")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, f.Close())
assert.NoError(t, os.Remove(f.Name()))
require.NoError(t, f.Close())
require.NoError(t, os.Remove(f.Name()))
}()
l, err := NewJSONFileLogger(f.Name())
assert.NoError(t, err)
assert.NotNil(t, l, "logger can't be nil")
require.NoError(t, err)
require.NotNil(t, l, "logger can't be nil")
err = l.Log("test", "yes")
assert.NoError(t, err)
require.NoError(t, err)
r := make([]byte, 1024)
_, err = f.Read(r)
assert.NoError(t, err)
require.NoError(t, err)
result, err := regexp.Match(`^{"test":"yes","ts":"[^"]+"}\n`, r)
assert.NoError(t, err)
assert.True(t, result, "unexpected content: %s", r)
require.NoError(t, err)
require.True(t, result, "unexpected content: %s", r)
err = l.Close()
assert.NoError(t, err)
require.NoError(t, err)
err = l.file.Close()
assert.Error(t, err)
assert.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed")
require.Error(t, err)
require.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed")
}
func TestJSONFileLogger_parallel(t *testing.T) {
f, err := ioutil.TempFile("", "logging")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, f.Close())
assert.NoError(t, os.Remove(f.Name()))
require.NoError(t, f.Close())
require.NoError(t, os.Remove(f.Name()))
}()
l, err := NewJSONFileLogger(f.Name())
assert.NoError(t, err)
assert.NotNil(t, l, "logger can't be nil")
require.NoError(t, err)
require.NotNil(t, l, "logger can't be nil")
err = l.Log("test", "yes")
assert.NoError(t, err)
require.NoError(t, err)
l2, err := NewJSONFileLogger(f.Name())
assert.NoError(t, err)
assert.NotNil(t, l, "logger can't be nil")
require.NoError(t, err)
require.NotNil(t, l, "logger can't be nil")
err = l2.Log("test", "yes")
assert.NoError(t, err)
require.NoError(t, err)
err = l.Close()
assert.NoError(t, err)
require.NoError(t, err)
err = l.file.Close()
assert.Error(t, err)
assert.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed")
require.Error(t, err)
require.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed")
err = l2.Close()
assert.NoError(t, err)
require.NoError(t, err)
err = l2.file.Close()
assert.Error(t, err)
assert.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed")
require.Error(t, err)
require.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed")
}
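
One caveat that the queuing test above already respects: require aborts via t.FailNow, which the testing package only allows from the goroutine running the test, not from goroutines spawned inside it. Code that validates results produced concurrently should send errors over a channel and call require back on the test goroutine, as the errc pattern above does. A minimal sketch, with doWork as a hypothetical stand-in:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// doWork stands in for background work that may fail.
func doWork() error { return nil }

func TestBackgroundWork(t *testing.T) {
	errc := make(chan error, 1)
	go func() {
		// Do not call require here: t.FailNow runs runtime.Goexit
		// and is only safe on the test goroutine.
		errc <- doWork()
	}()
	// Receive the result and assert on the test goroutine instead.
	require.NoError(t, <-errc)
}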

@ -16,7 +16,7 @@ package pool
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func makeFunc(size int) interface{} {
@ -44,7 +44,7 @@ func TestPool(t *testing.T) {
}
for _, c := range cases {
ret := testPool.Get(c.size)
assert.Equal(t, c.expectedCap, cap(ret.([]int)))
require.Equal(t, c.expectedCap, cap(ret.([]int)))
testPool.Put(ret)
}
}

@ -17,7 +17,7 @@ import (
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
)
@ -414,7 +414,7 @@ func TestRelabel(t *testing.T) {
for _, test := range tests {
res := Process(test.input, test.relabel...)
assert.Equal(t, test.output, res)
require.Equal(t, test.output, res)
}
}
@ -440,7 +440,7 @@ func TestTargetLabelValidity(t *testing.T) {
{"foo${bar}foo", true},
}
for _, test := range tests {
assert.Equal(t, test.valid, relabelTarget.Match([]byte(test.str)),
require.Equal(t, test.valid, relabelTarget.Match([]byte(test.str)),
"Expected %q to be %v", test.str, test.valid)
}
}

@ -18,7 +18,7 @@ import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestParseFileSuccess(t *testing.T) {
@ -163,9 +163,9 @@ groups:
for _, tst := range tests {
rgs, errs := Parse([]byte(tst.ruleString))
assert.NotNil(t, rgs, "Rule parsing, rule=\n"+tst.ruleString)
require.NotNil(t, rgs, "Rule parsing, rule=\n"+tst.ruleString)
passed := (tst.shouldPass && len(errs) == 0) || (!tst.shouldPass && len(errs) > 0)
assert.True(t, passed, "Rule validation failed, rule=\n"+tst.ruleString)
require.True(t, passed, "Rule validation failed, rule=\n"+tst.ruleString)
}
}

@ -17,7 +17,7 @@ import (
"io"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/exemplar"
"github.com/prometheus/prometheus/pkg/labels"
@ -221,7 +221,7 @@ foo_total 17.0 1520879607.789 # {xx="yy"} 5`
if err == io.EOF {
break
}
assert.NoError(t, err)
require.NoError(t, err)
switch et {
case EntrySeries:
@ -231,40 +231,40 @@ foo_total 17.0 1520879607.789 # {xx="yy"} 5`
p.Metric(&res)
found := p.Exemplar(&e)
assert.Equal(t, exp[i].m, string(m))
assert.Equal(t, exp[i].t, ts)
assert.Equal(t, exp[i].v, v)
assert.Equal(t, exp[i].lset, res)
require.Equal(t, exp[i].m, string(m))
require.Equal(t, exp[i].t, ts)
require.Equal(t, exp[i].v, v)
require.Equal(t, exp[i].lset, res)
if exp[i].e == nil {
assert.Equal(t, false, found)
require.Equal(t, false, found)
} else {
assert.Equal(t, true, found)
assert.Equal(t, *exp[i].e, e)
require.Equal(t, true, found)
require.Equal(t, *exp[i].e, e)
}
res = res[:0]
case EntryType:
m, typ := p.Type()
assert.Equal(t, exp[i].m, string(m))
assert.Equal(t, exp[i].typ, typ)
require.Equal(t, exp[i].m, string(m))
require.Equal(t, exp[i].typ, typ)
case EntryHelp:
m, h := p.Help()
assert.Equal(t, exp[i].m, string(m))
assert.Equal(t, exp[i].help, string(h))
require.Equal(t, exp[i].m, string(m))
require.Equal(t, exp[i].help, string(h))
case EntryUnit:
m, u := p.Unit()
assert.Equal(t, exp[i].m, string(m))
assert.Equal(t, exp[i].unit, string(u))
require.Equal(t, exp[i].m, string(m))
require.Equal(t, exp[i].unit, string(u))
case EntryComment:
assert.Equal(t, exp[i].comment, string(p.Comment()))
require.Equal(t, exp[i].comment, string(p.Comment()))
}
i++
}
assert.Equal(t, len(exp), i)
require.Equal(t, len(exp), i)
}
func TestOpenMetricsParseErrors(t *testing.T) {
@ -511,7 +511,7 @@ func TestOpenMetricsParseErrors(t *testing.T) {
for err == nil {
_, err = p.Next()
}
assert.Equal(t, c.err, err.Error(), "test %d: %s", i, c.input)
require.Equal(t, c.err, err.Error(), "test %d: %s", i, c.input)
}
}
@ -578,10 +578,10 @@ func TestOMNullByteHandling(t *testing.T) {
}
if c.err == "" {
assert.Equal(t, io.EOF, err, "test %d", i)
require.Equal(t, io.EOF, err, "test %d", i)
continue
}
assert.Equal(t, c.err, err.Error(), "test %d", i)
require.Equal(t, c.err, err.Error(), "test %d", i)
}
}

@ -23,7 +23,7 @@ import (
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
)
@ -180,7 +180,7 @@ testmetric{label="\"bar\""} 1`
if err == io.EOF {
break
}
assert.NoError(t, err)
require.NoError(t, err)
switch et {
case EntrySeries:
@ -188,29 +188,29 @@ testmetric{label="\"bar\""} 1`
p.Metric(&res)
assert.Equal(t, exp[i].m, string(m))
assert.Equal(t, exp[i].t, ts)
assert.Equal(t, exp[i].v, v)
assert.Equal(t, exp[i].lset, res)
require.Equal(t, exp[i].m, string(m))
require.Equal(t, exp[i].t, ts)
require.Equal(t, exp[i].v, v)
require.Equal(t, exp[i].lset, res)
res = res[:0]
case EntryType:
m, typ := p.Type()
assert.Equal(t, exp[i].m, string(m))
assert.Equal(t, exp[i].typ, typ)
require.Equal(t, exp[i].m, string(m))
require.Equal(t, exp[i].typ, typ)
case EntryHelp:
m, h := p.Help()
assert.Equal(t, exp[i].m, string(m))
assert.Equal(t, exp[i].help, string(h))
require.Equal(t, exp[i].m, string(m))
require.Equal(t, exp[i].help, string(h))
case EntryComment:
assert.Equal(t, exp[i].comment, string(p.Comment()))
require.Equal(t, exp[i].comment, string(p.Comment()))
}
i++
}
assert.Equal(t, len(exp), i)
require.Equal(t, len(exp), i)
}
func TestPromParseErrors(t *testing.T) {
@ -278,8 +278,8 @@ func TestPromParseErrors(t *testing.T) {
for err == nil {
_, err = p.Next()
}
assert.Error(t, err)
assert.Equal(t, c.err, err.Error(), "test %d", i)
require.Error(t, err)
require.Equal(t, c.err, err.Error(), "test %d", i)
}
}
@ -330,12 +330,12 @@ func TestPromNullByteHandling(t *testing.T) {
}
if c.err == "" {
assert.Equal(t, io.EOF, err, "test %d", i)
require.Equal(t, io.EOF, err, "test %d", i)
continue
}
assert.Error(t, err)
assert.Equal(t, c.err, err.Error(), "test %d", i)
require.Error(t, err)
require.Equal(t, c.err, err.Error(), "test %d", i)
}
}
@ -350,11 +350,11 @@ func BenchmarkParse(b *testing.B) {
} {
for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
f, err := os.Open(fn)
assert.NoError(b, err)
require.NoError(b, err)
defer f.Close()
buf, err := ioutil.ReadAll(f)
assert.NoError(b, err)
require.NoError(b, err)
b.Run(parserName+"/no-decode-metric/"+fn, func(b *testing.B) {
total := 0
@ -484,18 +484,18 @@ func BenchmarkGzip(b *testing.B) {
for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
b.Run(fn, func(b *testing.B) {
f, err := os.Open(fn)
assert.NoError(b, err)
require.NoError(b, err)
defer f.Close()
var buf bytes.Buffer
gw := gzip.NewWriter(&buf)
n, err := io.Copy(gw, f)
assert.NoError(b, err)
assert.NoError(b, gw.Close())
require.NoError(b, err)
require.NoError(b, gw.Close())
gbuf, err := ioutil.ReadAll(&buf)
assert.NoError(b, err)
require.NoError(b, err)
k := b.N / promtestdataSampleCount
@ -507,11 +507,11 @@ func BenchmarkGzip(b *testing.B) {
for i := 0; i < k; i++ {
gr, err := gzip.NewReader(bytes.NewReader(gbuf))
assert.NoError(b, err)
require.NoError(b, err)
d, err := ioutil.ReadAll(gr)
assert.NoError(b, err)
assert.NoError(b, gr.Close())
require.NoError(b, err)
require.NoError(b, gr.Close())
total += len(d)
}
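
The benchmark conversions above work because require is not tied to *testing.T: its functions take testify's require.TestingT interface (Errorf plus FailNow), which *testing.B satisfies as well. A minimal sketch, with setup as a hypothetical helper:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// setup stands in for benchmark preparation that may fail.
func setup() ([]byte, error) { return []byte("data"), nil }

func BenchmarkWithRequire(b *testing.B) {
	buf, err := setup()
	require.NoError(b, err) // *testing.B satisfies require.TestingT
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = len(buf)
	}
}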

@ -23,7 +23,7 @@ import (
"time"
"github.com/go-kit/kit/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/pkg/labels"
@ -40,7 +40,7 @@ func TestQueryConcurrency(t *testing.T) {
maxConcurrency := 10
dir, err := ioutil.TempDir("", "test_concurrency")
assert.NoError(t, err)
require.NoError(t, err)
defer os.RemoveAll(dir)
queryTracker := NewActiveQueryTracker(dir, maxConcurrency, nil)
@ -119,10 +119,10 @@ func TestQueryTimeout(t *testing.T) {
})
res := query.Exec(ctx)
assert.Error(t, res.Err, "expected timeout error but got none")
require.Error(t, res.Err, "expected timeout error but got none")
var e ErrQueryTimeout
assert.True(t, errors.As(res.Err, &e), "expected timeout error but got: %s", res.Err)
require.True(t, errors.As(res.Err, &e), "expected timeout error but got: %s", res.Err)
}
const errQueryCanceled = ErrQueryCanceled("test statement execution")
@ -160,8 +160,8 @@ func TestQueryCancel(t *testing.T) {
block <- struct{}{}
<-processing
assert.Error(t, res.Err, "expected cancellation error for query1 but got none")
assert.Equal(t, errQueryCanceled, res.Err)
require.Error(t, res.Err, "expected cancellation error for query1 but got none")
require.Equal(t, errQueryCanceled, res.Err)
// Canceling a query before starting it must have no effect.
query2 := engine.newTestQuery(func(ctx context.Context) error {
@ -170,7 +170,7 @@ func TestQueryCancel(t *testing.T) {
query2.Cancel()
res = query2.Exec(ctx)
assert.NoError(t, res.Err)
require.NoError(t, res.Err)
}
// errQuerier implements storage.Querier which always returns error.
@ -211,18 +211,18 @@ func TestQueryError(t *testing.T) {
defer cancelCtx()
vectorQuery, err := engine.NewInstantQuery(queryable, "foo", time.Unix(1, 0))
assert.NoError(t, err)
require.NoError(t, err)
res := vectorQuery.Exec(ctx)
assert.Error(t, res.Err, "expected error on failed select but got none")
assert.True(t, errors.Is(res.Err, errStorage), "expected error doesn't match")
require.Error(t, res.Err, "expected error on failed select but got none")
require.True(t, errors.Is(res.Err, errStorage), "expected error doesn't match")
matrixQuery, err := engine.NewInstantQuery(queryable, "foo[1m]", time.Unix(1, 0))
assert.NoError(t, err)
require.NoError(t, err)
res = matrixQuery.Exec(ctx)
assert.Error(t, res.Err, "expected error on failed select but got none")
assert.True(t, errors.Is(res.Err, errStorage), "expected error doesn't match")
require.Error(t, res.Err, "expected error on failed select but got none")
require.True(t, errors.Is(res.Err, errStorage), "expected error doesn't match")
}
type noopHintRecordingQueryable struct {
@ -378,12 +378,12 @@ func TestSelectHintsSetCorrectly(t *testing.T) {
} else {
query, err = engine.NewRangeQuery(hintsRecorder, tc.query, timestamp.Time(tc.start), timestamp.Time(tc.end), time.Second)
}
assert.NoError(t, err)
require.NoError(t, err)
res := query.Exec(context.Background())
assert.NoError(t, res.Err)
require.NoError(t, res.Err)
assert.Equal(t, tc.expected, hintsRecorder.hints)
require.Equal(t, tc.expected, hintsRecorder.hints)
})
}
@ -426,8 +426,8 @@ func TestEngineShutdown(t *testing.T) {
block <- struct{}{}
<-processing
assert.Error(t, res.Err, "expected error on shutdown during query but got none")
assert.Equal(t, errQueryCanceled, res.Err)
require.Error(t, res.Err, "expected error on shutdown during query but got none")
require.Equal(t, errQueryCanceled, res.Err)
query2 := engine.newTestQuery(func(context.Context) error {
t.Fatalf("reached query execution unexpectedly")
@ -437,10 +437,10 @@ func TestEngineShutdown(t *testing.T) {
// The second query is started after the engine shut down. It must
// be canceled immediately.
res2 := query2.Exec(ctx)
assert.Error(t, res2.Err, "expected error on querying with canceled context but got none")
require.Error(t, res2.Err, "expected error on querying with canceled context but got none")
var e ErrQueryCanceled
assert.True(t, errors.As(res2.Err, &e), "expected cancellation error but got: %s", res2.Err)
require.True(t, errors.As(res2.Err, &e), "expected cancellation error but got: %s", res2.Err)
}
func TestEngineEvalStmtTimestamps(t *testing.T) {
@ -448,11 +448,11 @@ func TestEngineEvalStmtTimestamps(t *testing.T) {
load 10s
metric 1 2
`)
assert.NoError(t, err)
require.NoError(t, err)
defer test.Close()
err = test.Run()
assert.NoError(t, err)
require.NoError(t, err)
cases := []struct {
Query string
@ -529,16 +529,16 @@ load 10s
} else {
qry, err = test.QueryEngine().NewRangeQuery(test.Queryable(), c.Query, c.Start, c.End, c.Interval)
}
assert.NoError(t, err)
require.NoError(t, err)
res := qry.Exec(test.Context())
if c.ShouldError {
assert.Error(t, res.Err, "expected error for the query %q", c.Query)
require.Error(t, res.Err, "expected error for the query %q", c.Query)
continue
}
assert.NoError(t, res.Err)
assert.Equal(t, c.Result, res.Value, "query %q failed", c.Query)
require.NoError(t, res.Err)
require.Equal(t, c.Result, res.Value, "query %q failed", c.Query)
}
}
@ -549,11 +549,11 @@ load 10s
bigmetric{a="1"} 1 2
bigmetric{a="2"} 1 2
`)
assert.NoError(t, err)
require.NoError(t, err)
defer test.Close()
err = test.Run()
assert.NoError(t, err)
require.NoError(t, err)
cases := []struct {
Query string
@ -765,11 +765,11 @@ load 10s
} else {
qry, err = engine.NewRangeQuery(test.Queryable(), c.Query, c.Start, c.End, c.Interval)
}
assert.NoError(t, err)
require.NoError(t, err)
res := qry.Exec(test.Context())
assert.Equal(t, c.Result.Err, res.Err)
assert.Equal(t, c.Result.Value, res.Value, "query %q failed", c.Query)
require.Equal(t, c.Result.Err, res.Err)
require.Equal(t, c.Result.Value, res.Value, "query %q failed", c.Query)
}
}
@ -1052,21 +1052,21 @@ func TestSubquerySelector(t *testing.T) {
} {
t.Run("", func(t *testing.T) {
test, err := NewTest(t, tst.loadString)
assert.NoError(t, err)
require.NoError(t, err)
defer test.Close()
assert.NoError(t, test.Run())
require.NoError(t, test.Run())
engine := test.QueryEngine()
for _, c := range tst.cases {
t.Run(c.Query, func(t *testing.T) {
qry, err := engine.NewInstantQuery(test.Queryable(), c.Query, c.Start)
assert.NoError(t, err)
require.NoError(t, err)
res := qry.Exec(test.Context())
assert.Equal(t, c.Result.Err, res.Err)
require.Equal(t, c.Result.Err, res.Err)
mat := res.Value.(Matrix)
sort.Sort(mat)
assert.Equal(t, c.Result.Value, mat)
require.Equal(t, c.Result.Value, mat)
})
}
})
@ -1111,7 +1111,7 @@ func TestQueryLogger_basic(t *testing.T) {
return contextDone(ctx, "test statement execution")
})
res := query.Exec(ctx)
assert.NoError(t, res.Err)
require.NoError(t, res.Err)
}
// Query works without query log initialized.
@ -1121,28 +1121,28 @@ func TestQueryLogger_basic(t *testing.T) {
engine.SetQueryLogger(f1)
queryExec()
for i, field := range []interface{}{"params", map[string]interface{}{"query": "test statement"}} {
assert.Equal(t, field, f1.logs[i])
require.Equal(t, field, f1.logs[i])
}
l := len(f1.logs)
queryExec()
assert.Equal(t, 2*l, len(f1.logs))
require.Equal(t, 2*l, len(f1.logs))
// Test that we close the query logger when unsetting it.
assert.False(t, f1.closed, "expected f1 to be open, got closed")
require.False(t, f1.closed, "expected f1 to be open, got closed")
engine.SetQueryLogger(nil)
assert.True(t, f1.closed, "expected f1 to be closed, got open")
require.True(t, f1.closed, "expected f1 to be closed, got open")
queryExec()
// Test that we close the query logger when swapping.
f2 := NewFakeQueryLogger()
f3 := NewFakeQueryLogger()
engine.SetQueryLogger(f2)
assert.False(t, f2.closed, "expected f2 to be open, got closed")
require.False(t, f2.closed, "expected f2 to be open, got closed")
queryExec()
engine.SetQueryLogger(f3)
assert.True(t, f2.closed, "expected f2 to be closed, got open")
assert.False(t, f3.closed, "expected f3 to be open, got closed")
require.True(t, f2.closed, "expected f2 to be closed, got open")
require.False(t, f3.closed, "expected f3 to be open, got closed")
queryExec()
}
@ -1166,12 +1166,12 @@ func TestQueryLogger_fields(t *testing.T) {
})
res := query.Exec(ctx)
assert.NoError(t, res.Err)
require.NoError(t, res.Err)
expected := []string{"foo", "bar"}
for i, field := range expected {
v := f1.logs[len(f1.logs)-len(expected)+i].(string)
assert.Equal(t, field, v)
require.Equal(t, field, v)
}
}
@ -1196,9 +1196,9 @@ func TestQueryLogger_error(t *testing.T) {
})
res := query.Exec(ctx)
assert.Error(t, res.Err, "query should have failed")
require.Error(t, res.Err, "query should have failed")
for i, field := range []interface{}{"params", map[string]interface{}{"query": "test statement"}, "error", testErr} {
assert.Equal(t, f1.logs[i], field)
require.Equal(t, f1.logs[i], field)
}
}

@ -18,7 +18,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/timestamp"
@ -46,28 +46,28 @@ func TestDeriv(t *testing.T) {
a.Add(metric, 1493712816939, 1.0)
a.Add(metric, 1493712846939, 1.0)
assert.NoError(t, a.Commit())
require.NoError(t, a.Commit())
query, err := engine.NewInstantQuery(storage, "deriv(foo[30m])", timestamp.Time(1493712846939))
assert.NoError(t, err)
require.NoError(t, err)
result := query.Exec(context.Background())
assert.NoError(t, result.Err)
require.NoError(t, result.Err)
vec, _ := result.Vector()
assert.Equal(t, 1, len(vec), "Expected 1 result, got %d", len(vec))
assert.Equal(t, 0.0, vec[0].V, "Expected 0.0 as value, got %f", vec[0].V)
require.Equal(t, 1, len(vec), "Expected 1 result, got %d", len(vec))
require.Equal(t, 0.0, vec[0].V, "Expected 0.0 as value, got %f", vec[0].V)
}
func TestFunctionList(t *testing.T) {
// Test that Functions and parser.Functions list the same functions.
for i := range FunctionCalls {
_, ok := parser.Functions[i]
assert.True(t, ok, "function %s exists in promql package, but not in parser package", i)
require.True(t, ok, "function %s exists in promql package, but not in parser package", i)
}
for i := range parser.Functions {
_, ok := FunctionCalls[i]
assert.True(t, ok, "function %s exists in parser package, but not in promql package", i)
require.True(t, ok, "function %s exists in parser package, but not in promql package", i)
}
}

@ -16,7 +16,7 @@ package parser
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type testCase struct {
@ -731,10 +731,10 @@ func TestLexer(t *testing.T) {
}
eofItem := Item{EOF, Pos(len(test.input)), ""}
assert.Equal(t, lastItem, eofItem, "%d: input %q", i, test.input)
require.Equal(t, lastItem, eofItem, "%d: input %q", i, test.input)
out = out[:len(out)-1]
assert.Equal(t, test.expected, out, "%d: input %q", i, test.input)
require.Equal(t, test.expected, out, "%d: input %q", i, test.input)
}
})
}

@ -20,7 +20,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
)
@ -2658,23 +2658,23 @@ func TestParseExpressions(t *testing.T) {
expr, err := ParseExpr(test.input)
// Unexpected errors are always caused by a bug.
assert.NotEqual(t, err, errUnexpected, "unexpected error occurred")
require.NotEqual(t, err, errUnexpected, "unexpected error occurred")
if !test.fail {
assert.NoError(t, err)
assert.Equal(t, test.expected, expr, "error on input '%s'", test.input)
require.NoError(t, err)
require.Equal(t, test.expected, expr, "error on input '%s'", test.input)
} else {
assert.Error(t, err)
assert.Contains(t, err.Error(), test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error())
require.Error(t, err)
require.Contains(t, err.Error(), test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error())
errorList, ok := err.(ParseErrors)
assert.True(t, ok, "unexpected error type")
require.True(t, ok, "unexpected error type")
for _, e := range errorList {
assert.True(t, 0 <= e.PositionRange.Start, "parse error has negative position\nExpression '%s'\nError: %v", test.input, e)
assert.True(t, e.PositionRange.Start <= e.PositionRange.End, "parse error has negative length\nExpression '%s'\nError: %v", test.input, e)
assert.True(t, e.PositionRange.End <= Pos(len(test.input)), "parse error is not contained in input\nExpression '%s'\nError: %v", test.input, e)
require.True(t, 0 <= e.PositionRange.Start, "parse error has negative position\nExpression '%s'\nError: %v", test.input, e)
require.True(t, e.PositionRange.Start <= e.PositionRange.End, "parse error has negative length\nExpression '%s'\nError: %v", test.input, e)
require.True(t, e.PositionRange.End <= Pos(len(test.input)), "parse error is not contained in input\nExpression '%s'\nError: %v", test.input, e)
}
}
})
@ -2684,11 +2684,11 @@ func TestParseExpressions(t *testing.T) {
// NaN has no equality. Thus, we need a separate test for it.
func TestNaNExpression(t *testing.T) {
expr, err := ParseExpr("NaN")
assert.NoError(t, err)
require.NoError(t, err)
nl, ok := expr.(*NumberLiteral)
assert.True(t, ok, "expected number literal but got %T", expr)
assert.True(t, math.IsNaN(float64(nl.Val)), "expected 'NaN' in number literal but got %v", nl.Val)
require.True(t, ok, "expected number literal but got %T", expr)
require.True(t, math.IsNaN(float64(nl.Val)), "expected 'NaN' in number literal but got %v", nl.Val)
}
func mustLabelMatcher(mt labels.MatchType, name, val string) *labels.Matcher {
@ -2803,14 +2803,14 @@ func TestParseSeries(t *testing.T) {
metric, vals, err := ParseSeriesDesc(test.input)
// Unexpected errors are always caused by a bug.
assert.NotEqual(t, err, errUnexpected, "unexpected error occurred")
require.NotEqual(t, err, errUnexpected, "unexpected error occurred")
if !test.fail {
assert.NoError(t, err)
assert.Equal(t, test.expectedMetric, metric, "error on input '%s'", test.input)
assert.Equal(t, test.expectedValues, vals, "error in input '%s'", test.input)
require.NoError(t, err)
require.Equal(t, test.expectedMetric, metric, "error on input '%s'", test.input)
require.Equal(t, test.expectedValues, vals, "error in input '%s'", test.input)
} else {
assert.Error(t, err)
require.Error(t, err)
}
}
}
@ -2820,7 +2820,7 @@ func TestRecoverParserRuntime(t *testing.T) {
var err error
defer func() {
assert.Equal(t, errUnexpected, err)
require.Equal(t, errUnexpected, err)
}()
defer p.recover(&err)
// Cause a runtime panic.
@ -2836,7 +2836,7 @@ func TestRecoverParserError(t *testing.T) {
e := errors.New("custom error")
defer func() {
assert.Equal(t, e.Error(), err.Error())
require.Equal(t, e.Error(), err.Error())
}()
defer p.recover(&err)

@ -16,7 +16,7 @@ package parser
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestExprString(t *testing.T) {
@ -102,13 +102,13 @@ func TestExprString(t *testing.T) {
for _, test := range inputs {
expr, err := ParseExpr(test.in)
assert.NoError(t, err)
require.NoError(t, err)
exp := test.in
if test.out != "" {
exp = test.out
}
assert.Equal(t, exp, expr.String())
require.Equal(t, exp, expr.String())
}
}

@ -17,18 +17,18 @@ import (
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestEvaluations(t *testing.T) {
files, err := filepath.Glob("testdata/*.test")
assert.NoError(t, err)
require.NoError(t, err)
for _, fn := range files {
t.Run(fn, func(t *testing.T) {
test, err := newTestFromFile(t, fn)
assert.NoError(t, err)
assert.NoError(t, test.Run())
require.NoError(t, err)
require.NoError(t, test.Run())
test.Close()
})

@ -20,7 +20,7 @@ import (
"regexp"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestQueryLogging(t *testing.T) {
@ -109,18 +109,18 @@ func TestIndexReuse(t *testing.T) {
func TestMMapFile(t *testing.T) {
file, err := ioutil.TempFile("", "mmapedFile")
assert.NoError(t, err)
require.NoError(t, err)
filename := file.Name()
defer os.Remove(filename)
fileAsBytes, err := getMMapedFile(filename, 2, nil)
assert.NoError(t, err)
require.NoError(t, err)
copy(fileAsBytes, "ab")
f, err := os.Open(filename)
assert.NoError(t, err)
require.NoError(t, err)
bytes := make([]byte, 4)
n, err := f.Read(bytes)

@ -18,7 +18,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
)
@ -110,12 +110,12 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
for _, c := range cases {
suite, err := NewLazyLoader(t, c.loadString)
assert.NoError(t, err)
require.NoError(t, err)
defer suite.Close()
for _, tc := range c.testCases {
suite.WithSamplesTill(tc.ts, func(err error) {
assert.NoError(t, err)
require.NoError(t, err)
if tc.checkOnlyError {
return
}
@ -123,20 +123,20 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
// Check the series.
queryable := suite.Queryable()
querier, err := queryable.Querier(suite.Context(), math.MinInt64, math.MaxInt64)
assert.NoError(t, err)
require.NoError(t, err)
for _, s := range tc.series {
var matchers []*labels.Matcher
for _, label := range s.Metric {
m, err := labels.NewMatcher(labels.MatchEqual, label.Name, label.Value)
assert.NoError(t, err)
require.NoError(t, err)
matchers = append(matchers, m)
}
// Get the series for the matcher.
ss := querier.Select(false, nil, matchers...)
assert.True(t, ss.Next())
require.True(t, ss.Next())
storageSeries := ss.At()
assert.False(t, ss.Next(), "Expecting only 1 series")
require.False(t, ss.Next(), "Expecting only 1 series")
// Convert `storage.Series` to `promql.Series`.
got := Series{
@ -147,9 +147,9 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
t, v := it.At()
got.Points = append(got.Points, Point{T: t, V: v})
}
assert.NoError(t, it.Err())
require.NoError(t, it.Err())
assert.Equal(t, s, got)
require.Equal(t, s, got)
}
})
}

@ -20,7 +20,7 @@ import (
"time"
"github.com/go-kit/kit/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/timestamp"
@ -31,7 +31,7 @@ import (
func TestAlertingRuleHTMLSnippet(t *testing.T) {
expr, err := parser.ParseExpr(`foo{html="<b>BOLD<b>"}`)
assert.NoError(t, err)
require.NoError(t, err)
rule := NewAlertingRule("testrule", expr, 0, labels.FromStrings("html", "<b>BOLD</b>"), labels.FromStrings("html", "<b>BOLD</b>"), nil, false, nil)
const want = template.HTML(`alert: <a href="/test/prefix/graph?g0.expr=ALERTS%7Balertname%3D%22testrule%22%7D&g0.tab=1">testrule</a>
@ -43,7 +43,7 @@ annotations:
`)
got := rule.HTMLSnippet("/test/prefix")
assert.Equal(t, want, got, "incorrect HTML snippet; want:\n\n|%v|\n\ngot:\n\n|%v|", want, got)
require.Equal(t, want, got, "incorrect HTML snippet; want:\n\n|%v|\n\ngot:\n\n|%v|", want, got)
}
func TestAlertingRuleState(t *testing.T) {
@ -82,7 +82,7 @@ func TestAlertingRuleState(t *testing.T) {
rule := NewAlertingRule(test.name, nil, 0, nil, nil, nil, true, nil)
rule.active = test.active
got := rule.State()
assert.Equal(t, test.want, got, "test case %d unexpected AlertState, want:%d got:%d", i, test.want, got)
require.Equal(t, test.want, got, "test case %d unexpected AlertState, want:%d got:%d", i, test.want, got)
}
}
@ -91,13 +91,13 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
load 1m
http_requests{job="app-server", instance="0"} 75 85 70 70
`)
assert.NoError(t, err)
require.NoError(t, err)
defer suite.Close()
assert.NoError(t, suite.Run())
require.NoError(t, suite.Run())
expr, err := parser.ParseExpr(`http_requests < 100`)
assert.NoError(t, err)
require.NoError(t, err)
rule := NewAlertingRule(
"HTTPRequestRateLow",
@ -171,7 +171,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
evalTime := baseTime.Add(time.Duration(i) * time.Minute)
result[0].Point.T = timestamp.FromTime(evalTime)
res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil)
assert.NoError(t, err)
require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
for _, smpl := range res {
@ -180,11 +180,11 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
assert.Equal(t, "ALERTS_FOR_STATE", smplName)
require.Equal(t, "ALERTS_FOR_STATE", smplName)
}
}
assert.Equal(t, result, filteredRes)
require.Equal(t, result, filteredRes)
}
}
@ -193,13 +193,13 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
load 1m
http_requests{job="app-server", instance="0"} 75 85 70 70
`)
assert.NoError(t, err)
require.NoError(t, err)
defer suite.Close()
assert.NoError(t, suite.Run())
require.NoError(t, suite.Run())
expr, err := parser.ParseExpr(`http_requests < 100`)
assert.NoError(t, err)
require.NoError(t, err)
ruleWithoutExternalLabels := NewAlertingRule(
"ExternalLabelDoesNotExist",
@ -252,32 +252,32 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
res, err := ruleWithoutExternalLabels.Eval(
suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil,
)
assert.NoError(t, err)
require.NoError(t, err)
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS" {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
assert.Equal(t, "ALERTS_FOR_STATE", smplName)
require.Equal(t, "ALERTS_FOR_STATE", smplName)
}
}
res, err = ruleWithExternalLabels.Eval(
suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil,
)
assert.NoError(t, err)
require.NoError(t, err)
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS" {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
assert.Equal(t, "ALERTS_FOR_STATE", smplName)
require.Equal(t, "ALERTS_FOR_STATE", smplName)
}
}
assert.Equal(t, result, filteredRes)
require.Equal(t, result, filteredRes)
}
func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
@ -285,13 +285,13 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
load 1m
http_requests{job="app-server", instance="0"} 75 85 70 70
`)
assert.NoError(t, err)
require.NoError(t, err)
defer suite.Close()
assert.NoError(t, suite.Run())
require.NoError(t, suite.Run())
expr, err := parser.ParseExpr(`http_requests < 100`)
assert.NoError(t, err)
require.NoError(t, err)
rule := NewAlertingRule(
"EmptyLabel",
@ -322,17 +322,17 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
res, err := rule.Eval(
suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil,
)
assert.NoError(t, err)
require.NoError(t, err)
for _, smpl := range res {
smplName := smpl.Metric.Get("__name__")
if smplName == "ALERTS" {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
assert.Equal(t, "ALERTS_FOR_STATE", smplName)
require.Equal(t, "ALERTS_FOR_STATE", smplName)
}
}
assert.Equal(t, result, filteredRes)
require.Equal(t, result, filteredRes)
}
func TestAlertingRuleDuplicate(t *testing.T) {
@ -363,6 +363,6 @@ func TestAlertingRuleDuplicate(t *testing.T) {
true, log.NewNopLogger(),
)
_, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil)
assert.Error(t, err)
assert.EqualError(t, err, "vector contains metrics with the same labelset after applying alert labels")
require.Error(t, err)
require.EqualError(t, err, "vector contains metrics with the same labelset after applying alert labels")
}

@ -26,7 +26,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
yaml "gopkg.in/yaml.v2"
@ -50,14 +50,14 @@ func TestAlertingRule(t *testing.T) {
http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 95 105 105 95 85
http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 80 90 100 110 120 130 140
`)
assert.NoError(t, err)
require.NoError(t, err)
defer suite.Close()
err = suite.Run()
assert.NoError(t, err)
require.NoError(t, err)
expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
assert.NoError(t, err)
require.NoError(t, err)
rule := NewAlertingRule(
"HTTPRequestRateLow",
@ -157,7 +157,7 @@ func TestAlertingRule(t *testing.T) {
evalTime := baseTime.Add(test.time)
res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil)
assert.NoError(t, err)
require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
for _, smpl := range res {
@ -166,21 +166,21 @@ func TestAlertingRule(t *testing.T) {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
assert.Equal(t, smplName, "ALERTS_FOR_STATE")
require.Equal(t, smplName, "ALERTS_FOR_STATE")
}
}
for i := range test.result {
test.result[i].T = timestamp.FromTime(evalTime)
}
assert.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
sort.Slice(filteredRes, func(i, j int) bool {
return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
})
assert.Equal(t, test.result, filteredRes)
require.Equal(t, test.result, filteredRes)
for _, aa := range rule.ActiveAlerts() {
assert.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
}
}
@ -191,14 +191,14 @@ func TestForStateAddSamples(t *testing.T) {
http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 95 105 105 95 85
http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 80 90 100 110 120 130 140
`)
assert.NoError(t, err)
require.NoError(t, err)
defer suite.Close()
err = suite.Run()
assert.NoError(t, err)
require.NoError(t, err)
expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
assert.NoError(t, err)
require.NoError(t, err)
rule := NewAlertingRule(
"HTTPRequestRateLow",
@ -306,7 +306,7 @@ func TestForStateAddSamples(t *testing.T) {
}
res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil)
assert.NoError(t, err)
require.NoError(t, err)
var filteredRes promql.Vector // After removing 'ALERTS' samples.
for _, smpl := range res {
@ -315,7 +315,7 @@ func TestForStateAddSamples(t *testing.T) {
filteredRes = append(filteredRes, smpl)
} else {
// If not 'ALERTS_FOR_STATE', it has to be 'ALERTS'.
assert.Equal(t, smplName, "ALERTS")
require.Equal(t, smplName, "ALERTS")
}
}
for i := range test.result {
@ -325,15 +325,15 @@ func TestForStateAddSamples(t *testing.T) {
test.result[i].V = forState
}
}
assert.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
sort.Slice(filteredRes, func(i, j int) bool {
return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
})
assert.Equal(t, test.result, filteredRes)
require.Equal(t, test.result, filteredRes)
for _, aa := range rule.ActiveAlerts() {
assert.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
}
@ -352,14 +352,14 @@ func TestForStateRestore(t *testing.T) {
http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 50 0 0 25 0 0 40 0 120
http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 125 90 60 0 0 25 0 0 40 0 130
`)
assert.NoError(t, err)
require.NoError(t, err)
defer suite.Close()
err = suite.Run()
assert.NoError(t, err)
require.NoError(t, err)
expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
assert.NoError(t, err)
require.NoError(t, err)
opts := &ManagerOptions{
QueryFunc: EngineQueryFunc(suite.QueryEngine(), suite.Storage()),
@ -402,7 +402,7 @@ func TestForStateRestore(t *testing.T) {
exp := rule.ActiveAlerts()
for _, aa := range exp {
assert.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
sort.Slice(exp, func(i, j int) bool {
return labels.Compare(exp[i].Labels, exp[j].Labels) < 0
@ -466,7 +466,7 @@ func TestForStateRestore(t *testing.T) {
got := newRule.ActiveAlerts()
for _, aa := range got {
assert.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
sort.Slice(got, func(i, j int) bool {
return labels.Compare(got[i].Labels, got[j].Labels) < 0
@ -474,27 +474,27 @@ func TestForStateRestore(t *testing.T) {
// Checking if we have restored it correctly.
if tst.noRestore {
assert.Equal(t, tst.num, len(got))
require.Equal(t, tst.num, len(got))
for _, e := range got {
assert.Equal(t, e.ActiveAt, restoreTime)
require.Equal(t, e.ActiveAt, restoreTime)
}
} else if tst.gracePeriod {
assert.Equal(t, tst.num, len(got))
require.Equal(t, tst.num, len(got))
for _, e := range got {
assert.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime))
require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime))
}
} else {
exp := tst.alerts
assert.Equal(t, len(exp), len(got))
require.Equal(t, len(exp), len(got))
sortAlerts(exp)
sortAlerts(got)
for i, e := range exp {
assert.Equal(t, e.Labels, got[i].Labels)
require.Equal(t, e.Labels, got[i].Labels)
// Difference in time should be within 1e6 ns, i.e. 1ms
// (due to conversion between ns & ms, float64 & int64).
activeAtDiff := float64(e.ActiveAt.Unix() + int64(tst.downDuration/time.Second) - got[i].ActiveAt.Unix())
assert.Equal(t, 0.0, math.Abs(activeAtDiff), "'for' state restored time is wrong")
require.Equal(t, 0.0, math.Abs(activeAtDiff), "'for' state restored time is wrong")
}
}
}
@ -535,7 +535,7 @@ func TestStaleness(t *testing.T) {
}
expr, err := parser.ParseExpr("a + 1")
assert.NoError(t, err)
require.NoError(t, err)
rule := NewRecordingRule("a_plus_one", expr, labels.Labels{})
group := NewGroup(GroupOptions{
Name: "default",
@ -552,7 +552,7 @@ func TestStaleness(t *testing.T) {
app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 2000, math.Float64frombits(value.StaleNaN))
err = app.Commit()
assert.NoError(t, err)
require.NoError(t, err)
ctx := context.Background()
@ -562,31 +562,31 @@ func TestStaleness(t *testing.T) {
group.Eval(ctx, time.Unix(2, 0))
querier, err := st.Querier(context.Background(), 0, 2000)
assert.NoError(t, err)
require.NoError(t, err)
defer querier.Close()
matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a_plus_one")
assert.NoError(t, err)
require.NoError(t, err)
set := querier.Select(false, nil, matcher)
samples, err := readSeriesSet(set)
assert.NoError(t, err)
require.NoError(t, err)
metric := labels.FromStrings(model.MetricNameLabel, "a_plus_one").String()
metricSample, ok := samples[metric]
assert.True(t, ok, "Series %s not returned.", metric)
assert.True(t, value.IsStaleNaN(metricSample[2].V), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[2].V))
metricSample[2].V = 42 // assert.Equal cannot handle NaN.
require.True(t, ok, "Series %s not returned.", metric)
require.True(t, value.IsStaleNaN(metricSample[2].V), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[2].V))
metricSample[2].V = 42 // require.Equal cannot handle NaN.
want := map[string][]promql.Point{
metric: {{T: 0, V: 2}, {T: 1000, V: 3}, {T: 2000, V: 42}},
}
assert.Equal(t, want, samples)
require.Equal(t, want, samples)
}
// Convert a SeriesSet into a form usable with assert.Equal.
// Convert a SeriesSet into a form usable with require.Equal.
func readSeriesSet(ss storage.SeriesSet) (map[string][]promql.Point, error) {
result := map[string][]promql.Point{}
@ -654,11 +654,11 @@ func TestCopyState(t *testing.T) {
{"a2": labels.Labels{{Name: "l2", Value: "v1"}}},
nil,
}
assert.Equal(t, want, newGroup.seriesInPreviousEval)
assert.Equal(t, oldGroup.rules[0], newGroup.rules[3])
assert.Equal(t, oldGroup.evaluationTime, newGroup.evaluationTime)
assert.Equal(t, oldGroup.lastEvaluation, newGroup.lastEvaluation)
assert.Equal(t, []labels.Labels{{{Name: "l1", Value: "v3"}}}, newGroup.staleSeries)
require.Equal(t, want, newGroup.seriesInPreviousEval)
require.Equal(t, oldGroup.rules[0], newGroup.rules[3])
require.Equal(t, oldGroup.evaluationTime, newGroup.evaluationTime)
require.Equal(t, oldGroup.lastEvaluation, newGroup.lastEvaluation)
require.Equal(t, []labels.Labels{{{Name: "l1", Value: "v3"}}}, newGroup.staleSeries)
}
func TestDeletedRuleMarkedStale(t *testing.T) {
@ -684,21 +684,21 @@ func TestDeletedRuleMarkedStale(t *testing.T) {
newGroup.Eval(context.Background(), time.Unix(0, 0))
querier, err := st.Querier(context.Background(), 0, 2000)
assert.NoError(t, err)
require.NoError(t, err)
defer querier.Close()
matcher, err := labels.NewMatcher(labels.MatchEqual, "l1", "v1")
assert.NoError(t, err)
require.NoError(t, err)
set := querier.Select(false, nil, matcher)
samples, err := readSeriesSet(set)
assert.NoError(t, err)
require.NoError(t, err)
metric := labels.FromStrings("l1", "v1").String()
metricSample, ok := samples[metric]
assert.True(t, ok, "Series %s not returned.", metric)
assert.True(t, value.IsStaleNaN(metricSample[0].V), "Appended sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[0].V))
require.True(t, ok, "Series %s not returned.", metric)
require.True(t, value.IsStaleNaN(metricSample[0].V), "Appended sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[0].V))
}
func TestUpdate(t *testing.T) {
@ -726,8 +726,8 @@ func TestUpdate(t *testing.T) {
defer ruleManager.Stop()
err := ruleManager.Update(10*time.Second, files, nil)
assert.NoError(t, err)
assert.Greater(t, len(ruleManager.groups), 0, "expected non-empty rule groups")
require.NoError(t, err)
require.Greater(t, len(ruleManager.groups), 0, "expected non-empty rule groups")
ogs := map[string]*Group{}
for h, g := range ruleManager.groups {
g.seriesInPreviousEval = []map[string]labels.Labels{
@ -737,26 +737,26 @@ func TestUpdate(t *testing.T) {
}
err = ruleManager.Update(10*time.Second, files, nil)
assert.NoError(t, err)
require.NoError(t, err)
for h, g := range ruleManager.groups {
for _, actual := range g.seriesInPreviousEval {
assert.Equal(t, expected, actual)
require.Equal(t, expected, actual)
}
// Groups are the same because of no updates.
assert.Equal(t, ogs[h], g)
require.Equal(t, ogs[h], g)
}
// Groups will be recreated if updated.
rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml")
assert.Equal(t, 0, len(errs), "file parsing failures")
require.Equal(t, 0, len(errs), "file parsing failures")
tmpFile, err := ioutil.TempFile("", "rules.test.*.yaml")
assert.NoError(t, err)
require.NoError(t, err)
defer os.Remove(tmpFile.Name())
defer tmpFile.Close()
err = ruleManager.Update(10*time.Second, []string{tmpFile.Name()}, nil)
assert.NoError(t, err)
require.NoError(t, err)
for h, g := range ruleManager.groups {
ogs[h] = g
@ -822,12 +822,12 @@ func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest {
func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleManager *Manager, expected map[string]labels.Labels, ogs map[string]*Group) {
bs, err := yaml.Marshal(formatRules(rgs))
assert.NoError(t, err)
require.NoError(t, err)
tmpFile.Seek(0, 0)
_, err = tmpFile.Write(bs)
assert.NoError(t, err)
require.NoError(t, err)
err = ruleManager.Update(10*time.Second, []string{tmpFile.Name()}, nil)
assert.NoError(t, err)
require.NoError(t, err)
for h, g := range ruleManager.groups {
if ogs[h] == g {
t.Fail()
@ -861,7 +861,7 @@ func TestNotify(t *testing.T) {
}
expr, err := parser.ParseExpr("a > 1")
assert.NoError(t, err)
require.NoError(t, err)
rule := NewAlertingRule("aTooHigh", expr, 0, labels.Labels{}, labels.Labels{}, nil, true, log.NewNopLogger())
group := NewGroup(GroupOptions{
Name: "alert",
@ -878,26 +878,26 @@ func TestNotify(t *testing.T) {
app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 6000, 0)
err = app.Commit()
assert.NoError(t, err)
require.NoError(t, err)
ctx := context.Background()
// Alert sent right away
group.Eval(ctx, time.Unix(1, 0))
assert.Equal(t, 1, len(lastNotified))
assert.NotZero(t, lastNotified[0].ValidUntil, "ValidUntil should not be zero")
require.Equal(t, 1, len(lastNotified))
require.NotZero(t, lastNotified[0].ValidUntil, "ValidUntil should not be zero")
// Alert is not sent 1s later
group.Eval(ctx, time.Unix(2, 0))
assert.Equal(t, 0, len(lastNotified))
require.Equal(t, 0, len(lastNotified))
// Alert is resent at t=5s
group.Eval(ctx, time.Unix(5, 0))
assert.Equal(t, 1, len(lastNotified))
require.Equal(t, 1, len(lastNotified))
// Resolution alert sent right away
group.Eval(ctx, time.Unix(6, 0))
assert.Equal(t, 1, len(lastNotified))
require.Equal(t, 1, len(lastNotified))
}
func TestMetricsUpdate(t *testing.T) {
@ -934,7 +934,7 @@ func TestMetricsUpdate(t *testing.T) {
countMetrics := func() int {
ms, err := registry.Gather()
assert.NoError(t, err)
require.NoError(t, err)
var metrics int
for _, m := range ms {
s := m.GetName()
@ -972,9 +972,9 @@ func TestMetricsUpdate(t *testing.T) {
for i, c := range cases {
err := ruleManager.Update(time.Second, c.files, nil)
assert.NoError(t, err)
require.NoError(t, err)
time.Sleep(2 * time.Second)
assert.Equal(t, c.metrics, countMetrics(), "test %d: invalid count of metrics", i)
require.Equal(t, c.metrics, countMetrics(), "test %d: invalid count of metrics", i)
}
}
@ -1046,14 +1046,14 @@ func TestGroupStalenessOnRemoval(t *testing.T) {
var totalStaleNaN int
for i, c := range cases {
err := ruleManager.Update(time.Second, c.files, nil)
assert.NoError(t, err)
require.NoError(t, err)
time.Sleep(3 * time.Second)
totalStaleNaN += c.staleNaN
assert.Equal(t, totalStaleNaN, countStaleNaN(t, storage), "test %d/%q: invalid count of staleness markers", i, c.files)
require.Equal(t, totalStaleNaN, countStaleNaN(t, storage), "test %d/%q: invalid count of staleness markers", i, c.files)
}
ruleManager.Stop()
stopped = true
assert.Equal(t, totalStaleNaN, countStaleNaN(t, storage), "invalid count of staleness markers after stopping the engine")
require.Equal(t, totalStaleNaN, countStaleNaN(t, storage), "invalid count of staleness markers after stopping the engine")
}
func TestMetricsStalenessOnManagerShutdown(t *testing.T) {
@ -1089,34 +1089,34 @@ func TestMetricsStalenessOnManagerShutdown(t *testing.T) {
err := ruleManager.Update(2*time.Second, files, nil)
time.Sleep(4 * time.Second)
assert.NoError(t, err)
require.NoError(t, err)
start := time.Now()
err = ruleManager.Update(3*time.Second, files[:0], nil)
assert.NoError(t, err)
require.NoError(t, err)
ruleManager.Stop()
stopped = true
assert.True(t, time.Since(start) < 1*time.Second, "rule manager does not stop early")
require.True(t, time.Since(start) < 1*time.Second, "rule manager does not stop early")
time.Sleep(5 * time.Second)
assert.Equal(t, 0, countStaleNaN(t, storage), "invalid count of staleness markers after stopping the engine")
require.Equal(t, 0, countStaleNaN(t, storage), "invalid count of staleness markers after stopping the engine")
}
func countStaleNaN(t *testing.T, st storage.Storage) int {
var c int
querier, err := st.Querier(context.Background(), 0, time.Now().Unix()*1000)
assert.NoError(t, err)
require.NoError(t, err)
defer querier.Close()
matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_2")
assert.NoError(t, err)
require.NoError(t, err)
set := querier.Select(false, nil, matcher)
samples, err := readSeriesSet(set)
assert.NoError(t, err)
require.NoError(t, err)
metric := labels.FromStrings(model.MetricNameLabel, "test_2").String()
metricSample, ok := samples[metric]
assert.True(t, ok, "Series %s not returned.", metric)
require.True(t, ok, "Series %s not returned.", metric)
for _, s := range metricSample {
if value.IsStaleNaN(s.V) {
c++
@ -1160,6 +1160,6 @@ func TestGroupHasAlertingRules(t *testing.T) {
for i, test := range tests {
got := test.group.HasAlertingRules()
assert.Equal(t, test.want, got, "test case %d failed, expected:%t got:%t", i, test.want, got)
require.Equal(t, test.want, got, "test case %d failed, expected:%t got:%t", i, test.want, got)
}
}
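The migration above is mechanical, but the behavioural difference matters: testify's assert functions record a failure and let the test keep running, while their require counterparts call t.FailNow() and abort the test immediately, so statements after a require can safely rely on the checked condition. A minimal sketch of the difference (package and test names are illustrative, not part of this commit):

package example

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestAssertVsRequire(t *testing.T) {
	v, err := strconv.Atoi("42")

	// assert marks the test failed but execution continues, so later
	// statements may run against a value that was never validated.
	assert.NoError(t, err)

	// require calls t.FailNow() on failure and stops the test goroutine
	// here; the lines below only run when the check passed.
	require.NoError(t, err)
	require.Equal(t, 42, v)
}

One caveat when reading the hunks that follow: t.FailNow() is only guaranteed to abort the test when called from the test's own goroutine; from callbacks running on other goroutines (such as the scrape-loop startFunc hooks below) a failed require still marks the test failed but only exits that goroutine.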


@ -19,7 +19,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/timestamp"
@ -74,14 +74,14 @@ func TestRuleEval(t *testing.T) {
for _, test := range suite {
rule := NewRecordingRule(test.name, test.expr, test.labels)
result, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil)
assert.NoError(t, err)
assert.Equal(t, test.result, result)
require.NoError(t, err)
require.Equal(t, test.result, result)
}
}
func TestRecordingRuleHTMLSnippet(t *testing.T) {
expr, err := parser.ParseExpr(`foo{html="<b>BOLD<b>"}`)
assert.NoError(t, err)
require.NoError(t, err)
rule := NewRecordingRule("testrule", expr, labels.FromStrings("html", "<b>BOLD</b>"))
const want = template.HTML(`record: <a href="/test/prefix/graph?g0.expr=testrule&g0.tab=1">testrule</a>
@ -91,7 +91,7 @@ labels:
`)
got := rule.HTMLSnippet("/test/prefix")
assert.Equal(t, want, got, "incorrect HTML snippet; want:\n\n%s\n\ngot:\n\n%s", want, got)
require.Equal(t, want, got, "incorrect HTML snippet; want:\n\n%s\n\ngot:\n\n%s", want, got)
}
// TestRuleEvalDuplicate tests for duplicate labels in recorded metrics, see #5529.
@ -115,6 +115,6 @@ func TestRuleEvalDuplicate(t *testing.T) {
expr, _ := parser.ParseExpr(`vector(0) or label_replace(vector(0),"test","x","","")`)
rule := NewRecordingRule("foo", expr, labels.FromStrings("test", "test"))
_, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil)
assert.Error(t, err)
assert.EqualError(t, err, "vector contains metrics with the same labelset after applying rule labels")
require.Error(t, err)
require.EqualError(t, err, "vector contains metrics with the same labelset after applying rule labels")
}


@ -20,7 +20,7 @@ import (
"time"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
yaml "gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/config"
@ -215,13 +215,13 @@ func TestPopulateLabels(t *testing.T) {
res, orig, err := populateLabels(c.in, c.cfg)
if c.err != "" {
assert.EqualError(t, err, c.err)
require.EqualError(t, err, c.err)
} else {
assert.NoError(t, err)
require.NoError(t, err)
}
assert.Equal(t, c.in, in)
assert.Equal(t, c.res, res)
assert.Equal(t, c.resOrig, orig)
require.Equal(t, c.in, in)
require.Equal(t, c.res, res)
require.Equal(t, c.resOrig, orig)
}
}
@ -365,7 +365,7 @@ func TestManagerTargetsUpdates(t *testing.T) {
m.mtxScrape.Unlock()
// Make sure all updates have been received.
assert.Equal(t, tgSent, tsetActual)
require.Equal(t, tgSent, tsetActual)
select {
case <-m.triggerReload:


@ -33,7 +33,7 @@ import (
dto "github.com/prometheus/client_model/go"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
@ -135,7 +135,7 @@ func TestDiscoveredLabelsUpdate(t *testing.T) {
}
sp.sync([]*Target{t2})
assert.Equal(t, t2.DiscoveredLabels(), sp.activeTargets[t1.hash()].DiscoveredLabels())
require.Equal(t, t2.DiscoveredLabels(), sp.activeTargets[t1.hash()].DiscoveredLabels())
}
type testLoop struct {
@ -228,11 +228,11 @@ func TestScrapePoolStop(t *testing.T) {
}
mtx.Lock()
assert.Equal(t, numTargets, len(stopped), "Unexpected number of stopped loops")
require.Equal(t, numTargets, len(stopped), "Unexpected number of stopped loops")
mtx.Unlock()
assert.Equal(t, 0, len(sp.activeTargets), "Targets were not cleared on stopping: %d left", len(sp.activeTargets))
assert.Equal(t, 0, len(sp.loops), "Loops were not cleared on stopping: %d left", len(sp.loops))
require.Equal(t, 0, len(sp.activeTargets), "Targets were not cleared on stopping: %d left", len(sp.activeTargets))
require.Equal(t, 0, len(sp.loops), "Loops were not cleared on stopping: %d left", len(sp.loops))
}
func TestScrapePoolReload(t *testing.T) {
@ -250,12 +250,12 @@ func TestScrapePoolReload(t *testing.T) {
newLoop := func(opts scrapeLoopOptions) loop {
l := &testLoop{}
l.startFunc = func(interval, timeout time.Duration, errc chan<- error) {
assert.Equal(t, 3*time.Second, interval, "Unexpected scrape interval")
assert.Equal(t, 2*time.Second, timeout, "Unexpected scrape timeout")
require.Equal(t, 3*time.Second, interval, "Unexpected scrape interval")
require.Equal(t, 2*time.Second, timeout, "Unexpected scrape timeout")
mtx.Lock()
targetScraper := opts.scraper.(*targetScraper)
assert.True(t, stopped[targetScraper.hash()], "Scrape loop for %v not stopped yet", targetScraper)
require.True(t, stopped[targetScraper.hash()], "Scrape loop for %v not stopped yet", targetScraper)
mtx.Unlock()
}
return l
@ -314,11 +314,11 @@ func TestScrapePoolReload(t *testing.T) {
}
mtx.Lock()
assert.Equal(t, numTargets, len(stopped), "Unexpected number of stopped loops")
require.Equal(t, numTargets, len(stopped), "Unexpected number of stopped loops")
mtx.Unlock()
assert.Equal(t, sp.activeTargets, beforeTargets, "Reloading affected target states unexpectedly")
assert.Equal(t, numTargets, len(sp.loops), "Unexpected number of stopped loops after reload")
require.Equal(t, sp.activeTargets, beforeTargets, "Reloading affected target states unexpectedly")
require.Equal(t, numTargets, len(sp.loops), "Unexpected number of stopped loops after reload")
}
func TestScrapePoolTargetLimit(t *testing.T) {
@ -358,7 +358,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
var limit uint
reloadWithLimit := func(l uint) {
limit = l
assert.NoError(t, sp.reload(&config.ScrapeConfig{
require.NoError(t, sp.reload(&config.ScrapeConfig{
ScrapeInterval: model.Duration(3 * time.Second),
ScrapeTimeout: model.Duration(2 * time.Second),
TargetLimit: l,
@ -374,7 +374,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
validateIsRunning := func() {
wg.Wait()
for _, l := range sp.loops {
assert.True(t, l.(*testLoop).runOnce, "loop should be running")
require.True(t, l.(*testLoop).runOnce, "loop should be running")
}
}
@ -382,10 +382,10 @@ func TestScrapePoolTargetLimit(t *testing.T) {
for _, l := range sp.loops {
lerr := l.(*testLoop).getForcedError()
if shouldErr {
assert.NotNil(t, lerr, "error was expected for %d targets with a limit of %d", targets, limit)
assert.Equal(t, fmt.Sprintf("target_limit exceeded (number of targets: %d, limit: %d)", targets, limit), lerr.Error())
require.NotNil(t, lerr, "error was expected for %d targets with a limit of %d", targets, limit)
require.Equal(t, fmt.Sprintf("target_limit exceeded (number of targets: %d, limit: %d)", targets, limit), lerr.Error())
} else {
assert.Equal(t, nil, lerr)
require.Equal(t, nil, lerr)
}
}
}
@ -452,33 +452,33 @@ func TestScrapePoolAppender(t *testing.T) {
target: &Target{},
})
appl, ok := loop.(*scrapeLoop)
assert.True(t, ok, "Expected scrapeLoop but got %T", loop)
require.True(t, ok, "Expected scrapeLoop but got %T", loop)
wrapped := appl.appender(context.Background())
tl, ok := wrapped.(*timeLimitAppender)
assert.True(t, ok, "Expected timeLimitAppender but got %T", wrapped)
require.True(t, ok, "Expected timeLimitAppender but got %T", wrapped)
_, ok = tl.Appender.(nopAppender)
assert.True(t, ok, "Expected base appender but got %T", tl.Appender)
require.True(t, ok, "Expected base appender but got %T", tl.Appender)
loop = sp.newLoop(scrapeLoopOptions{
target: &Target{},
limit: 100,
})
appl, ok = loop.(*scrapeLoop)
assert.True(t, ok, "Expected scrapeLoop but got %T", loop)
require.True(t, ok, "Expected scrapeLoop but got %T", loop)
wrapped = appl.appender(context.Background())
sl, ok := wrapped.(*limitAppender)
assert.True(t, ok, "Expected limitAppender but got %T", wrapped)
require.True(t, ok, "Expected limitAppender but got %T", wrapped)
tl, ok = sl.Appender.(*timeLimitAppender)
assert.True(t, ok, "Expected limitAppender but got %T", sl.Appender)
require.True(t, ok, "Expected limitAppender but got %T", sl.Appender)
_, ok = tl.Appender.(nopAppender)
assert.True(t, ok, "Expected base appender but got %T", tl.Appender)
require.True(t, ok, "Expected base appender but got %T", tl.Appender)
}
func TestScrapePoolRaces(t *testing.T) {
@ -508,8 +508,8 @@ func TestScrapePoolRaces(t *testing.T) {
dropped := sp.DroppedTargets()
expectedActive, expectedDropped := len(tgts[0].Targets), 0
assert.Equal(t, expectedActive, len(active), "Invalid number of active targets")
assert.Equal(t, expectedDropped, len(dropped), "Invalid number of dropped targets")
require.Equal(t, expectedActive, len(active), "Invalid number of active targets")
require.Equal(t, expectedDropped, len(dropped), "Invalid number of dropped targets")
for i := 0; i < 20; i++ {
time.Sleep(time.Duration(10 * time.Millisecond))
@ -552,17 +552,17 @@ func TestScrapePoolScrapeLoopsStarted(t *testing.T) {
},
}
assert.NoError(t, sp.reload(&config.ScrapeConfig{
require.NoError(t, sp.reload(&config.ScrapeConfig{
ScrapeInterval: model.Duration(3 * time.Second),
ScrapeTimeout: model.Duration(2 * time.Second),
}))
sp.Sync(tgs)
assert.Equal(t, 1, len(sp.loops))
require.Equal(t, 1, len(sp.loops))
wg.Wait()
for _, l := range sp.loops {
assert.True(t, l.(*testLoop).runOnce, "loop should be running")
require.True(t, l.(*testLoop).runOnce, "loop should be running")
}
}
@ -866,27 +866,27 @@ test_metric 1
# TYPE test_metric_no_help gauge
# HELP test_metric_no_type other help text
# EOF`), "application/openmetrics-text", time.Now())
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
assert.Equal(t, 1, total)
require.NoError(t, err)
require.NoError(t, slApp.Commit())
require.Equal(t, 1, total)
md, ok := cache.GetMetadata("test_metric")
assert.True(t, ok, "expected metadata to be present")
assert.Equal(t, textparse.MetricTypeCounter, md.Type, "unexpected metric type")
assert.Equal(t, "some help text", md.Help)
assert.Equal(t, "metric", md.Unit)
require.True(t, ok, "expected metadata to be present")
require.Equal(t, textparse.MetricTypeCounter, md.Type, "unexpected metric type")
require.Equal(t, "some help text", md.Help)
require.Equal(t, "metric", md.Unit)
md, ok = cache.GetMetadata("test_metric_no_help")
assert.True(t, ok, "expected metadata to be present")
assert.Equal(t, textparse.MetricTypeGauge, md.Type, "unexpected metric type")
assert.Equal(t, "", md.Help)
assert.Equal(t, "", md.Unit)
require.True(t, ok, "expected metadata to be present")
require.Equal(t, textparse.MetricTypeGauge, md.Type, "unexpected metric type")
require.Equal(t, "", md.Help)
require.Equal(t, "", md.Unit)
md, ok = cache.GetMetadata("test_metric_no_type")
assert.True(t, ok, "expected metadata to be present")
assert.Equal(t, textparse.MetricTypeUnknown, md.Type, "unexpected metric type")
assert.Equal(t, "other help text", md.Help)
assert.Equal(t, "", md.Unit)
require.True(t, ok, "expected metadata to be present")
require.Equal(t, textparse.MetricTypeUnknown, md.Type, "unexpected metric type")
require.Equal(t, "other help text", md.Help)
require.Equal(t, "", md.Unit)
}
func TestScrapeLoopSeriesAdded(t *testing.T) {
@ -909,19 +909,19 @@ func TestScrapeLoopSeriesAdded(t *testing.T) {
slApp := sl.appender(ctx)
total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{})
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
assert.Equal(t, 1, total)
assert.Equal(t, 1, added)
assert.Equal(t, 1, seriesAdded)
require.NoError(t, err)
require.NoError(t, slApp.Commit())
require.Equal(t, 1, total)
require.Equal(t, 1, added)
require.Equal(t, 1, seriesAdded)
slApp = sl.appender(ctx)
total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{})
assert.NoError(t, slApp.Commit())
assert.NoError(t, err)
assert.Equal(t, 1, total)
assert.Equal(t, 1, added)
assert.Equal(t, 0, seriesAdded)
require.NoError(t, slApp.Commit())
require.NoError(t, err)
require.Equal(t, 1, total)
require.Equal(t, 1, added)
require.Equal(t, 0, seriesAdded)
}
func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
@ -971,9 +971,9 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
// each scrape successful or not.
assert.Equal(t, 27, len(appender.result), "Appended samples not as expected")
assert.Equal(t, 42.0, appender.result[0].v, "Appended first sample not as expected")
assert.True(t, value.IsStaleNaN(appender.result[6].v),
require.Equal(t, 27, len(appender.result), "Appended samples not as expected")
require.Equal(t, 42.0, appender.result[0].v, "Appended first sample not as expected")
require.True(t, value.IsStaleNaN(appender.result[6].v),
"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.result[6].v))
}
@ -1027,9 +1027,9 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
// each scrape successful or not.
assert.Equal(t, 17, len(appender.result), "Appended samples not as expected")
assert.Equal(t, 42.0, appender.result[0].v, "Appended first sample not as expected")
assert.True(t, value.IsStaleNaN(appender.result[6].v),
require.Equal(t, 17, len(appender.result), "Appended samples not as expected")
require.Equal(t, 42.0, appender.result[0].v, "Appended first sample not as expected")
require.True(t, value.IsStaleNaN(appender.result[6].v),
"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.result[6].v))
}
@ -1102,7 +1102,7 @@ func TestScrapeLoopCache(t *testing.T) {
// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
// each scrape successful or not.
assert.Equal(t, 26, len(appender.result), "Appended samples not as expected")
require.Equal(t, 26, len(appender.result), "Appended samples not as expected")
}
func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
@ -1240,8 +1240,8 @@ func TestScrapeLoopAppend(t *testing.T) {
slApp := sl.appender(context.Background())
_, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "", now)
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
require.NoError(t, err)
require.NoError(t, slApp.Commit())
expected := []sample{
{
@ -1259,7 +1259,7 @@ func TestScrapeLoopAppend(t *testing.T) {
}
t.Logf("Test:%s", test.title)
assert.Equal(t, expected, app.result)
require.Equal(t, expected, app.result)
}
}
@ -1293,8 +1293,8 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
slApp := sl.appender(context.Background())
_, _, _, err := sl.append(slApp, []byte(metric), "", now)
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
require.NoError(t, err)
require.NoError(t, slApp.Commit())
expected := []sample{
{
@ -1304,7 +1304,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
},
}
assert.Equal(t, expected, app.result)
require.Equal(t, expected, app.result)
}
func TestScrapeLoopAppendSampleLimit(t *testing.T) {
@ -1329,7 +1329,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
// Get the value of the Counter before performing the append.
beforeMetric := dto.Metric{}
err := targetScrapeSampleLimit.Write(&beforeMetric)
assert.NoError(t, err)
require.NoError(t, err)
beforeMetricValue := beforeMetric.GetCounter().GetValue()
@ -1339,20 +1339,20 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
if err != errSampleLimit {
t.Fatalf("Did not see expected sample limit error: %s", err)
}
assert.NoError(t, slApp.Rollback())
assert.Equal(t, 3, total)
assert.Equal(t, 3, added)
assert.Equal(t, 1, seriesAdded)
require.NoError(t, slApp.Rollback())
require.Equal(t, 3, total)
require.Equal(t, 3, added)
require.Equal(t, 1, seriesAdded)
// Check that the Counter has been incremented a single time for the scrape,
// not multiple times for each sample.
metric := dto.Metric{}
err = targetScrapeSampleLimit.Write(&metric)
assert.NoError(t, err)
require.NoError(t, err)
value := metric.GetCounter().GetValue()
change := value - beforeMetricValue
assert.Equal(t, 1.0, change, "Unexpected change of sample limit metric: %f", change)
require.Equal(t, 1.0, change, "Unexpected change of sample limit metric: %f", change)
// And verify that we got the samples that fit under the limit.
want := []sample{
@ -1362,7 +1362,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
v: 1,
},
}
assert.Equal(t, want, resApp.rolledbackResult, "Appended samples not as expected")
require.Equal(t, want, resApp.rolledbackResult, "Appended samples not as expected")
now = time.Now()
slApp = sl.appender(context.Background())
@ -1370,10 +1370,10 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
if err != errSampleLimit {
t.Fatalf("Did not see expected sample limit error: %s", err)
}
assert.NoError(t, slApp.Rollback())
assert.Equal(t, 9, total)
assert.Equal(t, 6, added)
assert.Equal(t, 0, seriesAdded)
require.NoError(t, slApp.Rollback())
require.Equal(t, 9, total)
require.Equal(t, 6, added)
require.Equal(t, 0, seriesAdded)
}
func TestScrapeLoop_ChangingMetricString(t *testing.T) {
@ -1398,13 +1398,13 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
now := time.Now()
slApp := sl.appender(context.Background())
_, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "", now)
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
require.NoError(t, err)
require.NoError(t, slApp.Commit())
slApp = sl.appender(context.Background())
_, _, _, err = sl.append(slApp, []byte(`metric_a{b="1",a="1"} 2`), "", now.Add(time.Minute))
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
require.NoError(t, err)
require.NoError(t, slApp.Commit())
// DeepEqual will report NaNs as being different, so replace with a different value.
want := []sample{
@ -1419,7 +1419,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
v: 2,
},
}
assert.Equal(t, want, capp.result, "Appended samples not as expected")
require.Equal(t, want, capp.result, "Appended samples not as expected")
}
func TestScrapeLoopAppendStaleness(t *testing.T) {
@ -1438,16 +1438,16 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
now := time.Now()
slApp := sl.appender(context.Background())
_, _, _, err := sl.append(slApp, []byte("metric_a 1\n"), "", now)
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
require.NoError(t, err)
require.NoError(t, slApp.Commit())
slApp = sl.appender(context.Background())
_, _, _, err = sl.append(slApp, []byte(""), "", now.Add(time.Second))
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
require.NoError(t, err)
require.NoError(t, slApp.Commit())
ingestedNaN := math.Float64bits(app.result[1].v)
assert.Equal(t, value.StaleNaN, ingestedNaN, "Appended stale sample wasn't as expected")
require.Equal(t, value.StaleNaN, ingestedNaN, "Appended stale sample wasn't as expected")
// DeepEqual will report NaNs as being different, so replace with a different value.
app.result[1].v = 42
@ -1463,7 +1463,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
v: 42,
},
}
assert.Equal(t, want, app.result, "Appended samples not as expected")
require.Equal(t, want, app.result, "Appended samples not as expected")
}
func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
@ -1481,13 +1481,13 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
now := time.Now()
slApp := sl.appender(context.Background())
_, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "", now)
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
require.NoError(t, err)
require.NoError(t, slApp.Commit())
slApp = sl.appender(context.Background())
_, _, _, err = sl.append(slApp, []byte(""), "", now.Add(time.Second))
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
require.NoError(t, err)
require.NoError(t, slApp.Commit())
want := []sample{
{
@ -1496,7 +1496,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
v: 1,
},
}
assert.Equal(t, want, app.result, "Appended samples not as expected")
require.Equal(t, want, app.result, "Appended samples not as expected")
}
func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
@ -1524,7 +1524,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
}
sl.run(10*time.Millisecond, time.Hour, nil)
assert.Equal(t, 0.0, appender.result[0].v, "bad 'up' value")
require.Equal(t, 0.0, appender.result[0].v, "bad 'up' value")
}
func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
@ -1553,7 +1553,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
}
sl.run(10*time.Millisecond, time.Hour, nil)
assert.Equal(t, 0.0, appender.result[0].v, "bad 'up' value")
require.Equal(t, 0.0, appender.result[0].v, "bad 'up' value")
}
type errorAppender struct {
@ -1594,8 +1594,8 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
now := time.Unix(1, 0)
slApp := sl.appender(context.Background())
total, added, seriesAdded, err := sl.append(slApp, []byte("out_of_order 1\namend 1\nnormal 1\nout_of_bounds 1\n"), "", now)
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
require.NoError(t, err)
require.NoError(t, slApp.Commit())
want := []sample{
{
@ -1604,10 +1604,10 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
v: 1,
},
}
assert.Equal(t, want, app.result, "Appended samples not as expected")
assert.Equal(t, 4, total)
assert.Equal(t, 4, added)
assert.Equal(t, 1, seriesAdded)
require.Equal(t, want, app.result, "Appended samples not as expected")
require.Equal(t, 4, total)
require.Equal(t, 4, added)
require.Equal(t, 1, seriesAdded)
}
func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
@ -1631,11 +1631,11 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
now := time.Now().Add(20 * time.Minute)
slApp := sl.appender(context.Background())
total, added, seriesAdded, err := sl.append(slApp, []byte("normal 1\n"), "", now)
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
assert.Equal(t, 1, total)
assert.Equal(t, 1, added)
assert.Equal(t, 0, seriesAdded)
require.NoError(t, err)
require.NoError(t, slApp.Commit())
require.Equal(t, 1, total)
require.Equal(t, 1, added)
require.Equal(t, 0, seriesAdded)
}
@ -1681,9 +1681,9 @@ func TestTargetScraperScrapeOK(t *testing.T) {
var buf bytes.Buffer
contentType, err := ts.scrape(context.Background(), &buf)
assert.NoError(t, err)
assert.Equal(t, "text/plain; version=0.0.4", contentType)
assert.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String())
require.NoError(t, err)
require.Equal(t, "text/plain; version=0.0.4", contentType)
require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String())
}
func TestTargetScrapeScrapeCancel(t *testing.T) {
@ -1734,7 +1734,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
case <-time.After(5 * time.Second):
t.Fatalf("Scrape function did not return unexpectedly")
case err := <-errc:
assert.NoError(t, err)
require.NoError(t, err)
}
// If this were closed in a defer earlier in the function, the test server
// wouldn't terminate and the test wouldn't complete.
@ -1765,7 +1765,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) {
}
_, err = ts.scrape(context.Background(), ioutil.Discard)
assert.Contains(t, err.Error(), "404", "Expected \"404 NotFound\" error but got: %s", err)
require.Contains(t, err.Error(), "404", "Expected \"404 NotFound\" error but got: %s", err)
}
// testScraper implements the scraper interface and allows setting values
@ -1818,8 +1818,8 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
now := time.Now()
slApp := sl.appender(context.Background())
_, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "", now)
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
require.NoError(t, err)
require.NoError(t, slApp.Commit())
want := []sample{
{
@ -1828,7 +1828,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
v: 1,
},
}
assert.Equal(t, want, capp.result, "Appended samples not as expected")
require.Equal(t, want, capp.result, "Appended samples not as expected")
}
func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
@ -1851,8 +1851,8 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
now := time.Now()
slApp := sl.appender(context.Background())
_, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "", now)
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
require.NoError(t, err)
require.NoError(t, slApp.Commit())
want := []sample{
{
@ -1861,7 +1861,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
v: 1,
},
}
assert.Equal(t, want, capp.result, "Appended samples not as expected")
require.Equal(t, want, capp.result, "Appended samples not as expected")
}
func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
@ -1884,27 +1884,27 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
// We add a good and a bad metric to check that both are discarded.
slApp := sl.appender(ctx)
_, _, _, err := sl.append(slApp, []byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "", time.Time{})
assert.Error(t, err)
assert.NoError(t, slApp.Rollback())
require.Error(t, err)
require.NoError(t, slApp.Rollback())
q, err := s.Querier(ctx, time.Time{}.UnixNano(), 0)
assert.NoError(t, err)
require.NoError(t, err)
series := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
assert.Equal(t, false, series.Next(), "series found in tsdb")
assert.NoError(t, series.Err())
require.Equal(t, false, series.Next(), "series found in tsdb")
require.NoError(t, series.Err())
// We add a good metric to check that it is recorded.
slApp = sl.appender(ctx)
_, _, _, err = sl.append(slApp, []byte("test_metric{le=\"500\"} 1\n"), "", time.Time{})
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
require.NoError(t, err)
require.NoError(t, slApp.Commit())
q, err = s.Querier(ctx, time.Time{}.UnixNano(), 0)
assert.NoError(t, err)
require.NoError(t, err)
series = q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "le", "500"))
assert.Equal(t, true, series.Next(), "series not found in tsdb")
assert.NoError(t, series.Err())
assert.Equal(t, false, series.Next(), "more than one series found in tsdb")
require.Equal(t, true, series.Next(), "series not found in tsdb")
require.NoError(t, series.Err())
require.Equal(t, false, series.Next(), "more than one series found in tsdb")
}
func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
@ -1933,15 +1933,15 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
slApp := sl.appender(context.Background())
_, _, _, err := sl.append(slApp, []byte("nok 1\nnok2{drop=\"drop\"} 1\n"), "", time.Time{})
assert.Error(t, err)
assert.NoError(t, slApp.Rollback())
assert.Equal(t, errNameLabelMandatory, err)
require.Error(t, err)
require.NoError(t, slApp.Rollback())
require.Equal(t, errNameLabelMandatory, err)
q, err := s.Querier(ctx, time.Time{}.UnixNano(), 0)
assert.NoError(t, err)
require.NoError(t, err)
series := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
assert.Equal(t, false, series.Next(), "series found in tsdb")
assert.NoError(t, series.Err())
require.Equal(t, false, series.Next(), "series found in tsdb")
require.NoError(t, series.Err())
}
func TestReusableConfig(t *testing.T) {
@ -2000,14 +2000,14 @@ func TestReusableConfig(t *testing.T) {
}
for i, m := range match {
assert.Equal(t, true, reusableCache(variants[m[0]], variants[m[1]]), "match test %d", i)
assert.Equal(t, true, reusableCache(variants[m[1]], variants[m[0]]), "match test %d", i)
assert.Equal(t, true, reusableCache(variants[m[1]], variants[m[1]]), "match test %d", i)
assert.Equal(t, true, reusableCache(variants[m[0]], variants[m[0]]), "match test %d", i)
require.Equal(t, true, reusableCache(variants[m[0]], variants[m[1]]), "match test %d", i)
require.Equal(t, true, reusableCache(variants[m[1]], variants[m[0]]), "match test %d", i)
require.Equal(t, true, reusableCache(variants[m[1]], variants[m[1]]), "match test %d", i)
require.Equal(t, true, reusableCache(variants[m[0]], variants[m[0]]), "match test %d", i)
}
for i, m := range noMatch {
assert.Equal(t, false, reusableCache(variants[m[0]], variants[m[1]]), "not match test %d", i)
assert.Equal(t, false, reusableCache(variants[m[1]], variants[m[0]]), "not match test %d", i)
require.Equal(t, false, reusableCache(variants[m[0]], variants[m[1]]), "not match test %d", i)
require.Equal(t, false, reusableCache(variants[m[1]], variants[m[0]]), "not match test %d", i)
}
}
@ -2118,15 +2118,15 @@ func TestReuseScrapeCache(t *testing.T) {
sp.reload(s.newConfig)
for fp, newCacheAddr := range cacheAddr(sp) {
if s.keep {
assert.Equal(t, initCacheAddr[fp], newCacheAddr, "step %d: old cache and new cache are not the same", i)
require.Equal(t, initCacheAddr[fp], newCacheAddr, "step %d: old cache and new cache are not the same", i)
} else {
assert.NotEqual(t, initCacheAddr[fp], newCacheAddr, "step %d: old cache and new cache are the same", i)
require.NotEqual(t, initCacheAddr[fp], newCacheAddr, "step %d: old cache and new cache are the same", i)
}
}
initCacheAddr = cacheAddr(sp)
sp.reload(s.newConfig)
for fp, newCacheAddr := range cacheAddr(sp) {
assert.Equal(t, initCacheAddr[fp], newCacheAddr, "step %d: reloading the exact config invalidates the cache", i)
require.Equal(t, initCacheAddr[fp], newCacheAddr, "step %d: reloading the exact config invalidates the cache", i)
}
}
}
@ -2150,8 +2150,8 @@ func TestScrapeAddFast(t *testing.T) {
slApp := sl.appender(ctx)
_, _, _, err := sl.append(slApp, []byte("up 1\n"), "", time.Time{})
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
require.NoError(t, err)
require.NoError(t, slApp.Commit())
// Poison the cache. There is just one entry, and one series in the
// storage. Changing the ref will create a 'not found' error.
@ -2161,8 +2161,8 @@ func TestScrapeAddFast(t *testing.T) {
slApp = sl.appender(ctx)
_, _, _, err = sl.append(slApp, []byte("up 1\n"), "", time.Time{}.Add(time.Second))
assert.NoError(t, err)
assert.NoError(t, slApp.Commit())
require.NoError(t, err)
require.NoError(t, slApp.Commit())
}
func TestReuseCacheRace(t *testing.T) {
@ -2206,7 +2206,7 @@ func TestCheckAddError(t *testing.T) {
var appErrs appendErrors
sl := scrapeLoop{l: log.NewNopLogger()}
sl.checkAddError(nil, nil, nil, storage.ErrOutOfOrderSample, nil, &appErrs)
assert.Equal(t, 1, appErrs.numOutOfOrder)
require.Equal(t, 1, appErrs.numOutOfOrder)
}
func TestScrapeReportSingleAppender(t *testing.T) {
@ -2249,7 +2249,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
start := time.Now()
for time.Since(start) < 3*time.Second {
q, err := s.Querier(ctx, time.Time{}.UnixNano(), time.Now().UnixNano())
assert.NoError(t, err)
require.NoError(t, err)
series := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".+"))
c := 0
@ -2260,7 +2260,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
}
}
assert.Equal(t, 0, c%9, "Appended samples not as expected: %d", c)
require.Equal(t, 0, c%9, "Appended samples not as expected: %d", c)
q.Close()
}
cancel()
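Several assertions in this file and in the rules tests above (value.IsStaleNaN, the "Wanted: stale NaN" messages, countStaleNaN) rest on Prometheus' staleness convention: a series is terminated by appending a NaN with one specific bit pattern, so the marker must be compared by bits rather than with math.IsNaN, which matches every NaN. A self-contained sketch, assuming the bit pattern used by pkg/value at the time of this change:

package main

import (
	"fmt"
	"math"
)

// staleNaN is the exact bit pattern appended as a staleness marker.
const staleNaN uint64 = 0x7ff0000000000002

var StaleNaN = math.Float64frombits(staleNaN)

// IsStaleNaN compares raw bits so ordinary NaNs are not mistaken
// for staleness markers.
func IsStaleNaN(v float64) bool {
	return math.Float64bits(v) == staleNaN
}

func main() {
	fmt.Println(IsStaleNaN(StaleNaN))   // true
	fmt.Println(IsStaleNaN(math.NaN())) // false: a regular NaN is not a marker
}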


@ -27,7 +27,7 @@ import (
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
)
@ -40,7 +40,7 @@ func TestTargetLabels(t *testing.T) {
target := newTestTarget("example.com:80", 0, labels.FromStrings("job", "some_job", "foo", "bar"))
want := labels.FromStrings(model.JobLabel, "some_job", "foo", "bar")
got := target.Labels()
assert.Equal(t, want, got)
require.Equal(t, want, got)
}
func TestTargetOffset(t *testing.T) {
@ -118,7 +118,7 @@ func TestTargetURL(t *testing.T) {
RawQuery: expectedParams.Encode(),
}
assert.Equal(t, expectedURL, target.URL())
require.Equal(t, expectedURL, target.URL())
}
func newTestTarget(targetURL string, deadline time.Duration, lbls labels.Labels) *Target {


@ -17,7 +17,7 @@ import (
"math/rand"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSampleRing(t *testing.T) {
@ -77,9 +77,9 @@ func TestSampleRing(t *testing.T) {
}
if found {
assert.GreaterOrEqual(t, sold.t, s.t-c.delta, "%d: unexpected sample %d in buffer; buffer %v", i, sold.t, buffered)
require.GreaterOrEqual(t, sold.t, s.t-c.delta, "%d: unexpected sample %d in buffer; buffer %v", i, sold.t, buffered)
} else {
assert.Less(t, sold.t, s.t-c.delta, "%d: expected sample %d to be in buffer but was not; buffer %v", i, sold.t, buffered)
require.Less(t, sold.t, s.t-c.delta, "%d: expected sample %d to be in buffer but was not; buffer %v", i, sold.t, buffered)
}
}
}
@ -96,12 +96,12 @@ func TestBufferedSeriesIterator(t *testing.T) {
t, v := bit.At()
b = append(b, sample{t: t, v: v})
}
assert.Equal(t, exp, b, "buffer mismatch")
require.Equal(t, exp, b, "buffer mismatch")
}
sampleEq := func(ets int64, ev float64) {
ts, v := it.Values()
assert.Equal(t, ets, ts, "timestamp mismatch")
assert.Equal(t, ev, v, "value mismatch")
require.Equal(t, ets, ts, "timestamp mismatch")
require.Equal(t, ev, v, "value mismatch")
}
it = NewBufferIterator(NewListSeriesIterator(samples{
@ -115,29 +115,29 @@ func TestBufferedSeriesIterator(t *testing.T) {
sample{t: 101, v: 10},
}), 2)
assert.True(t, it.Seek(-123), "seek failed")
require.True(t, it.Seek(-123), "seek failed")
sampleEq(1, 2)
bufferEq(nil)
assert.True(t, it.Next(), "next failed")
require.True(t, it.Next(), "next failed")
sampleEq(2, 3)
bufferEq([]sample{{t: 1, v: 2}})
assert.True(t, it.Next(), "next failed")
assert.True(t, it.Next(), "next failed")
assert.True(t, it.Next(), "next failed")
require.True(t, it.Next(), "next failed")
require.True(t, it.Next(), "next failed")
require.True(t, it.Next(), "next failed")
sampleEq(5, 6)
bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}})
assert.True(t, it.Seek(5), "seek failed")
require.True(t, it.Seek(5), "seek failed")
sampleEq(5, 6)
bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}})
assert.True(t, it.Seek(101), "seek failed")
require.True(t, it.Seek(101), "seek failed")
sampleEq(101, 10)
bufferEq([]sample{{t: 99, v: 8}, {t: 100, v: 9}})
assert.False(t, it.Next(), "next succeeded unexpectedly")
require.False(t, it.Next(), "next succeeded unexpectedly")
}
// At() should not be called once Next() returns false.
@ -147,7 +147,7 @@ func TestBufferedSeriesIteratorNoBadAt(t *testing.T) {
m := &mockSeriesIterator{
seek: func(int64) bool { return false },
at: func() (int64, float64) {
assert.False(t, done, "unexpectedly done")
require.False(t, done, "unexpectedly done")
done = true
return 0, 0
},
@ -171,7 +171,7 @@ func BenchmarkBufferedSeriesIterator(b *testing.B) {
for it.Next() {
// scan everything
}
assert.NoError(b, it.Err())
require.NoError(b, it.Err())
}
type mockSeriesIterator struct {


@ -19,7 +19,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage"
@ -43,7 +43,7 @@ func TestFanout_SelectSorted(t *testing.T) {
app1.Add(inputLabel, 2000, 2)
inputTotalSize++
err := app1.Commit()
assert.NoError(t, err)
require.NoError(t, err)
remoteStorage1 := teststorage.New(t)
defer remoteStorage1.Close()
@ -55,7 +55,7 @@ func TestFanout_SelectSorted(t *testing.T) {
app2.Add(inputLabel, 5000, 5)
inputTotalSize++
err = app2.Commit()
assert.NoError(t, err)
require.NoError(t, err)
remoteStorage2 := teststorage.New(t)
defer remoteStorage2.Close()
@ -69,17 +69,17 @@ func TestFanout_SelectSorted(t *testing.T) {
inputTotalSize++
err = app3.Commit()
assert.NoError(t, err)
require.NoError(t, err)
fanoutStorage := storage.NewFanout(nil, priStorage, remoteStorage1, remoteStorage2)
t.Run("querier", func(t *testing.T) {
querier, err := fanoutStorage.Querier(context.Background(), 0, 8000)
assert.NoError(t, err)
require.NoError(t, err)
defer querier.Close()
matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a")
assert.NoError(t, err)
require.NoError(t, err)
seriesSet := querier.Select(true, nil, matcher)
@ -96,16 +96,16 @@ func TestFanout_SelectSorted(t *testing.T) {
}
}
assert.Equal(t, labelsResult, outputLabel)
assert.Equal(t, inputTotalSize, len(result))
require.Equal(t, labelsResult, outputLabel)
require.Equal(t, inputTotalSize, len(result))
})
t.Run("chunk querier", func(t *testing.T) {
querier, err := fanoutStorage.ChunkQuerier(ctx, 0, 8000)
assert.NoError(t, err)
require.NoError(t, err)
defer querier.Close()
matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a")
assert.NoError(t, err)
require.NoError(t, err)
seriesSet := storage.NewSeriesSetFromChunkSeriesSet(querier.Select(true, nil, matcher))
@ -122,9 +122,9 @@ func TestFanout_SelectSorted(t *testing.T) {
}
}
assert.NoError(t, seriesSet.Err())
assert.Equal(t, labelsResult, outputLabel)
assert.Equal(t, inputTotalSize, len(result))
require.NoError(t, seriesSet.Err())
require.Equal(t, labelsResult, outputLabel)
require.Equal(t, inputTotalSize, len(result))
})
}
@ -157,7 +157,7 @@ func TestFanoutErrors(t *testing.T) {
t.Run("samples", func(t *testing.T) {
querier, err := fanoutStorage.Querier(context.Background(), 0, 8000)
assert.NoError(t, err)
require.NoError(t, err)
defer querier.Close()
matcher := labels.MustNewMatcher(labels.MatchEqual, "a", "b")
@ -169,20 +169,20 @@ func TestFanoutErrors(t *testing.T) {
}
if tc.err != nil {
assert.Error(t, ss.Err())
assert.Equal(t, tc.err.Error(), ss.Err().Error())
require.Error(t, ss.Err())
require.Equal(t, tc.err.Error(), ss.Err().Error())
}
if tc.warning != nil {
assert.Greater(t, len(ss.Warnings()), 0, "warnings expected")
assert.Error(t, ss.Warnings()[0])
assert.Equal(t, tc.warning.Error(), ss.Warnings()[0].Error())
require.Greater(t, len(ss.Warnings()), 0, "warnings expected")
require.Error(t, ss.Warnings()[0])
require.Equal(t, tc.warning.Error(), ss.Warnings()[0].Error())
}
})
t.Run("chunks", func(t *testing.T) {
t.Skip("enable once TestStorage and TSDB implements ChunkQuerier")
querier, err := fanoutStorage.ChunkQuerier(context.Background(), 0, 8000)
assert.NoError(t, err)
require.NoError(t, err)
defer querier.Close()
matcher := labels.MustNewMatcher(labels.MatchEqual, "a", "b")
@ -194,14 +194,14 @@ func TestFanoutErrors(t *testing.T) {
}
if tc.err != nil {
assert.Error(t, ss.Err())
assert.Equal(t, tc.err.Error(), ss.Err().Error())
require.Error(t, ss.Err())
require.Equal(t, tc.err.Error(), ss.Err().Error())
}
if tc.warning != nil {
assert.Greater(t, len(ss.Warnings()), 0, "warnings expected")
assert.Error(t, ss.Warnings()[0])
assert.Equal(t, tc.warning.Error(), ss.Warnings()[0].Error())
require.Greater(t, len(ss.Warnings()), 0, "warnings expected")
require.Error(t, ss.Warnings()[0])
require.Equal(t, tc.warning.Error(), ss.Warnings()[0].Error())
}
})
}
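Note that inside t.Run subtests like the ones above, a failed require aborts only the subtest whose function it runs in; sibling subtests still execute. A small sketch (names are illustrative):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestRequireScope(t *testing.T) {
	t.Run("first", func(t *testing.T) {
		require.Fail(t, "boom") // aborts only the "first" subtest
	})
	t.Run("second", func(t *testing.T) {
		// still runs even though "first" failed
	})
}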


@ -21,7 +21,7 @@ import (
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/tsdb/chunkenc"
@ -194,19 +194,19 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
for mergedQuerier.Next() {
mergedSeries = append(mergedSeries, mergedQuerier.At())
}
assert.NoError(t, mergedQuerier.Err())
require.NoError(t, mergedQuerier.Err())
for _, actualSeries := range mergedSeries {
assert.True(t, tc.expected.Next(), "Expected Next() to be true")
require.True(t, tc.expected.Next(), "Expected Next() to be true")
expectedSeries := tc.expected.At()
assert.Equal(t, expectedSeries.Labels(), actualSeries.Labels())
require.Equal(t, expectedSeries.Labels(), actualSeries.Labels())
expSmpl, expErr := ExpandSamples(expectedSeries.Iterator(), nil)
actSmpl, actErr := ExpandSamples(actualSeries.Iterator(), nil)
assert.Equal(t, expErr, actErr)
assert.Equal(t, expSmpl, actSmpl)
require.Equal(t, expErr, actErr)
require.Equal(t, expSmpl, actSmpl)
}
assert.False(t, tc.expected.Next(), "Expected Next() to be false")
require.False(t, tc.expected.Next(), "Expected Next() to be false")
})
}
}
@ -364,19 +364,19 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
merged := NewMergeChunkQuerier([]ChunkQuerier{p}, qs, NewCompactingChunkSeriesMerger(nil)).Select(false, nil)
for merged.Next() {
assert.True(t, tc.expected.Next(), "Expected Next() to be true")
require.True(t, tc.expected.Next(), "Expected Next() to be true")
actualSeries := merged.At()
expectedSeries := tc.expected.At()
assert.Equal(t, expectedSeries.Labels(), actualSeries.Labels())
require.Equal(t, expectedSeries.Labels(), actualSeries.Labels())
expChks, expErr := ExpandChunks(expectedSeries.Iterator())
actChks, actErr := ExpandChunks(actualSeries.Iterator())
assert.Equal(t, expErr, actErr)
assert.Equal(t, expChks, actChks)
require.Equal(t, expErr, actErr)
require.Equal(t, expChks, actChks)
}
assert.NoError(t, merged.Err())
assert.False(t, tc.expected.Next(), "Expected Next() to be false")
require.NoError(t, merged.Err())
require.False(t, tc.expected.Next(), "Expected Next() to be false")
})
}
}
@ -468,12 +468,12 @@ func TestCompactingChunkSeriesMerger(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
merged := m(tc.input...)
assert.Equal(t, tc.expected.Labels(), merged.Labels())
require.Equal(t, tc.expected.Labels(), merged.Labels())
actChks, actErr := ExpandChunks(merged.Iterator())
expChks, expErr := ExpandChunks(tc.expected.Iterator())
assert.Equal(t, expErr, actErr)
assert.Equal(t, expChks, actChks)
require.Equal(t, expErr, actErr)
require.Equal(t, expChks, actChks)
})
}
}
@ -612,8 +612,8 @@ func TestChainSampleIterator(t *testing.T) {
} {
merged := newChainSampleIterator(tc.input)
actual, err := ExpandSamples(merged, nil)
assert.NoError(t, err)
assert.Equal(t, tc.expected, actual)
require.NoError(t, err)
require.Equal(t, tc.expected, actual)
}
}
@ -655,9 +655,9 @@ func TestChainSampleIteratorSeek(t *testing.T) {
actual = append(actual, sample{t, v})
}
s, err := ExpandSamples(merged, nil)
assert.NoError(t, err)
require.NoError(t, err)
actual = append(actual, s...)
assert.Equal(t, tc.expected, actual)
require.Equal(t, tc.expected, actual)
}
}
@ -689,7 +689,7 @@ func benchmarkDrain(seriesSet SeriesSet, b *testing.B) {
for n := 0; n < b.N; n++ {
for seriesSet.Next() {
result, err = ExpandSamples(seriesSet.At().Iterator(), nil)
assert.NoError(b, err)
require.NoError(b, err)
}
}
}
@ -789,9 +789,9 @@ func unwrapMockGenericQuerier(t *testing.T, qr genericQuerier) *mockGenericQueri
m, ok := qr.(*mockGenericQuerier)
if !ok {
s, ok := qr.(*secondaryQuerier)
assert.True(t, ok, "expected secondaryQuerier got something else")
require.True(t, ok, "expected secondaryQuerier got something else")
m, ok = s.genericQuerier.(*mockGenericQuerier)
assert.True(t, ok, "expected mockGenericQuerier got something else")
require.True(t, ok, "expected mockGenericQuerier got something else")
}
return m
}
@ -922,10 +922,10 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
for res.Next() {
lbls = append(lbls, res.At().Labels())
}
assert.Equal(t, tcase.expectedWarnings[0], res.Warnings())
assert.Equal(t, tcase.expectedErrs[0], res.Err())
assert.True(t, errors.Is(res.Err(), tcase.expectedErrs[0]), "expected error doesn't match")
assert.Equal(t, tcase.expectedSelectsSeries, lbls)
require.Equal(t, tcase.expectedWarnings[0], res.Warnings())
require.Equal(t, tcase.expectedErrs[0], res.Err())
require.True(t, errors.Is(res.Err(), tcase.expectedErrs[0]), "expected error doesn't match")
require.Equal(t, tcase.expectedSelectsSeries, lbls)
for _, qr := range q.queriers {
m := unwrapMockGenericQuerier(t, qr)
@ -934,14 +934,14 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
if len(q.queriers) == 1 {
exp[0] = false
}
assert.Equal(t, exp, m.sortedSeriesRequested)
require.Equal(t, exp, m.sortedSeriesRequested)
}
})
t.Run("LabelNames", func(t *testing.T) {
res, w, err := q.LabelNames()
assert.Equal(t, tcase.expectedWarnings[1], w)
assert.True(t, errors.Is(err, tcase.expectedErrs[1]), "expected error doesn't match")
assert.Equal(t, tcase.expectedLabels, res)
require.Equal(t, tcase.expectedWarnings[1], w)
require.True(t, errors.Is(err, tcase.expectedErrs[1]), "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res)
if err != nil {
return
@ -949,14 +949,14 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
for _, qr := range q.queriers {
m := unwrapMockGenericQuerier(t, qr)
assert.Equal(t, 1, m.labelNamesCalls)
require.Equal(t, 1, m.labelNamesCalls)
}
})
t.Run("LabelValues", func(t *testing.T) {
res, w, err := q.LabelValues("test")
assert.Equal(t, tcase.expectedWarnings[2], w)
assert.True(t, errors.Is(err, tcase.expectedErrs[2]), "expected error doesn't match")
assert.Equal(t, tcase.expectedLabels, res)
require.Equal(t, tcase.expectedWarnings[2], w)
require.True(t, errors.Is(err, tcase.expectedErrs[2]), "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res)
if err != nil {
return
@ -964,7 +964,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
for _, qr := range q.queriers {
m := unwrapMockGenericQuerier(t, qr)
assert.Equal(t, []string{"test"}, m.labelNamesRequested)
require.Equal(t, []string{"test"}, m.labelNamesRequested)
}
})
})


@ -17,7 +17,7 @@ import (
"io"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type mockedFlusher struct {
@ -45,48 +45,48 @@ func TestChunkedReaderCanReadFromChunkedWriter(t *testing.T) {
for _, msg := range msgs {
n, err := w.Write(msg)
assert.NoError(t, err)
assert.Equal(t, len(msg), n)
require.NoError(t, err)
require.Equal(t, len(msg), n)
}
i := 0
for ; i < 4; i++ {
msg, err := r.Next()
assert.NoError(t, err)
assert.Less(t, i, len(msgs), "more messages then expected")
assert.Equal(t, msgs[i], msg)
require.NoError(t, err)
require.Less(t, i, len(msgs), "more messages then expected")
require.Equal(t, msgs[i], msg)
}
// Empty byte slice is skipped.
i++
msg, err := r.Next()
assert.NoError(t, err)
assert.Less(t, i, len(msgs), "more messages then expected")
assert.Equal(t, msgs[i], msg)
require.NoError(t, err)
require.Less(t, i, len(msgs), "more messages then expected")
require.Equal(t, msgs[i], msg)
_, err = r.Next()
assert.Error(t, err, "expected io.EOF")
assert.Equal(t, io.EOF, err)
require.Error(t, err, "expected io.EOF")
require.Equal(t, io.EOF, err)
assert.Equal(t, 5, f.flushed)
require.Equal(t, 5, f.flushed)
}
func TestChunkedReader_Overflow(t *testing.T) {
b := &bytes.Buffer{}
_, err := NewChunkedWriter(b, &mockedFlusher{}).Write([]byte("twelve bytes"))
assert.NoError(t, err)
require.NoError(t, err)
b2 := make([]byte, 12)
copy(b2, b.Bytes())
ret, err := NewChunkedReader(b, 12, nil).Next()
assert.NoError(t, err)
assert.Equal(t, "twelve bytes", string(ret))
require.NoError(t, err)
require.Equal(t, "twelve bytes", string(ret))
_, err = NewChunkedReader(bytes.NewReader(b2), 11, nil).Next()
assert.Error(t, err, "expect exceed limit error")
assert.Equal(t, "chunkedReader: message size exceeded the limit 11 bytes; got: 12 bytes", err.Error())
require.Error(t, err, "expect exceed limit error")
require.Equal(t, "chunkedReader: message size exceeded the limit 11 bytes; got: 12 bytes", err.Error())
}
func TestChunkedReader_CorruptedFrame(t *testing.T) {
@ -94,13 +94,13 @@ func TestChunkedReader_CorruptedFrame(t *testing.T) {
w := NewChunkedWriter(b, &mockedFlusher{})
n, err := w.Write([]byte("test1"))
assert.NoError(t, err)
assert.Equal(t, 5, n)
require.NoError(t, err)
require.Equal(t, 5, n)
bs := b.Bytes()
bs[9] = 1 // Malform the frame by changing one byte.
_, err = NewChunkedReader(bytes.NewReader(bs), 20, nil).Next()
assert.Error(t, err, "expected malformed frame")
assert.Equal(t, "chunkedReader: corrupted frame; checksum mismatch", err.Error())
require.Error(t, err, "expected malformed frame")
require.Equal(t, "chunkedReader: corrupted frame; checksum mismatch", err.Error())
}
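The tests above pin down the framing contract of the chunked transport: every Write becomes one self-delimiting message carrying a length prefix and a checksum (hence the size-limit and checksum-mismatch errors), and Next returns exactly one such message. A minimal round-trip sketch using only the constructors these tests exercise; the no-op flusher stands in for the http.Flusher a real handler would supply:

package remote

import (
	"bytes"
	"testing"

	"github.com/stretchr/testify/require"
)

type noopFlusher struct{}

func (noopFlusher) Flush() {}

func TestChunkedRoundTripSketch(t *testing.T) {
	var buf bytes.Buffer

	// One Write produces one framed message.
	w := NewChunkedWriter(&buf, noopFlusher{})
	_, err := w.Write([]byte("hello"))
	require.NoError(t, err)

	// Next returns that message whole, enforcing the size limit (here 1 KiB).
	r := NewChunkedReader(&buf, 1<<10, nil)
	msg, err := r.Next()
	require.NoError(t, err)
	require.Equal(t, "hello", string(msg))
}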


@ -25,7 +25,7 @@ import (
"github.com/pkg/errors"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var longErrMessage = strings.Repeat("error message", maxErrMsgLen)
@ -61,7 +61,7 @@ func TestStoreHTTPErrorHandling(t *testing.T) {
)
serverURL, err := url.Parse(server.URL)
assert.NoError(t, err)
require.NoError(t, err)
conf := &ClientConfig{
URL: &config_util.URL{URL: serverURL},
@ -69,15 +69,15 @@ func TestStoreHTTPErrorHandling(t *testing.T) {
}
hash, err := toHash(conf)
assert.NoError(t, err)
require.NoError(t, err)
c, err := NewWriteClient(hash, conf)
assert.NoError(t, err)
require.NoError(t, err)
err = c.Store(context.Background(), []byte{})
if test.err != nil {
assert.EqualError(t, err, test.err.Error())
require.EqualError(t, err, test.err.Error())
} else {
assert.NoError(t, err)
require.NoError(t, err)
}
server.Close()


@ -17,7 +17,7 @@ import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/prompb"
@ -115,10 +115,10 @@ func TestValidateLabelsAndMetricName(t *testing.T) {
t.Run(test.description, func(t *testing.T) {
err := validateLabelsAndMetricName(test.input)
if test.expectedErr != "" {
assert.Error(t, err)
assert.Equal(t, test.expectedErr, err.Error())
require.Error(t, err)
require.Equal(t, test.expectedErr, err.Error())
} else {
assert.NoError(t, err)
require.NoError(t, err)
}
})
}
@ -136,11 +136,11 @@ func TestConcreteSeriesSet(t *testing.T) {
c := &concreteSeriesSet{
series: []storage.Series{series1, series2},
}
assert.True(t, c.Next(), "Expected Next() to be true.")
assert.Equal(t, series1, c.At(), "Unexpected series returned.")
assert.True(t, c.Next(), "Expected Next() to be true.")
assert.Equal(t, series2, c.At(), "Unexpected series returned.")
assert.False(t, c.Next(), "Expected Next() to be false.")
require.True(t, c.Next(), "Expected Next() to be true.")
require.Equal(t, series1, c.At(), "Unexpected series returned.")
require.True(t, c.Next(), "Expected Next() to be true.")
require.Equal(t, series2, c.At(), "Unexpected series returned.")
require.False(t, c.Next(), "Expected Next() to be false.")
}
func TestConcreteSeriesClonesLabels(t *testing.T) {
@ -153,13 +153,13 @@ func TestConcreteSeriesClonesLabels(t *testing.T) {
}
gotLabels := cs.Labels()
assert.Equal(t, lbls, gotLabels)
require.Equal(t, lbls, gotLabels)
gotLabels[0].Value = "foo"
gotLabels[1].Value = "bar"
gotLabels = cs.Labels()
assert.Equal(t, lbls, gotLabels)
require.Equal(t, lbls, gotLabels)
}
func TestFromQueryResultWithDuplicates(t *testing.T) {
@ -183,9 +183,9 @@ func TestFromQueryResultWithDuplicates(t *testing.T) {
errSeries, isErrSeriesSet := series.(errSeriesSet)
assert.True(t, isErrSeriesSet, "Expected resulting series to be an errSeriesSet")
require.True(t, isErrSeriesSet, "Expected resulting series to be an errSeriesSet")
errMessage := errSeries.Err().Error()
assert.Equal(t, "duplicate label with name: foo", errMessage, fmt.Sprintf("Expected error to be from duplicate label, but got: %s", errMessage))
require.Equal(t, "duplicate label with name: foo", errMessage, fmt.Sprintf("Expected error to be from duplicate label, but got: %s", errMessage))
}
func TestNegotiateResponseType(t *testing.T) {
@ -193,23 +193,23 @@ func TestNegotiateResponseType(t *testing.T) {
prompb.ReadRequest_STREAMED_XOR_CHUNKS,
prompb.ReadRequest_SAMPLES,
})
assert.NoError(t, err)
assert.Equal(t, prompb.ReadRequest_STREAMED_XOR_CHUNKS, r)
require.NoError(t, err)
require.Equal(t, prompb.ReadRequest_STREAMED_XOR_CHUNKS, r)
r2, err := NegotiateResponseType([]prompb.ReadRequest_ResponseType{
prompb.ReadRequest_SAMPLES,
prompb.ReadRequest_STREAMED_XOR_CHUNKS,
})
assert.NoError(t, err)
assert.Equal(t, prompb.ReadRequest_SAMPLES, r2)
require.NoError(t, err)
require.Equal(t, prompb.ReadRequest_SAMPLES, r2)
r3, err := NegotiateResponseType([]prompb.ReadRequest_ResponseType{})
assert.NoError(t, err)
assert.Equal(t, prompb.ReadRequest_SAMPLES, r3)
require.NoError(t, err)
require.Equal(t, prompb.ReadRequest_SAMPLES, r3)
_, err = NegotiateResponseType([]prompb.ReadRequest_ResponseType{20})
assert.Error(t, err, "expected error due to not supported requested response types")
assert.Equal(t, "server does not support any of the requested response types: [20]; supported: map[SAMPLES:{} STREAMED_XOR_CHUNKS:{}]", err.Error())
require.Error(t, err, "expected error due to not supported requested response types")
require.Equal(t, "server does not support any of the requested response types: [20]; supported: map[SAMPLES:{} STREAMED_XOR_CHUNKS:{}]", err.Error())
}
func TestMergeLabels(t *testing.T) {
@ -227,6 +227,6 @@ func TestMergeLabels(t *testing.T) {
expected: []prompb.Label{{Name: "aaa", Value: "foo"}, {Name: "bbb", Value: "bar"}, {Name: "ccc", Value: "bar"}, {Name: "ddd", Value: "foo"}},
},
} {
assert.Equal(t, tc.expected, MergeLabels(tc.primary, tc.secondary))
require.Equal(t, tc.expected, MergeLabels(tc.primary, tc.secondary))
}
}


@ -23,7 +23,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestIntern(t *testing.T) {
@ -32,8 +32,8 @@ func TestIntern(t *testing.T) {
interner.intern(testString)
interned, ok := interner.pool[testString]
assert.Equal(t, true, ok)
assert.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
require.Equal(t, true, ok)
require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
}
func TestIntern_MultiRef(t *testing.T) {
@ -43,14 +43,14 @@ func TestIntern_MultiRef(t *testing.T) {
interner.intern(testString)
interned, ok := interner.pool[testString]
assert.Equal(t, true, ok)
assert.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
require.Equal(t, true, ok)
require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
interner.intern(testString)
interned, ok = interner.pool[testString]
assert.Equal(t, true, ok)
assert.Equal(t, int64(2), interned.refs.Load(), fmt.Sprintf("expected refs to be 2 but it was %d", interned.refs.Load()))
require.Equal(t, true, ok)
require.Equal(t, int64(2), interned.refs.Load(), fmt.Sprintf("expected refs to be 2 but it was %d", interned.refs.Load()))
}
func TestIntern_DeleteRef(t *testing.T) {
@ -60,12 +60,12 @@ func TestIntern_DeleteRef(t *testing.T) {
interner.intern(testString)
interned, ok := interner.pool[testString]
assert.Equal(t, true, ok)
assert.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
require.Equal(t, true, ok)
require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
interner.release(testString)
_, ok = interner.pool[testString]
assert.Equal(t, false, ok)
require.Equal(t, false, ok)
}
func TestIntern_MultiRef_Concurrent(t *testing.T) {
@ -74,8 +74,8 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) {
interner.intern(testString)
interned, ok := interner.pool[testString]
assert.Equal(t, true, ok)
assert.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
require.Equal(t, true, ok)
require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
go interner.release(testString)
@ -86,6 +86,6 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) {
interner.mtx.RLock()
interned, ok = interner.pool[testString]
interner.mtx.RUnlock()
assert.Equal(t, true, ok)
assert.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
require.Equal(t, true, ok)
require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
}
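The interner exercised above follows a reference-counting pattern; here is a minimal sketch consistent with the fields the tests touch (pool, mtx, refs) — the entry type and constructor are invented for illustration, and the sketch is deliberately simple rather than hardened against every interleaving the concurrent test probes.

// Assumes "sync" and "go.uber.org/atomic" are imported.
type entry struct {
	refs atomic.Int64
	s    string
}

type interner struct {
	mtx  sync.RWMutex
	pool map[string]*entry
}

func newInterner() *interner {
	return &interner{pool: map[string]*entry{}}
}

func (p *interner) intern(s string) string {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	if e, ok := p.pool[s]; ok {
		e.refs.Inc() // already interned: bump the reference count
		return e.s
	}
	e := &entry{s: s}
	e.refs.Store(1)
	p.pool[s] = e
	return e.s
}

func (p *interner) release(s string) {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	e, ok := p.pool[s]
	if !ok {
		return // releasing a never-interned string is a no-op
	}
	if e.refs.Dec() == 0 {
		delete(p.pool, s) // last reference gone: drop the pooled entry (compare TestIntern_DeleteRef)
	}
}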

View file

@ -34,7 +34,7 @@ import (
client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
common_config "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/config"
@ -73,9 +73,9 @@ func TestSampleDelivery(t *testing.T) {
queueConfig.MaxSamplesPerSend = len(samples) / 2
dir, err := ioutil.TempDir("", "TestSampleDeliver")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline)
@ -95,9 +95,9 @@ func TestSampleDelivery(t *testing.T) {
},
}
writeConfig.QueueConfig = queueConfig
assert.NoError(t, s.ApplyConfig(conf))
require.NoError(t, s.ApplyConfig(conf))
hash, err := toHash(writeConfig)
assert.NoError(t, err)
require.NoError(t, err)
qm := s.rws.queues[hash]
qm.SetClient(c)
@ -121,9 +121,9 @@ func TestSampleDeliveryTimeout(t *testing.T) {
cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)
dir, err := ioutil.TempDir("", "TestSampleDeliveryTimeout")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
metrics := newQueueManagerMetrics(nil, "", "")
@ -164,9 +164,9 @@ func TestSampleDeliveryOrder(t *testing.T) {
c.expectSamples(samples, series)
dir, err := ioutil.TempDir("", "TestSampleDeliveryOrder")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
metrics := newQueueManagerMetrics(nil, "", "")
@ -185,9 +185,9 @@ func TestShutdown(t *testing.T) {
c := NewTestBlockedWriteClient()
dir, err := ioutil.TempDir("", "TestShutdown")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
metrics := newQueueManagerMetrics(nil, "", "")
@ -226,9 +226,9 @@ func TestSeriesReset(t *testing.T) {
numSeries := 25
dir, err := ioutil.TempDir("", "TestSeriesReset")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
metrics := newQueueManagerMetrics(nil, "", "")
@ -240,9 +240,9 @@ func TestSeriesReset(t *testing.T) {
}
m.StoreSeries(series, i)
}
assert.Equal(t, numSegments*numSeries, len(m.seriesLabels))
require.Equal(t, numSegments*numSeries, len(m.seriesLabels))
m.SeriesReset(2)
assert.Equal(t, numSegments*numSeries/2, len(m.seriesLabels))
require.Equal(t, numSegments*numSeries/2, len(m.seriesLabels))
}
func TestReshard(t *testing.T) {
@ -258,9 +258,9 @@ func TestReshard(t *testing.T) {
cfg.MaxShards = 1
dir, err := ioutil.TempDir("", "TestReshard")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
metrics := newQueueManagerMetrics(nil, "", "")
@ -273,7 +273,7 @@ func TestReshard(t *testing.T) {
go func() {
for i := 0; i < len(samples); i += config.DefaultQueueConfig.Capacity {
sent := m.Append(samples[i : i+config.DefaultQueueConfig.Capacity])
assert.True(t, sent, "samples not sent")
require.True(t, sent, "samples not sent")
time.Sleep(100 * time.Millisecond)
}
}()
@ -334,7 +334,7 @@ func TestReleaseNoninternedString(t *testing.T) {
}
metric := client_testutil.ToFloat64(noReferenceReleases)
assert.Equal(t, 0.0, metric, "expected there to be no calls to release for strings that were not already interned: %d", int(metric))
require.Equal(t, 0.0, metric, "expected there to be no calls to release for strings that were not already interned: %d", int(metric))
}
func TestShouldReshard(t *testing.T) {
@ -376,7 +376,7 @@ func TestShouldReshard(t *testing.T) {
m.Stop()
assert.Equal(t, c.expectedToReshard, shouldReshard)
require.Equal(t, c.expectedToReshard, shouldReshard)
}
}
@ -455,7 +455,7 @@ func (c *TestWriteClient) waitForExpectedSamples(tb testing.TB) {
c.mtx.Lock()
defer c.mtx.Unlock()
for ts, expectedSamples := range c.expectedSamples {
assert.Equal(tb, expectedSamples, c.receivedSamples[ts], ts)
require.Equal(tb, expectedSamples, c.receivedSamples[ts], ts)
}
}
@ -564,7 +564,7 @@ func BenchmarkSampleDelivery(b *testing.B) {
cfg.MaxShards = 1
dir, err := ioutil.TempDir("", "BenchmarkSampleDelivery")
assert.NoError(b, err)
require.NoError(b, err)
defer os.RemoveAll(dir)
metrics := newQueueManagerMetrics(nil, "", "")
@ -594,7 +594,7 @@ func BenchmarkStartup(b *testing.B) {
// Find the second largest segment; we will replay up to this.
// (Second largest as WALWatcher will start tailing the largest).
dirents, err := ioutil.ReadDir(dir)
assert.NoError(b, err)
require.NoError(b, err)
var segments []int
for _, dirent := range dirents {
@ -616,7 +616,7 @@ func BenchmarkStartup(b *testing.B) {
m.watcher.SetStartTime(timestamp.Time(math.MaxInt64))
m.watcher.MaxSegment = segments[len(segments)-2]
err := m.watcher.Run()
assert.NoError(b, err)
require.NoError(b, err)
}
}
@ -647,7 +647,7 @@ func TestProcessExternalLabels(t *testing.T) {
expected: labels.Labels{{Name: "a", Value: "b"}},
},
} {
assert.Equal(t, tc.expected, processExternalLabels(tc.labels, tc.externalLabels))
require.Equal(t, tc.expected, processExternalLabels(tc.labels, tc.externalLabels))
}
}
@ -656,9 +656,9 @@ func TestCalculateDesiredShards(t *testing.T) {
cfg := config.DefaultQueueConfig
dir, err := ioutil.TempDir("", "TestCalculateDesiredShards")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
metrics := newQueueManagerMetrics(nil, "", "")
@ -703,7 +703,7 @@ func TestCalculateDesiredShards(t *testing.T) {
for ; ts < 120*time.Second; ts += shardUpdateDuration {
addSamples(inputRate*int64(shardUpdateDuration/time.Second), ts)
m.numShards = m.calculateDesiredShards()
assert.Equal(t, 1, m.numShards)
require.Equal(t, 1, m.numShards)
}
// Assume 100ms per request, or 10 requests per second per shard.
@ -725,10 +725,10 @@ func TestCalculateDesiredShards(t *testing.T) {
t.Log("desiredShards", m.numShards, "pendingSamples", pendingSamples)
m.numShards = m.calculateDesiredShards()
assert.GreaterOrEqual(t, m.numShards, minShards, "Shards are too low. desiredShards=%d, minShards=%d, t_seconds=%d", m.numShards, minShards, ts/time.Second)
assert.LessOrEqual(t, m.numShards, maxShards, "Shards are too high. desiredShards=%d, maxShards=%d, t_seconds=%d", m.numShards, maxShards, ts/time.Second)
require.GreaterOrEqual(t, m.numShards, minShards, "Shards are too low. desiredShards=%d, minShards=%d, t_seconds=%d", m.numShards, minShards, ts/time.Second)
require.LessOrEqual(t, m.numShards, maxShards, "Shards are too high. desiredShards=%d, maxShards=%d, t_seconds=%d", m.numShards, maxShards, ts/time.Second)
}
assert.Equal(t, int64(0), pendingSamples, "Remote write never caught up, there are still %d pending samples.", pendingSamples)
require.Equal(t, int64(0), pendingSamples, "Remote write never caught up, there are still %d pending samples.", pendingSamples)
}
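The test's own comment (100ms per request, so roughly 10 requests per second per shard) implies a simple lower bound on the shard count; this hypothetical helper is only that arithmetic, not the real calculateDesiredShards:

// Assumes "math" is imported.
func minShardsSketch(inputRate float64, maxSamplesPerSend int) int {
	// One shard drains ~10 requests/s, each carrying at most maxSamplesPerSend
	// samples. With fewer shards than this, pendingSamples grows without bound,
	// which is exactly what the final assertion above rules out.
	perShard := 10.0 * float64(maxSamplesPerSend)
	n := int(math.Ceil(inputRate / perShard))
	if n < 1 {
		n = 1
	}
	return n
}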
func TestQueueManagerMetrics(t *testing.T) {
@ -737,12 +737,12 @@ func TestQueueManagerMetrics(t *testing.T) {
// Make sure metrics pass linting.
problems, err := client_testutil.GatherAndLint(reg)
assert.NoError(t, err)
assert.Equal(t, 0, len(problems), "Metric linting problems detected: %v", problems)
require.NoError(t, err)
require.Equal(t, 0, len(problems), "Metric linting problems detected: %v", problems)
// Make sure all metrics were unregistered. A failure here means you need
// to unregister a metric in `queueManagerMetrics.unregister()`.
metrics.unregister()
err = client_testutil.GatherAndCompare(reg, strings.NewReader(""))
assert.NoError(t, err)
require.NoError(t, err)
}

View file

@ -24,7 +24,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
config_util "github.com/prometheus/common/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/pkg/labels"
@ -34,7 +34,7 @@ import (
func TestNoDuplicateReadConfigs(t *testing.T) {
dir, err := ioutil.TempDir("", "TestNoDuplicateReadConfigs")
assert.NoError(t, err)
require.NoError(t, err)
defer os.RemoveAll(dir)
cfg1 := config.RemoteReadConfig{
@ -103,8 +103,8 @@ func TestNoDuplicateReadConfigs(t *testing.T) {
err := s.ApplyConfig(conf)
prometheus.Unregister(s.rws.highestTimestamp)
gotError := err != nil
assert.Equal(t, tc.err, gotError)
assert.NoError(t, s.Close())
require.Equal(t, tc.err, gotError)
require.NoError(t, s.Close())
})
}
}
@ -170,8 +170,8 @@ func TestExternalLabelsQuerierAddExternalLabels(t *testing.T) {
sort.Slice(test.outMatchers, func(i, j int) bool { return test.outMatchers[i].Name < test.outMatchers[j].Name })
sort.Slice(matchers, func(i, j int) bool { return matchers[i].Name < matchers[j].Name })
assert.Equal(t, test.outMatchers, matchers, "%d", i)
assert.Equal(t, test.added, added, "%d", i)
require.Equal(t, test.outMatchers, matchers, "%d", i)
require.Equal(t, test.added, added, "%d", i)
}
}
@ -200,9 +200,9 @@ func TestSeriesSetFilter(t *testing.T) {
for _, tc := range tests {
filtered := newSeriesSetFilter(FromQueryResult(true, tc.in), tc.toRemove)
act, ws, err := ToQueryResult(filtered, 1e6)
assert.NoError(t, err)
assert.Equal(t, 0, len(ws))
assert.Equal(t, tc.expected, act)
require.NoError(t, err)
require.Equal(t, 0, len(ws))
require.Equal(t, tc.expected, act)
}
}
@ -491,21 +491,21 @@ func TestSampleAndChunkQueryableClient(t *testing.T) {
tc.callback,
)
q, err := c.Querier(context.TODO(), tc.mint, tc.maxt)
assert.NoError(t, err)
defer func() { assert.NoError(t, q.Close()) }()
require.NoError(t, err)
defer func() { require.NoError(t, q.Close()) }()
ss := q.Select(true, nil, tc.matchers...)
assert.NoError(t, err)
assert.Equal(t, storage.Warnings(nil), ss.Warnings())
require.NoError(t, err)
require.Equal(t, storage.Warnings(nil), ss.Warnings())
assert.Equal(t, tc.expectedQuery, m.got)
require.Equal(t, tc.expectedQuery, m.got)
var got []labels.Labels
for ss.Next() {
got = append(got, ss.At().Labels())
}
assert.NoError(t, ss.Err())
assert.Equal(t, tc.expectedSeries, got)
require.NoError(t, ss.Err())
require.Equal(t, tc.expectedSeries, got)
})
}

View file

@ -20,14 +20,14 @@ import (
"testing"
common_config "github.com/prometheus/common/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config"
)
func TestStorageLifecycle(t *testing.T) {
dir, err := ioutil.TempDir("", "TestStorageLifecycle")
assert.NoError(t, err)
require.NoError(t, err)
defer os.RemoveAll(dir)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline)
@ -52,21 +52,21 @@ func TestStorageLifecycle(t *testing.T) {
},
}
assert.NoError(t, s.ApplyConfig(conf))
require.NoError(t, s.ApplyConfig(conf))
// make sure remote write has a queue.
assert.Equal(t, 1, len(s.rws.queues))
require.Equal(t, 1, len(s.rws.queues))
// make sure remote read has a queryable.
assert.Equal(t, 1, len(s.queryables))
require.Equal(t, 1, len(s.queryables))
err = s.Close()
assert.NoError(t, err)
require.NoError(t, err)
}
func TestUpdateRemoteReadConfigs(t *testing.T) {
dir, err := ioutil.TempDir("", "TestUpdateRemoteReadConfigs")
assert.NoError(t, err)
require.NoError(t, err)
defer os.RemoveAll(dir)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline)
@ -74,15 +74,15 @@ func TestUpdateRemoteReadConfigs(t *testing.T) {
conf := &config.Config{
GlobalConfig: config.GlobalConfig{},
}
assert.NoError(t, s.ApplyConfig(conf))
assert.Equal(t, 0, len(s.queryables))
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 0, len(s.queryables))
conf.RemoteReadConfigs = []*config.RemoteReadConfig{
&config.DefaultRemoteReadConfig,
}
assert.NoError(t, s.ApplyConfig(conf))
assert.Equal(t, 1, len(s.queryables))
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 1, len(s.queryables))
err = s.Close()
assert.NoError(t, err)
require.NoError(t, err)
}

View file

@ -23,7 +23,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
common_config "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/pkg/labels"
@ -43,9 +43,9 @@ var cfg = config.RemoteWriteConfig{
func TestNoDuplicateWriteConfigs(t *testing.T) {
dir, err := ioutil.TempDir("", "TestNoDuplicateWriteConfigs")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
cfg1 := config.RemoteWriteConfig{
@ -122,22 +122,22 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
}
err := s.ApplyConfig(conf)
gotError := err != nil
assert.Equal(t, tc.err, gotError)
require.Equal(t, tc.err, gotError)
err = s.Close()
assert.NoError(t, err)
require.NoError(t, err)
}
}
func TestRestartOnNameChange(t *testing.T) {
dir, err := ioutil.TempDir("", "TestRestartOnNameChange")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
hash, err := toHash(cfg)
assert.NoError(t, err)
require.NoError(t, err)
s := NewWriteStorage(nil, nil, dir, time.Millisecond)
conf := &config.Config{
@ -146,25 +146,25 @@ func TestRestartOnNameChange(t *testing.T) {
&cfg,
},
}
assert.NoError(t, s.ApplyConfig(conf))
assert.Equal(t, s.queues[hash].client().Name(), cfg.Name)
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, s.queues[hash].client().Name(), cfg.Name)
// Change the queue's name and ensure the queue has been restarted.
conf.RemoteWriteConfigs[0].Name = "dev-2"
assert.NoError(t, s.ApplyConfig(conf))
require.NoError(t, s.ApplyConfig(conf))
hash, err = toHash(cfg)
assert.NoError(t, err)
assert.Equal(t, s.queues[hash].client().Name(), conf.RemoteWriteConfigs[0].Name)
require.NoError(t, err)
require.Equal(t, s.queues[hash].client().Name(), conf.RemoteWriteConfigs[0].Name)
err = s.Close()
assert.NoError(t, err)
require.NoError(t, err)
}
func TestUpdateWithRegisterer(t *testing.T) {
dir, err := ioutil.TempDir("", "TestRestartWithRegisterer")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond)
@ -191,24 +191,24 @@ func TestUpdateWithRegisterer(t *testing.T) {
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{c1, c2},
}
assert.NoError(t, s.ApplyConfig(conf))
require.NoError(t, s.ApplyConfig(conf))
c1.QueueConfig.MaxShards = 10
c2.QueueConfig.MaxShards = 10
assert.NoError(t, s.ApplyConfig(conf))
require.NoError(t, s.ApplyConfig(conf))
for _, queue := range s.queues {
assert.Equal(t, 10, queue.cfg.MaxShards)
require.Equal(t, 10, queue.cfg.MaxShards)
}
err = s.Close()
assert.NoError(t, err)
require.NoError(t, err)
}
func TestWriteStorageLifecycle(t *testing.T) {
dir, err := ioutil.TempDir("", "TestWriteStorageLifecycle")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline)
@ -219,17 +219,17 @@ func TestWriteStorageLifecycle(t *testing.T) {
},
}
s.ApplyConfig(conf)
assert.Equal(t, 1, len(s.queues))
require.Equal(t, 1, len(s.queues))
err = s.Close()
assert.NoError(t, err)
require.NoError(t, err)
}
func TestUpdateExternalLabels(t *testing.T) {
dir, err := ioutil.TempDir("", "TestUpdateExternalLabels")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second)
@ -242,27 +242,27 @@ func TestUpdateExternalLabels(t *testing.T) {
},
}
hash, err := toHash(conf.RemoteWriteConfigs[0])
assert.NoError(t, err)
require.NoError(t, err)
s.ApplyConfig(conf)
assert.Equal(t, 1, len(s.queues))
assert.Equal(t, labels.Labels(nil), s.queues[hash].externalLabels)
require.Equal(t, 1, len(s.queues))
require.Equal(t, labels.Labels(nil), s.queues[hash].externalLabels)
conf.GlobalConfig.ExternalLabels = externalLabels
hash, err = toHash(conf.RemoteWriteConfigs[0])
assert.NoError(t, err)
require.NoError(t, err)
s.ApplyConfig(conf)
assert.Equal(t, 1, len(s.queues))
assert.Equal(t, externalLabels, s.queues[hash].externalLabels)
require.Equal(t, 1, len(s.queues))
require.Equal(t, externalLabels, s.queues[hash].externalLabels)
err = s.Close()
assert.NoError(t, err)
require.NoError(t, err)
}
func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
dir, err := ioutil.TempDir("", "TestWriteStorageApplyConfigsIdempotent")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline)
@ -280,25 +280,25 @@ func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
},
}
hash, err := toHash(conf.RemoteWriteConfigs[0])
assert.NoError(t, err)
require.NoError(t, err)
s.ApplyConfig(conf)
assert.Equal(t, 1, len(s.queues))
require.Equal(t, 1, len(s.queues))
s.ApplyConfig(conf)
assert.Equal(t, 1, len(s.queues))
require.Equal(t, 1, len(s.queues))
_, hashExists := s.queues[hash]
assert.True(t, hashExists, "Queue pointer should have remained the same")
require.True(t, hashExists, "Queue pointer should have remained the same")
err = s.Close()
assert.NoError(t, err)
require.NoError(t, err)
}
func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
dir, err := ioutil.TempDir("", "TestWriteStorageApplyConfigsPartialUpdate")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline)
@ -336,15 +336,15 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
},
}
}
assert.NoError(t, s.ApplyConfig(conf))
assert.Equal(t, 3, len(s.queues))
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 3, len(s.queues))
hashes := make([]string, len(conf.RemoteWriteConfigs))
queues := make([]*QueueManager, len(conf.RemoteWriteConfigs))
storeHashes := func() {
for i := range conf.RemoteWriteConfigs {
hash, err := toHash(conf.RemoteWriteConfigs[i])
assert.NoError(t, err)
require.NoError(t, err)
hashes[i] = hash
queues[i] = s.queues[hash]
}
@ -358,32 +358,32 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
GlobalConfig: config.GlobalConfig{},
RemoteWriteConfigs: []*config.RemoteWriteConfig{c0, c1, c2},
}
assert.NoError(t, s.ApplyConfig(conf))
assert.Equal(t, 3, len(s.queues))
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 3, len(s.queues))
_, hashExists := s.queues[hashes[0]]
assert.False(t, hashExists, "The queue for the first remote write configuration should have been restarted because the relabel configuration has changed.")
require.False(t, hashExists, "The queue for the first remote write configuration should have been restarted because the relabel configuration has changed.")
q, hashExists := s.queues[hashes[1]]
assert.True(t, hashExists, "Hash of unchanged queue should have remained the same")
assert.Equal(t, q, queues[1], "Pointer of unchanged queue should have remained the same")
require.True(t, hashExists, "Hash of unchanged queue should have remained the same")
require.Equal(t, q, queues[1], "Pointer of unchanged queue should have remained the same")
_, hashExists = s.queues[hashes[2]]
assert.False(t, hashExists, "The queue for the third remote write configuration should have been restarted because the timeout has changed.")
require.False(t, hashExists, "The queue for the third remote write configuration should have been restarted because the timeout has changed.")
storeHashes()
secondClient := s.queues[hashes[1]].client()
// Update c1.
c1.HTTPClientConfig.BearerToken = "bar"
err = s.ApplyConfig(conf)
assert.NoError(t, err)
assert.Equal(t, 3, len(s.queues))
require.NoError(t, err)
require.Equal(t, 3, len(s.queues))
_, hashExists = s.queues[hashes[0]]
assert.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
require.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
q, hashExists = s.queues[hashes[1]]
assert.True(t, hashExists, "Hash of queue with secret change should have remained the same")
assert.NotEqual(t, secondClient, q.client(), "Pointer of a client with a secret change should not be the same")
require.True(t, hashExists, "Hash of queue with secret change should have remained the same")
require.NotEqual(t, secondClient, q.client(), "Pointer of a client with a secret change should not be the same")
_, hashExists = s.queues[hashes[2]]
assert.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
require.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
storeHashes()
// Delete c0.
@ -392,15 +392,15 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
RemoteWriteConfigs: []*config.RemoteWriteConfig{c1, c2},
}
s.ApplyConfig(conf)
assert.Equal(t, 2, len(s.queues))
require.Equal(t, 2, len(s.queues))
_, hashExists = s.queues[hashes[0]]
assert.False(t, hashExists, "If a config is removed, the queue should be stopped and recreated.")
require.False(t, hashExists, "If a config is removed, the queue should be stopped and recreated.")
_, hashExists = s.queues[hashes[1]]
assert.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
require.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
_, hashExists = s.queues[hashes[2]]
assert.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
require.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
err = s.Close()
assert.NoError(t, err)
require.NoError(t, err)
}
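All of the partial-update assertions above hinge on one keying rule: each queue is indexed by a hash of its remote-write config, so a change that alters the hash stops and recreates the queue, while a secret-only change (the BearerToken case) leaves the hash intact and only swaps the client. A hypothetical stand-in for toHash, shown just to make the keying concrete — the real function's marshalling details may differ, and secret fields are assumed to serialize to a fixed placeholder so they never perturb the hash:

// Assumes "crypto/sha256", "encoding/hex" and "encoding/json" are imported.
func toHashSketch(cfg interface{}) (string, error) {
	b, err := json.Marshal(cfg) // secrets must marshal to a constant placeholder
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(b)
	return hex.EncodeToString(sum[:]), nil
}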

View file

@ -20,7 +20,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/promql"
@ -284,14 +284,14 @@ func TestTemplateExpansion(t *testing.T) {
result, err = expander.Expand()
}
if s.shouldFail {
assert.Error(t, err, "%v", s.text)
require.Error(t, err, "%v", s.text)
continue
}
assert.NoError(t, err)
require.NoError(t, err)
if err == nil {
assert.Equal(t, s.output, result)
require.Equal(t, s.output, result)
}
}
}

View file

@ -26,7 +26,7 @@ import (
"testing"
"github.com/go-kit/kit/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage"
@ -41,51 +41,51 @@ import (
// version 3 next time to avoid confusion and issues.
func TestBlockMetaMustNeverBeVersion2(t *testing.T) {
dir, err := ioutil.TempDir("", "metaversion")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
_, err = writeMetaFile(log.NewNopLogger(), dir, &BlockMeta{})
assert.NoError(t, err)
require.NoError(t, err)
meta, _, err := readMetaFile(dir)
assert.NoError(t, err)
assert.NotEqual(t, 2, meta.Version, "meta.json version must never be 2")
require.NoError(t, err)
require.NotEqual(t, 2, meta.Version, "meta.json version must never be 2")
}
func TestSetCompactionFailed(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(tmpdir))
require.NoError(t, os.RemoveAll(tmpdir))
}()
blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 1))
b, err := OpenBlock(nil, blockDir, nil)
assert.NoError(t, err)
assert.Equal(t, false, b.meta.Compaction.Failed)
assert.NoError(t, b.setCompactionFailed())
assert.Equal(t, true, b.meta.Compaction.Failed)
assert.NoError(t, b.Close())
require.NoError(t, err)
require.Equal(t, false, b.meta.Compaction.Failed)
require.NoError(t, b.setCompactionFailed())
require.Equal(t, true, b.meta.Compaction.Failed)
require.NoError(t, b.Close())
b, err = OpenBlock(nil, blockDir, nil)
assert.NoError(t, err)
assert.Equal(t, true, b.meta.Compaction.Failed)
assert.NoError(t, b.Close())
require.NoError(t, err)
require.Equal(t, true, b.meta.Compaction.Failed)
require.NoError(t, b.Close())
}
func TestCreateBlock(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(tmpdir))
require.NoError(t, os.RemoveAll(tmpdir))
}()
b, err := OpenBlock(nil, createBlock(t, tmpdir, genSeries(1, 1, 0, 10)), nil)
if err == nil {
assert.NoError(t, b.Close())
require.NoError(t, b.Close())
}
assert.NoError(t, err)
require.NoError(t, err)
}
func TestCorruptedChunk(t *testing.T) {
@ -98,7 +98,7 @@ func TestCorruptedChunk(t *testing.T) {
{
name: "invalid header size",
corrFunc: func(f *os.File) {
assert.NoError(t, f.Truncate(1))
require.NoError(t, f.Truncate(1))
},
openErr: errors.New("invalid segment header in segment 0: invalid size"),
},
@ -107,14 +107,14 @@ func TestCorruptedChunk(t *testing.T) {
corrFunc: func(f *os.File) {
magicChunksOffset := int64(0)
_, err := f.Seek(magicChunksOffset, 0)
assert.NoError(t, err)
require.NoError(t, err)
// Set invalid magic number.
b := make([]byte, chunks.MagicChunksSize)
binary.BigEndian.PutUint32(b[:chunks.MagicChunksSize], 0x00000000)
n, err := f.Write(b)
assert.NoError(t, err)
assert.Equal(t, chunks.MagicChunksSize, n)
require.NoError(t, err)
require.Equal(t, chunks.MagicChunksSize, n)
},
openErr: errors.New("invalid magic number 0"),
},
@ -123,14 +123,14 @@ func TestCorruptedChunk(t *testing.T) {
corrFunc: func(f *os.File) {
chunksFormatVersionOffset := int64(4)
_, err := f.Seek(chunksFormatVersionOffset, 0)
assert.NoError(t, err)
require.NoError(t, err)
// Set invalid chunk format version.
b := make([]byte, chunks.ChunksFormatVersionSize)
b[0] = 0
n, err := f.Write(b)
assert.NoError(t, err)
assert.Equal(t, chunks.ChunksFormatVersionSize, n)
require.NoError(t, err)
require.Equal(t, chunks.ChunksFormatVersionSize, n)
},
openErr: errors.New("invalid chunk format version 0"),
},
@ -138,7 +138,7 @@ func TestCorruptedChunk(t *testing.T) {
name: "chunk not enough bytes to read the chunk length",
corrFunc: func(f *os.File) {
// Truncate one byte after the segment header.
assert.NoError(t, f.Truncate(chunks.SegmentHeaderSize+1))
require.NoError(t, f.Truncate(chunks.SegmentHeaderSize+1))
},
iterErr: errors.New("cannot populate chunk 8: segment doesn't include enough bytes to read the chunk size data field - required:13, available:9"),
},
@ -146,8 +146,8 @@ func TestCorruptedChunk(t *testing.T) {
name: "chunk not enough bytes to read the data",
corrFunc: func(f *os.File) {
fi, err := f.Stat()
assert.NoError(t, err)
assert.NoError(t, f.Truncate(fi.Size()-1))
require.NoError(t, err)
require.NoError(t, f.Truncate(fi.Size()-1))
},
iterErr: errors.New("cannot populate chunk 8: segment doesn't include enough bytes to read the chunk - required:26, available:25"),
},
@ -155,59 +155,59 @@ func TestCorruptedChunk(t *testing.T) {
name: "checksum mismatch",
corrFunc: func(f *os.File) {
fi, err := f.Stat()
assert.NoError(t, err)
require.NoError(t, err)
// Get the chunk data end offset.
chkEndOffset := int(fi.Size()) - crc32.Size
// Seek to the last byte of chunk data and modify it.
_, err = f.Seek(int64(chkEndOffset-1), 0)
assert.NoError(t, err)
require.NoError(t, err)
n, err := f.Write([]byte("x"))
assert.NoError(t, err)
assert.Equal(t, 1, n)
require.NoError(t, err)
require.Equal(t, 1, n)
},
iterErr: errors.New("cannot populate chunk 8: checksum mismatch expected:cfc0526c, actual:34815eae"),
},
} {
t.Run(tc.name, func(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test_open_block_chunk_corrupted")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(tmpdir))
require.NoError(t, os.RemoveAll(tmpdir))
}()
series := storage.NewListSeries(labels.FromStrings("a", "b"), []tsdbutil.Sample{sample{1, 1}})
blockDir := createBlock(t, tmpdir, []storage.Series{series})
files, err := sequenceFiles(chunkDir(blockDir))
assert.NoError(t, err)
assert.Greater(t, len(files), 0, "No chunk created.")
require.NoError(t, err)
require.Greater(t, len(files), 0, "No chunk created.")
f, err := os.OpenFile(files[0], os.O_RDWR, 0666)
assert.NoError(t, err)
require.NoError(t, err)
// Apply corruption function.
tc.corrFunc(f)
assert.NoError(t, f.Close())
require.NoError(t, f.Close())
// Check open err.
b, err := OpenBlock(nil, blockDir, nil)
if tc.openErr != nil {
assert.Equal(t, tc.openErr.Error(), err.Error())
require.Equal(t, tc.openErr.Error(), err.Error())
return
}
defer func() { assert.NoError(t, b.Close()) }()
defer func() { require.NoError(t, b.Close()) }()
querier, err := NewBlockQuerier(b, 0, 1)
assert.NoError(t, err)
defer func() { assert.NoError(t, querier.Close()) }()
require.NoError(t, err)
defer func() { require.NoError(t, querier.Close()) }()
set := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
// Check chunk errors during iter time.
assert.True(t, set.Next())
require.True(t, set.Next())
it := set.At().Iterator()
assert.Equal(t, false, it.Next())
assert.Equal(t, tc.iterErr.Error(), it.Err().Error())
require.Equal(t, false, it.Next())
require.Equal(t, tc.iterErr.Error(), it.Err().Error())
})
}
}
@ -215,9 +215,9 @@ func TestCorruptedChunk(t *testing.T) {
// TestBlockSize ensures that the block size is calculated correctly.
func TestBlockSize(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test_blockSize")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(tmpdir))
require.NoError(t, os.RemoveAll(tmpdir))
}()
var (
@ -230,39 +230,39 @@ func TestBlockSize(t *testing.T) {
{
blockDirInit = createBlock(t, tmpdir, genSeries(10, 1, 1, 100))
blockInit, err = OpenBlock(nil, blockDirInit, nil)
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, blockInit.Close())
require.NoError(t, blockInit.Close())
}()
expSizeInit = blockInit.Size()
actSizeInit, err := fileutil.DirSize(blockInit.Dir())
assert.NoError(t, err)
assert.Equal(t, expSizeInit, actSizeInit)
require.NoError(t, err)
require.Equal(t, expSizeInit, actSizeInit)
}
// Delete some series and check the sizes again.
{
assert.NoError(t, blockInit.Delete(1, 10, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")))
require.NoError(t, blockInit.Delete(1, 10, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")))
expAfterDelete := blockInit.Size()
assert.Greater(t, expAfterDelete, expSizeInit, "after a delete the block size should be bigger as the tombstone file should grow %v > %v", expAfterDelete, expSizeInit)
require.Greater(t, expAfterDelete, expSizeInit, "after a delete the block size should be bigger as the tombstone file should grow %v > %v", expAfterDelete, expSizeInit)
actAfterDelete, err := fileutil.DirSize(blockDirInit)
assert.NoError(t, err)
assert.Equal(t, expAfterDelete, actAfterDelete, "after a delete reported block size doesn't match actual disk size")
require.NoError(t, err)
require.Equal(t, expAfterDelete, actAfterDelete, "after a delete reported block size doesn't match actual disk size")
c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil)
assert.NoError(t, err)
require.NoError(t, err)
blockDirAfterCompact, err := c.Compact(tmpdir, []string{blockInit.Dir()}, nil)
assert.NoError(t, err)
require.NoError(t, err)
blockAfterCompact, err := OpenBlock(nil, filepath.Join(tmpdir, blockDirAfterCompact.String()), nil)
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, blockAfterCompact.Close())
require.NoError(t, blockAfterCompact.Close())
}()
expAfterCompact := blockAfterCompact.Size()
actAfterCompact, err := fileutil.DirSize(blockAfterCompact.Dir())
assert.NoError(t, err)
assert.Greater(t, actAfterDelete, actAfterCompact, "after a delete and compaction the block size should be smaller %v,%v", actAfterDelete, actAfterCompact)
assert.Equal(t, expAfterCompact, actAfterCompact, "after a delete and compaction reported block size doesn't match actual disk size")
require.NoError(t, err)
require.Greater(t, actAfterDelete, actAfterCompact, "after a delete and compaction the block size should be smaller %v,%v", actAfterDelete, actAfterCompact)
require.Equal(t, expAfterCompact, actAfterCompact, "after a delete and compaction reported block size doesn't match actual disk size")
}
}
@ -285,16 +285,16 @@ func TestReadIndexFormatV1(t *testing.T) {
blockDir := filepath.Join("testdata", "index_format_v1")
block, err := OpenBlock(nil, blockDir, nil)
assert.NoError(t, err)
require.NoError(t, err)
q, err := NewBlockQuerier(block, 0, 1000)
assert.NoError(t, err)
assert.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")),
require.NoError(t, err)
require.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")),
map[string][]tsdbutil.Sample{`{foo="bar"}`: {sample{t: 1, v: 2}}})
q, err = NewBlockQuerier(block, 0, 1000)
assert.NoError(t, err)
assert.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.?$")),
require.NoError(t, err)
require.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.?$")),
map[string][]tsdbutil.Sample{
`{foo="bar"}`: {sample{t: 1, v: 2}},
`{foo="baz"}`: {sample{t: 3, v: 4}},
@ -304,26 +304,26 @@ func TestReadIndexFormatV1(t *testing.T) {
// createBlock creates a block with given set of series and returns its dir.
func createBlock(tb testing.TB, dir string, series []storage.Series) string {
blockDir, err := CreateBlock(series, dir, 0, log.NewNopLogger())
assert.NoError(tb, err)
require.NoError(tb, err)
return blockDir
}
func createBlockFromHead(tb testing.TB, dir string, head *Head) string {
compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil)
assert.NoError(tb, err)
require.NoError(tb, err)
assert.NoError(tb, os.MkdirAll(dir, 0777))
require.NoError(tb, os.MkdirAll(dir, 0777))
// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
// Because of this, a block's maxt must be one millisecond past the last sample it is meant to include.
ulid, err := compactor.Write(dir, head, head.MinTime(), head.MaxTime()+1, nil)
assert.NoError(tb, err)
require.NoError(tb, err)
return filepath.Join(dir, ulid.String())
}
func createHead(tb testing.TB, w *wal.WAL, series []storage.Series, chunkDir string) *Head {
head, err := NewHead(nil, nil, w, DefaultBlockDuration, chunkDir, nil, DefaultStripeSize, nil)
assert.NoError(tb, err)
require.NoError(tb, err)
app := head.Appender(context.Background())
for _, s := range series {
@ -338,11 +338,11 @@ func createHead(tb testing.TB, w *wal.WAL, series []storage.Series, chunkDir str
}
}
ref, err = app.Add(s.Labels(), t, v)
assert.NoError(tb, err)
require.NoError(tb, err)
}
assert.NoError(tb, it.Err())
require.NoError(tb, it.Err())
}
assert.NoError(tb, app.Commit())
require.NoError(tb, app.Commit())
return head
}
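The head.MaxTime()+1 in createBlockFromHead is easy to trip over, so the convention deserves one explicit line: block intervals are half-open, and a block must extend one millisecond past the last sample it is meant to contain. A trivial check capturing the rule:

// A block covering [mint, maxt) contains a sample at ts iff:
func blockContains(mint, maxt, ts int64) bool {
	return mint <= ts && ts < maxt
}

// blockContains(head.MinTime(), head.MaxTime(), head.MaxTime()) is false,
// hence the +1 passed to compactor.Write above.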

View file

@ -22,7 +22,7 @@ import (
"testing"
"github.com/go-kit/kit/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
@ -31,37 +31,37 @@ import (
func TestBlockWriter(t *testing.T) {
ctx := context.Background()
outputDir, err := ioutil.TempDir(os.TempDir(), "output")
assert.NoError(t, err)
require.NoError(t, err)
w, err := NewBlockWriter(log.NewNopLogger(), outputDir, DefaultBlockDuration)
assert.NoError(t, err)
require.NoError(t, err)
// Flush with no series results in error.
_, err = w.Flush(ctx)
assert.EqualError(t, err, "no series appended, aborting")
require.EqualError(t, err, "no series appended, aborting")
// Add some series.
app := w.Appender(ctx)
ts1, v1 := int64(44), float64(7)
_, err = app.Add(labels.Labels{{Name: "a", Value: "b"}}, ts1, v1)
assert.NoError(t, err)
require.NoError(t, err)
ts2, v2 := int64(55), float64(12)
_, err = app.Add(labels.Labels{{Name: "c", Value: "d"}}, ts2, v2)
assert.NoError(t, err)
assert.NoError(t, app.Commit())
require.NoError(t, err)
require.NoError(t, app.Commit())
id, err := w.Flush(ctx)
assert.NoError(t, err)
require.NoError(t, err)
// Confirm the block has the correct data.
blockpath := filepath.Join(outputDir, id.String())
b, err := OpenBlock(nil, blockpath, nil)
assert.NoError(t, err)
require.NoError(t, err)
q, err := NewBlockQuerier(b, math.MinInt64, math.MaxInt64)
assert.NoError(t, err)
require.NoError(t, err)
series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
sample1 := []tsdbutil.Sample{sample{t: ts1, v: v1}}
sample2 := []tsdbutil.Sample{sample{t: ts2, v: v2}}
expectedSeries := map[string][]tsdbutil.Sample{"{a=\"b\"}": sample1, "{c=\"d\"}": sample2}
assert.Equal(t, expectedSeries, series)
require.Equal(t, expectedSeries, series)
assert.NoError(t, w.Close())
require.NoError(t, w.Close())
}

View file

@ -16,7 +16,7 @@ package chunkenc
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestBstreamReader(t *testing.T) {
@ -39,23 +39,23 @@ func TestBstreamReader(t *testing.T) {
if err != nil {
v, err = r.readBit()
}
assert.NoError(t, err)
assert.Equal(t, bit, v)
require.NoError(t, err)
require.Equal(t, bit, v)
}
for nbits := uint8(1); nbits <= 64; nbits++ {
v, err := r.readBitsFast(nbits)
if err != nil {
v, err = r.readBits(nbits)
}
assert.NoError(t, err)
assert.Equal(t, uint64(nbits), v, "nbits=%d", nbits)
require.NoError(t, err)
require.Equal(t, uint64(nbits), v, "nbits=%d", nbits)
}
for v := 1; v < 10000; v += 123 {
actual, err := r.readBitsFast(29)
if err != nil {
actual, err = r.readBits(29)
}
assert.NoError(t, err)
assert.Equal(t, uint64(v), actual, "v=%d", v)
require.NoError(t, err)
require.Equal(t, uint64(v), actual, "v=%d", v)
}
}

View file

@ -19,7 +19,7 @@ import (
"math/rand"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type pair struct {
@ -42,7 +42,7 @@ func TestChunk(t *testing.T) {
func testChunk(t *testing.T, c Chunk) {
app, err := c.Appender()
assert.NoError(t, err)
require.NoError(t, err)
var exp []pair
var (
@ -61,7 +61,7 @@ func testChunk(t *testing.T, c Chunk) {
// appending to a partially filled chunk.
if i%10 == 0 {
app, err = c.Appender()
assert.NoError(t, err)
require.NoError(t, err)
}
app.Append(ts, v)
@ -75,8 +75,8 @@ func testChunk(t *testing.T, c Chunk) {
ts, v := it1.At()
res1 = append(res1, pair{t: ts, v: v})
}
assert.NoError(t, it1.Err())
assert.Equal(t, exp, res1)
require.NoError(t, it1.Err())
require.Equal(t, exp, res1)
// 2. Expand second iterator while reusing first one.
it2 := c.Iterator(it1)
@ -85,18 +85,18 @@ func testChunk(t *testing.T, c Chunk) {
ts, v := it2.At()
res2 = append(res2, pair{t: ts, v: v})
}
assert.NoError(t, it2.Err())
assert.Equal(t, exp, res2)
require.NoError(t, it2.Err())
require.Equal(t, exp, res2)
// 3. Test iterator Seek.
mid := len(exp) / 2
it3 := c.Iterator(nil)
var res3 []pair
assert.Equal(t, true, it3.Seek(exp[mid].t))
require.Equal(t, true, it3.Seek(exp[mid].t))
// Repeated seeks to the same timestamp should be no-ops.
assert.Equal(t, true, it3.Seek(exp[mid].t))
assert.Equal(t, true, it3.Seek(exp[mid].t))
require.Equal(t, true, it3.Seek(exp[mid].t))
require.Equal(t, true, it3.Seek(exp[mid].t))
ts, v = it3.At()
res3 = append(res3, pair{t: ts, v: v})
@ -104,9 +104,9 @@ func testChunk(t *testing.T, c Chunk) {
ts, v := it3.At()
res3 = append(res3, pair{t: ts, v: v})
}
assert.NoError(t, it3.Err())
assert.Equal(t, exp[mid:], res3)
assert.Equal(t, false, it3.Seek(exp[len(exp)-1].t+1))
require.NoError(t, it3.Err())
require.Equal(t, exp[mid:], res3)
require.Equal(t, false, it3.Seek(exp[len(exp)-1].t+1))
}
func benchmarkIterator(b *testing.B, newChunk func() Chunk) {
@ -160,7 +160,7 @@ func benchmarkIterator(b *testing.B, newChunk func() Chunk) {
res = append(res, v)
}
if it.Err() != io.EOF {
assert.NoError(b, it.Err())
require.NoError(b, it.Err())
}
res = res[:0]
}
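The it2 := c.Iterator(it1) step above exercises the allocation-reuse contract of chunk iterators: handing the previous iterator back lets an implementation recycle it instead of allocating. A self-contained sketch of a conforming implementation (all names invented; the real chunkenc Iterator also has Seek, At and Err):

type iter interface{ Next() bool }

type sketchChunk struct{ b []byte }

type sketchIterator struct {
	b   []byte
	pos int
}

func (it *sketchIterator) Next() bool     { it.pos++; return it.pos <= len(it.b) }
func (it *sketchIterator) reset(b []byte) { it.b, it.pos = b, 0 }

// Iterator recycles the caller's iterator when its concrete type matches,
// otherwise it allocates a fresh one.
func (c *sketchChunk) Iterator(reuse iter) iter {
	if it, ok := reuse.(*sketchIterator); ok {
		it.reset(c.b)
		return it
	}
	return &sketchIterator{b: c.b}
}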

View file

@ -16,7 +16,7 @@ package chunks
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestReaderWithInvalidBuffer(t *testing.T) {
@ -24,5 +24,5 @@ func TestReaderWithInvalidBuffer(t *testing.T) {
r := &Reader{bs: []ByteSlice{b}}
_, err := r.Chunk(0)
assert.Error(t, err)
require.Error(t, err)
}

View file

@ -22,7 +22,7 @@ import (
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
@ -30,7 +30,7 @@ import (
func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
hrw := testChunkDiskMapper(t)
defer func() {
assert.NoError(t, hrw.Close())
require.NoError(t, hrw.Close())
}()
expectedBytes := []byte{}
@ -69,7 +69,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
// Calculating expected bytes written on disk for first file.
firstFileName = hrw.curFile.Name()
assert.Equal(t, chunkRef(1, nextChunkOffset), chkRef)
require.Equal(t, chunkRef(1, nextChunkOffset), chkRef)
bytesWritten := 0
chkCRC32.Reset()
@ -87,10 +87,10 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
expectedBytes = append(expectedBytes, buf[:bytesWritten]...)
_, err := chkCRC32.Write(buf[:bytesWritten])
assert.NoError(t, err)
require.NoError(t, err)
expectedBytes = append(expectedBytes, chunk.Bytes()...)
_, err = chkCRC32.Write(chunk.Bytes())
assert.NoError(t, err)
require.NoError(t, err)
expectedBytes = append(expectedBytes, chkCRC32.Sum(nil)...)
@ -104,58 +104,58 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
}
// Checking on-disk bytes for the first file.
assert.Equal(t, 3, len(hrw.mmappedChunkFiles), "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles))
assert.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers))
require.Equal(t, 3, len(hrw.mmappedChunkFiles), "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles))
require.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers))
actualBytes, err := ioutil.ReadFile(firstFileName)
assert.NoError(t, err)
require.NoError(t, err)
// Check header of the segment file.
assert.Equal(t, MagicHeadChunks, int(binary.BigEndian.Uint32(actualBytes[0:MagicChunksSize])))
assert.Equal(t, chunksFormatV1, int(actualBytes[MagicChunksSize]))
require.Equal(t, MagicHeadChunks, int(binary.BigEndian.Uint32(actualBytes[0:MagicChunksSize])))
require.Equal(t, chunksFormatV1, int(actualBytes[MagicChunksSize]))
// Remaining chunk data.
fileEnd := HeadChunkFileHeaderSize + len(expectedBytes)
assert.Equal(t, expectedBytes, actualBytes[HeadChunkFileHeaderSize:fileEnd])
require.Equal(t, expectedBytes, actualBytes[HeadChunkFileHeaderSize:fileEnd])
// Test for the next chunk header to be all 0s. That marks the end of the file.
for _, b := range actualBytes[fileEnd : fileEnd+MaxHeadChunkMetaSize] {
assert.Equal(t, byte(0), b)
require.Equal(t, byte(0), b)
}
// Testing reading of chunks.
for _, exp := range expectedData {
actChunk, err := hrw.Chunk(exp.chunkRef)
assert.NoError(t, err)
assert.Equal(t, exp.chunk.Bytes(), actChunk.Bytes())
require.NoError(t, err)
require.Equal(t, exp.chunk.Bytes(), actChunk.Bytes())
}
// Testing IterateAllChunks method.
dir := hrw.dir.Name()
assert.NoError(t, hrw.Close())
require.NoError(t, hrw.Close())
hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool())
assert.NoError(t, err)
require.NoError(t, err)
idx := 0
err = hrw.IterateAllChunks(func(seriesRef, chunkRef uint64, mint, maxt int64, numSamples uint16) error {
t.Helper()
expData := expectedData[idx]
assert.Equal(t, expData.seriesRef, seriesRef)
assert.Equal(t, expData.chunkRef, chunkRef)
assert.Equal(t, expData.mint, mint)
assert.Equal(t, expData.maxt, maxt)
assert.Equal(t, expData.numSamples, numSamples)
require.Equal(t, expData.seriesRef, seriesRef)
require.Equal(t, expData.chunkRef, chunkRef)
require.Equal(t, expData.mint, mint)
require.Equal(t, expData.maxt, maxt)
require.Equal(t, expData.numSamples, numSamples)
actChunk, err := hrw.Chunk(expData.chunkRef)
assert.NoError(t, err)
assert.Equal(t, expData.chunk.Bytes(), actChunk.Bytes())
require.NoError(t, err)
require.Equal(t, expData.chunk.Bytes(), actChunk.Bytes())
idx++
return nil
})
assert.NoError(t, err)
assert.Equal(t, len(expectedData), idx)
require.NoError(t, err)
require.Equal(t, len(expectedData), idx)
}
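The chunkRef(1, nextChunkOffset) assertion above leans on how head-chunk references are packed. Reconstructed from the test (hedged — inferred, not quoted from head_chunks.go), the file sequence number occupies the upper 32 bits of the reference and the in-file offset the lower 32:

func chunkRefSketch(seq, offset uint64) uint64 {
	return (seq << 32) | offset
}

func unpackChunkRefSketch(ref uint64) (seq, offset uint64) {
	return ref >> 32, ref & ((1 << 32) - 1)
}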
@ -167,7 +167,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
func TestChunkDiskMapper_Truncate(t *testing.T) {
hrw := testChunkDiskMapper(t)
defer func() {
assert.NoError(t, hrw.Close())
require.NoError(t, hrw.Close())
}()
timeRange := 0
@ -180,7 +180,7 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
// Write a chunk to set maxt for the segment.
_, err := hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t))
assert.NoError(t, err)
require.NoError(t, err)
timeRange += fileTimeStep
@ -191,20 +191,20 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
t.Helper()
files, err := ioutil.ReadDir(hrw.dir.Name())
assert.NoError(t, err)
assert.Equal(t, len(remainingFiles), len(files), "files on disk")
assert.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
assert.Equal(t, len(remainingFiles), len(hrw.closers), "closers")
require.NoError(t, err)
require.Equal(t, len(remainingFiles), len(files), "files on disk")
require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
require.Equal(t, len(remainingFiles), len(hrw.closers), "closers")
for _, i := range remainingFiles {
_, ok := hrw.mmappedChunkFiles[i]
assert.Equal(t, true, ok)
require.Equal(t, true, ok)
}
}
// Create segments 1 to 7.
for i := 1; i <= 7; i++ {
assert.NoError(t, hrw.CutNewFile())
require.NoError(t, hrw.CutNewFile())
mint := int64(addChunk())
if i == 3 {
thirdFileMinT = mint
@ -215,20 +215,20 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
verifyFiles([]int{1, 2, 3, 4, 5, 6, 7})
// Truncating files.
assert.NoError(t, hrw.Truncate(thirdFileMinT))
require.NoError(t, hrw.Truncate(thirdFileMinT))
verifyFiles([]int{3, 4, 5, 6, 7, 8})
dir := hrw.dir.Name()
assert.NoError(t, hrw.Close())
require.NoError(t, hrw.Close())
// Restarted.
var err error
hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool())
assert.NoError(t, err)
require.NoError(t, err)
assert.False(t, hrw.fileMaxtSet)
assert.NoError(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error { return nil }))
assert.True(t, hrw.fileMaxtSet)
require.False(t, hrw.fileMaxtSet)
require.NoError(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error { return nil }))
require.True(t, hrw.fileMaxtSet)
verifyFiles([]int{3, 4, 5, 6, 7, 8})
// New file is created after restart even if last file was empty.
@ -236,16 +236,16 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
verifyFiles([]int{3, 4, 5, 6, 7, 8, 9})
// Truncating files after restart.
assert.NoError(t, hrw.Truncate(sixthFileMinT))
require.NoError(t, hrw.Truncate(sixthFileMinT))
verifyFiles([]int{6, 7, 8, 9, 10})
// As the last file was empty, this creates no new files.
assert.NoError(t, hrw.Truncate(sixthFileMinT+1))
require.NoError(t, hrw.Truncate(sixthFileMinT+1))
verifyFiles([]int{6, 7, 8, 9, 10})
addChunk()
// Truncating till the current time should not delete the currently active file.
assert.NoError(t, hrw.Truncate(int64(timeRange+(2*fileTimeStep))))
require.NoError(t, hrw.Truncate(int64(timeRange+(2*fileTimeStep))))
verifyFiles([]int{10, 11}) // One file is the previously active file and one is the newly created one.
}
@ -256,7 +256,7 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
hrw := testChunkDiskMapper(t)
defer func() {
assert.NoError(t, hrw.Close())
require.NoError(t, hrw.Close())
}()
timeRange := 0
@ -264,11 +264,11 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
step := 100
mint, maxt := timeRange+1, timeRange+step-1
_, err := hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t))
assert.NoError(t, err)
require.NoError(t, err)
timeRange += step
}
emptyFile := func() {
assert.NoError(t, hrw.CutNewFile())
require.NoError(t, hrw.CutNewFile())
}
nonEmptyFile := func() {
emptyFile()
@ -286,14 +286,14 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
t.Helper()
files, err := ioutil.ReadDir(hrw.dir.Name())
assert.NoError(t, err)
assert.Equal(t, len(remainingFiles), len(files), "files on disk")
assert.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
assert.Equal(t, len(remainingFiles), len(hrw.closers), "closers")
require.NoError(t, err)
require.Equal(t, len(remainingFiles), len(files), "files on disk")
require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
require.Equal(t, len(remainingFiles), len(hrw.closers), "closers")
for _, i := range remainingFiles {
_, ok := hrw.mmappedChunkFiles[i]
assert.True(t, ok, "remaining file %d not in hrw.mmappedChunkFiles", i)
require.True(t, ok, "remaining file %d not in hrw.mmappedChunkFiles", i)
}
}
@ -302,22 +302,22 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
// Truncate up to file 2. Nothing from file 3 (inclusive) onward should be deleted,
// even though files 4 and 6 are empty.
file2Maxt := hrw.mmappedChunkFiles[2].maxt
assert.NoError(t, hrw.Truncate(file2Maxt+1))
require.NoError(t, hrw.Truncate(file2Maxt+1))
// As 6 was empty, it should not create another file.
verifyFiles([]int{3, 4, 5, 6})
addChunk()
// Truncate creates another file as 6 is not empty now.
assert.NoError(t, hrw.Truncate(file2Maxt+1))
require.NoError(t, hrw.Truncate(file2Maxt+1))
verifyFiles([]int{3, 4, 5, 6, 7})
dir := hrw.dir.Name()
assert.NoError(t, hrw.Close())
require.NoError(t, hrw.Close())
// Restarting checks for out-of-sequence files.
var err error
hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool())
assert.NoError(t, err)
require.NoError(t, err)
verifyFiles([]int{3, 4, 5, 6, 7})
}
@ -326,33 +326,33 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) {
hrw := testChunkDiskMapper(t)
defer func() {
assert.NoError(t, hrw.Close())
require.NoError(t, hrw.Close())
}()
// Write a chunk to iterate over later.
_, err := hrw.WriteChunk(1, 0, 1000, randomChunk(t))
assert.NoError(t, err)
require.NoError(t, err)
dir := hrw.dir.Name()
assert.NoError(t, hrw.Close())
require.NoError(t, hrw.Close())
// Restarting to recreate https://github.com/prometheus/prometheus/issues/7753.
hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool())
assert.NoError(t, err)
require.NoError(t, err)
// Forcefully failing IterateAllChunks.
assert.Error(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error {
require.Error(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error {
return errors.New("random error")
}))
// Truncation call should not return error after IterateAllChunks fails.
assert.NoError(t, hrw.Truncate(2000))
require.NoError(t, hrw.Truncate(2000))
}
func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
hrw := testChunkDiskMapper(t)
defer func() {
assert.NoError(t, hrw.Close())
require.NoError(t, hrw.Close())
}()
timeRange := 0
@ -360,11 +360,11 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
step := 100
mint, maxt := timeRange+1, timeRange+step-1
_, err := hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t))
assert.NoError(t, err)
require.NoError(t, err)
timeRange += step
}
nonEmptyFile := func() {
assert.NoError(t, hrw.CutNewFile())
require.NoError(t, hrw.CutNewFile())
addChunk()
}
@ -372,64 +372,64 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
nonEmptyFile() // 2.
nonEmptyFile() // 3.
assert.Equal(t, 3, len(hrw.mmappedChunkFiles))
require.Equal(t, 3, len(hrw.mmappedChunkFiles))
lastFile := 0
for idx := range hrw.mmappedChunkFiles {
if idx > lastFile {
lastFile = idx
}
}
assert.Equal(t, 3, lastFile)
require.Equal(t, 3, lastFile)
dir := hrw.dir.Name()
assert.NoError(t, hrw.Close())
require.NoError(t, hrw.Close())
// Write an empty last file mimicking an abrupt shutdown on file creation.
emptyFileName := segmentFile(dir, lastFile+1)
f, err := os.OpenFile(emptyFileName, os.O_WRONLY|os.O_CREATE, 0666)
assert.NoError(t, err)
assert.NoError(t, f.Sync())
require.NoError(t, err)
require.NoError(t, f.Sync())
stat, err := f.Stat()
assert.NoError(t, err)
assert.Equal(t, int64(0), stat.Size())
assert.NoError(t, f.Close())
require.NoError(t, err)
require.Equal(t, int64(0), stat.Size())
require.NoError(t, f.Close())
// Open chunk disk mapper again, corrupt file should be removed.
hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool())
assert.NoError(t, err)
assert.False(t, hrw.fileMaxtSet)
assert.NoError(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error { return nil }))
assert.True(t, hrw.fileMaxtSet)
require.NoError(t, err)
require.False(t, hrw.fileMaxtSet)
require.NoError(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error { return nil }))
require.True(t, hrw.fileMaxtSet)
// Removed from memory.
assert.Equal(t, 3, len(hrw.mmappedChunkFiles))
require.Equal(t, 3, len(hrw.mmappedChunkFiles))
for idx := range hrw.mmappedChunkFiles {
assert.LessOrEqual(t, idx, lastFile, "file index is bigger than previous last file")
require.LessOrEqual(t, idx, lastFile, "file index is bigger than previous last file")
}
// Removed even from disk.
files, err := ioutil.ReadDir(dir)
assert.NoError(t, err)
assert.Equal(t, 3, len(files))
require.NoError(t, err)
require.Equal(t, 3, len(files))
for _, fi := range files {
seq, err := strconv.ParseUint(fi.Name(), 10, 64)
assert.NoError(t, err)
assert.LessOrEqual(t, seq, uint64(lastFile), "file index on disk is bigger than previous last file")
require.NoError(t, err)
require.LessOrEqual(t, seq, uint64(lastFile), "file index on disk is bigger than previous last file")
}
}
func testChunkDiskMapper(t *testing.T) *ChunkDiskMapper {
tmpdir, err := ioutil.TempDir("", "data")
assert.NoError(t, err)
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, os.RemoveAll(tmpdir))
require.NoError(t, os.RemoveAll(tmpdir))
})
hrw, err := NewChunkDiskMapper(tmpdir, chunkenc.NewPool())
assert.NoError(t, err)
assert.False(t, hrw.fileMaxtSet)
assert.NoError(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error { return nil }))
assert.True(t, hrw.fileMaxtSet)
require.NoError(t, err)
require.False(t, hrw.fileMaxtSet)
require.NoError(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error { return nil }))
require.True(t, hrw.fileMaxtSet)
return hrw
}
@ -437,7 +437,7 @@ func randomChunk(t *testing.T) chunkenc.Chunk {
chunk := chunkenc.NewXORChunk()
n := rand.Int() % 120
app, err := chunk.Appender()
assert.NoError(t, err)
require.NoError(t, err)
for i := 0; i < len; i++ {
app.Append(rand.Int63(), rand.Float64())
}
@ -451,6 +451,6 @@ func createChunk(t *testing.T, idx int, hrw *ChunkDiskMapper) (seriesRef uint64,
maxt = int64((idx + 1) * 1000)
chunk = randomChunk(t)
chunkRef, err = hrw.WriteChunk(seriesRef, mint, maxt, chunk)
assert.NoError(t, err)
require.NoError(t, err)
return
}

View file

@ -27,7 +27,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/tsdb/chunkenc"
@ -135,7 +135,7 @@ func TestSplitByRange(t *testing.T) {
}
}
assert.Equal(t, exp, splitByRange(blocks, c.trange))
require.Equal(t, exp, splitByRange(blocks, c.trange))
}
}
@ -159,7 +159,7 @@ func TestNoPanicFor0Tombstones(t *testing.T) {
}
c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{50}, nil)
assert.NoError(t, err)
require.NoError(t, err)
c.plan(metas)
}
@ -173,7 +173,7 @@ func TestLeveledCompactor_plan(t *testing.T) {
540,
1620,
}, nil)
assert.NoError(t, err)
require.NoError(t, err)
cases := map[string]struct {
metas []dirMeta
@ -366,8 +366,8 @@ func TestLeveledCompactor_plan(t *testing.T) {
for title, c := range cases {
if !t.Run(title, func(t *testing.T) {
res, err := compactor.plan(c.metas)
assert.NoError(t, err)
assert.Equal(t, c.expected, res)
require.NoError(t, err)
require.Equal(t, c.expected, res)
}) {
return
}
@ -382,7 +382,7 @@ func TestRangeWithFailedCompactionWontGetSelected(t *testing.T) {
720,
2160,
}, nil)
assert.NoError(t, err)
require.NoError(t, err)
cases := []struct {
metas []dirMeta
@ -418,9 +418,9 @@ func TestRangeWithFailedCompactionWontGetSelected(t *testing.T) {
for _, c := range cases {
c.metas[1].meta.Compaction.Failed = true
res, err := compactor.plan(c.metas)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, []string(nil), res)
require.Equal(t, []string(nil), res)
}
}
@ -432,17 +432,17 @@ func TestCompactionFailWillCleanUpTempDir(t *testing.T) {
720,
2160,
}, nil)
assert.NoError(t, err)
require.NoError(t, err)
tmpdir, err := ioutil.TempDir("", "test")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(tmpdir))
require.NoError(t, os.RemoveAll(tmpdir))
}()
assert.Error(t, compactor.write(tmpdir, &BlockMeta{}, erringBReader{}))
require.Error(t, compactor.write(tmpdir, &BlockMeta{}, erringBReader{}))
_, err = os.Stat(filepath.Join(tmpdir, BlockMeta{}.ULID.String()) + tmpForCreationBlockDirSuffix)
assert.True(t, os.IsNotExist(err), "directory is not cleaned up")
require.True(t, os.IsNotExist(err), "directory is not cleaned up")
}
func metaRange(name string, mint, maxt int64, stats *BlockStats) dirMeta {
@ -941,7 +941,7 @@ func TestCompaction_populateBlock(t *testing.T) {
}
c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{0}, nil)
assert.NoError(t, err)
require.NoError(t, err)
meta := &BlockMeta{
MinTime: tc.compactMinTime,
@ -954,11 +954,11 @@ func TestCompaction_populateBlock(t *testing.T) {
iw := &mockIndexWriter{}
err = c.populateBlock(blocks, meta, iw, nopChunkWriter{})
if tc.expErr != nil {
assert.Error(t, err)
assert.Equal(t, tc.expErr.Error(), err.Error())
require.Error(t, err)
require.Equal(t, tc.expErr.Error(), err.Error())
return
}
assert.NoError(t, err)
require.NoError(t, err)
// Check if response is expected and chunk is valid.
var raw []seriesSamples
@ -981,15 +981,15 @@ func TestCompaction_populateBlock(t *testing.T) {
}
// Check if chunk has correct min, max times.
assert.Equal(t, firstTs, chk.MinTime, "chunk Meta %v does not match the first encoded sample timestamp: %v", chk, firstTs)
assert.Equal(t, s.t, chk.MaxTime, "chunk Meta %v does not match the last encoded sample timestamp %v", chk, s.t)
require.Equal(t, firstTs, chk.MinTime, "chunk Meta %v does not match the first encoded sample timestamp: %v", chk, firstTs)
require.Equal(t, s.t, chk.MaxTime, "chunk Meta %v does not match the last encoded sample timestamp %v", chk, s.t)
assert.NoError(t, iter.Err())
require.NoError(t, iter.Err())
ss.chunks = append(ss.chunks, samples)
}
raw = append(raw, ss)
}
assert.Equal(t, tc.expSeriesSamples, raw)
require.Equal(t, tc.expSeriesSamples, raw)
// Check if stats are calculated properly.
s := BlockStats{NumSeries: uint64(len(tc.expSeriesSamples))}
@ -999,7 +999,7 @@ func TestCompaction_populateBlock(t *testing.T) {
s.NumSamples += uint64(len(chk))
}
}
assert.Equal(t, s, meta.Stats)
require.Equal(t, s, meta.Stats)
})
}
}
@ -1049,30 +1049,30 @@ func BenchmarkCompaction(b *testing.B) {
nBlocks := len(c.ranges)
b.Run(fmt.Sprintf("type=%s,blocks=%d,series=%d,samplesPerSeriesPerBlock=%d", c.compactionType, nBlocks, nSeries, c.ranges[0][1]-c.ranges[0][0]+1), func(b *testing.B) {
dir, err := ioutil.TempDir("", "bench_compaction")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(dir))
require.NoError(b, os.RemoveAll(dir))
}()
blockDirs := make([]string, 0, len(c.ranges))
var blocks []*Block
for _, r := range c.ranges {
block, err := OpenBlock(nil, createBlock(b, dir, genSeries(nSeries, 10, r[0], r[1])), nil)
assert.NoError(b, err)
require.NoError(b, err)
blocks = append(blocks, block)
defer func() {
assert.NoError(b, block.Close())
require.NoError(b, block.Close())
}()
blockDirs = append(blockDirs, block.Dir())
}
c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil)
assert.NoError(b, err)
require.NoError(b, err)
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
_, err = c.Compact(dir, blockDirs, blocks)
assert.NoError(b, err)
require.NoError(b, err)
}
})
}
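
The benchmarks above pass b rather than t to the same helpers. That works because require's functions accept a minimal require.TestingT interface (Errorf plus FailNow), which both *testing.T and *testing.B satisfy. A sketch under those assumptions, with an illustrative benchmark name:

package example

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

func BenchmarkExample(b *testing.B) {
	// *testing.B satisfies require.TestingT, so setup errors abort the
	// benchmark before any misleading timings are recorded.
	dir, err := ioutil.TempDir("", "bench_example")
	require.NoError(b, err)
	defer func() {
		require.NoError(b, os.RemoveAll(dir))
	}()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// the code under measurement would run here
	}
}
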
@ -1080,27 +1080,27 @@ func BenchmarkCompaction(b *testing.B) {
func BenchmarkCompactionFromHead(b *testing.B) {
dir, err := ioutil.TempDir("", "bench_compaction_from_head")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(dir))
require.NoError(b, os.RemoveAll(dir))
}()
totalSeries := 100000
for labelNames := 1; labelNames < totalSeries; labelNames *= 10 {
labelValues := totalSeries / labelNames
b.Run(fmt.Sprintf("labelnames=%d,labelvalues=%d", labelNames, labelValues), func(b *testing.B) {
chunkDir, err := ioutil.TempDir("", "chunk_dir")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(chunkDir))
require.NoError(b, os.RemoveAll(chunkDir))
}()
h, err := NewHead(nil, nil, nil, 1000, chunkDir, nil, DefaultStripeSize, nil)
assert.NoError(b, err)
require.NoError(b, err)
for ln := 0; ln < labelNames; ln++ {
app := h.Appender(context.Background())
for lv := 0; lv < labelValues; lv++ {
app.Add(labels.FromStrings(fmt.Sprintf("%d", ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)), 0, 0)
}
assert.NoError(b, app.Commit())
require.NoError(b, app.Commit())
}
b.ResetTimer()
@ -1120,7 +1120,7 @@ func BenchmarkCompactionFromHead(b *testing.B) {
func TestDisableAutoCompactions(t *testing.T) {
db := openTestDB(t, nil, nil)
defer func() {
assert.NoError(t, db.Close())
require.NoError(t, db.Close())
}()
blockRange := db.compactor.(*LeveledCompactor).ranges[0]
@ -1132,11 +1132,11 @@ func TestDisableAutoCompactions(t *testing.T) {
app := db.Appender(context.Background())
for i := int64(0); i < 3; i++ {
_, err := app.Add(label, i*blockRange, 0)
assert.NoError(t, err)
require.NoError(t, err)
_, err = app.Add(label, i*blockRange+1000, 0)
assert.NoError(t, err)
require.NoError(t, err)
}
assert.NoError(t, app.Commit())
require.NoError(t, app.Commit())
select {
case db.compactc <- struct{}{}:
@ -1150,8 +1150,8 @@ func TestDisableAutoCompactions(t *testing.T) {
time.Sleep(10 * time.Millisecond)
}
assert.Greater(t, prom_testutil.ToFloat64(db.metrics.compactionsSkipped), 0.0, "No compaction was skipped after the set timeout.")
assert.Equal(t, 0, len(db.blocks))
require.Greater(t, prom_testutil.ToFloat64(db.metrics.compactionsSkipped), 0.0, "No compaction was skipped after the set timeout.")
require.Equal(t, 0, len(db.blocks))
// Enable the compaction, trigger it and check that the block is persisted.
db.EnableCompactions()
@ -1165,16 +1165,16 @@ func TestDisableAutoCompactions(t *testing.T) {
}
time.Sleep(100 * time.Millisecond)
}
assert.Greater(t, len(db.Blocks()), 0, "No block was persisted after the set timeout.")
require.Greater(t, len(db.Blocks()), 0, "No block was persisted after the set timeout.")
}
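
TestDisableAutoCompactions polls in a sleep loop and then requires the final state. testify also ships require.Eventually for exactly this wait-then-fail-fast shape (assuming the vendored testify is v1.4 or later); a sketch with a stand-in condition:

package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestEventuallySkipped(t *testing.T) {
	var skipped int32 // stand-in for the compactionsSkipped metric

	go func() {
		atomic.StoreInt32(&skipped, 1) // background work being waited on
	}()

	// Poll every 10ms for up to 1s and fail fast with a message if the
	// condition never holds, instead of asserting a possibly stale value.
	require.Eventually(t, func() bool {
		return atomic.LoadInt32(&skipped) > 0
	}, time.Second, 10*time.Millisecond, "no compaction was skipped")
}
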
// TestCancelCompactions ensures that when the db is closed
// any running compaction is cancelled to unblock closing the db.
func TestCancelCompactions(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "testCancelCompaction")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(tmpdir))
require.NoError(t, os.RemoveAll(tmpdir))
}()
// Create some blocks to fall within the compaction range.
@ -1185,18 +1185,18 @@ func TestCancelCompactions(t *testing.T) {
// Copy the db so we have an exact copy to compare compaction times.
tmpdirCopy := tmpdir + "Copy"
err = fileutil.CopyDirs(tmpdir, tmpdirCopy)
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(tmpdirCopy))
require.NoError(t, os.RemoveAll(tmpdirCopy))
}()
// Measure the compaction time without interrupting it.
var timeCompactionUninterrupted time.Duration
{
db, err := open(tmpdir, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000})
assert.NoError(t, err)
assert.Equal(t, 3, len(db.Blocks()), "initial block count mismatch")
assert.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial compaction counter mismatch")
require.NoError(t, err)
require.Equal(t, 3, len(db.Blocks()), "initial block count mismatch")
require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial compaction counter mismatch")
db.compactc <- struct{}{} // Trigger a compaction.
var start time.Time
for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.populatingBlocks) <= 0 {
@ -1209,14 +1209,14 @@ func TestCancelCompactions(t *testing.T) {
}
timeCompactionUninterrupted = time.Since(start)
assert.NoError(t, db.Close())
require.NoError(t, db.Close())
}
// Measure the compaction time when closing the db in the middle of compaction.
{
db, err := open(tmpdirCopy, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000})
assert.NoError(t, err)
assert.Equal(t, 3, len(db.Blocks()), "initial block count mismatch")
assert.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial compaction counter mismatch")
require.NoError(t, err)
require.Equal(t, 3, len(db.Blocks()), "initial block count mismatch")
require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial compaction counter mismatch")
db.compactc <- struct{}{} // Trigger a compaction.
dbClosed := make(chan struct{})
@ -1224,7 +1224,7 @@ func TestCancelCompactions(t *testing.T) {
time.Sleep(3 * time.Millisecond)
}
go func() {
assert.NoError(t, db.Close())
require.NoError(t, db.Close())
close(dbClosed)
}()
@ -1232,7 +1232,7 @@ func TestCancelCompactions(t *testing.T) {
<-dbClosed
actT := time.Since(start)
expT := time.Duration(timeCompactionUninterrupted / 2) // Closing the db in the middle of compaction should take less than half the time.
assert.True(t, actT < expT, "closing the db took more than expected. exp: <%v, act: %v", expT, actT)
require.True(t, actT < expT, "closing the db took more than expected. exp: <%v, act: %v", expT, actT)
}
}
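
One subtlety in tests like the one above that call require inside a spawned goroutine: t.FailNow, which require uses to stop a test, is documented to work only from the goroutine running the test. Called elsewhere, the failure is still recorded, but runtime.Goexit ends only that goroutine, so a close(dbClosed) placed after a failing check would be skipped and the test could hang on the receive. A defensive shape (doClose is a hypothetical stand-in for db.Close):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// doClose is a hypothetical stand-in for db.Close.
func doClose() error { return nil }

func TestCloseInBackground(t *testing.T) {
	done := make(chan error, 1)
	go func() {
		// Report the error instead of require-ing it here, so a
		// failure cannot strand the channel send.
		done <- doClose()
	}()
	require.NoError(t, <-done) // checked on the test goroutine
}
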
@ -1247,12 +1247,12 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
// Add some data to the head that is enough to trigger a compaction.
app := db.Appender(context.Background())
_, err := app.Add(defaultLabel, 1, 0)
assert.NoError(t, err)
require.NoError(t, err)
_, err = app.Add(defaultLabel, 2, 0)
assert.NoError(t, err)
require.NoError(t, err)
_, err = app.Add(defaultLabel, 3+rangeToTriggerCompaction, 0)
assert.NoError(t, err)
assert.NoError(t, app.Commit())
require.NoError(t, err)
require.NoError(t, app.Commit())
return 0
},
@ -1265,8 +1265,8 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
for _, m := range blocks {
createBlock(t, db.Dir(), genSeries(1, 1, m.MinTime, m.MaxTime))
}
assert.NoError(t, db.reload())
assert.Equal(t, len(blocks), len(db.Blocks()), "unexpected block count after a reloadBlocks")
require.NoError(t, db.reload())
require.Equal(t, len(blocks), len(db.Blocks()), "unexpected block count after a reloadBlocks")
return len(blocks)
},
@ -1276,7 +1276,7 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
t.Run(title, func(t *testing.T) {
db := openTestDB(t, nil, []int64{1, 100})
defer func() {
assert.NoError(t, db.Close())
require.NoError(t, db.Close())
}()
db.DisableCompactions()
@ -1286,25 +1286,25 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
blockPath := createBlock(t, db.Dir(), genSeries(1, 1, 200, 300))
lastBlockIndex := path.Join(blockPath, indexFilename)
actBlocks, err := blockDirs(db.Dir())
assert.NoError(t, err)
assert.Equal(t, expBlocks, len(actBlocks)-1) // -1 to exclude the corrupted block.
assert.NoError(t, os.RemoveAll(lastBlockIndex)) // Corrupt the block by removing the index file.
require.NoError(t, err)
require.Equal(t, expBlocks, len(actBlocks)-1) // -1 to exclude the corrupted block.
require.NoError(t, os.RemoveAll(lastBlockIndex)) // Corrupt the block by removing the index file.
assert.Equal(t, 0.0, prom_testutil.ToFloat64(db.metrics.reloadsFailed), "initial 'failed db reloadBlocks' count metrics mismatch")
assert.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial `compactions` count metric mismatch")
assert.Equal(t, 0.0, prom_testutil.ToFloat64(db.metrics.compactionsFailed), "initial `compactions failed` count metric mismatch")
require.Equal(t, 0.0, prom_testutil.ToFloat64(db.metrics.reloadsFailed), "initial 'failed db reloadBlocks' count metrics mismatch")
require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial `compactions` count metric mismatch")
require.Equal(t, 0.0, prom_testutil.ToFloat64(db.metrics.compactionsFailed), "initial `compactions failed` count metric mismatch")
// Do the compaction and check the metrics.
// Compaction should succeed, but the reloadBlocks should fail and
// the new block created from the compaction should be deleted.
assert.Error(t, db.Compact())
assert.Equal(t, 1.0, prom_testutil.ToFloat64(db.metrics.reloadsFailed), "'failed db reloadBlocks' count metrics mismatch")
assert.Equal(t, 1.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "`compaction` count metric mismatch")
assert.Equal(t, 1.0, prom_testutil.ToFloat64(db.metrics.compactionsFailed), "`compactions failed` count metric mismatch")
require.Error(t, db.Compact())
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.metrics.reloadsFailed), "'failed db reloadBlocks' count metrics mismatch")
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "`compaction` count metric mismatch")
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.metrics.compactionsFailed), "`compactions failed` count metric mismatch")
actBlocks, err = blockDirs(db.Dir())
assert.NoError(t, err)
assert.Equal(t, expBlocks, len(actBlocks)-1, "block count should be the same as before the compaction") // -1 to exclude the corrupted block.
require.NoError(t, err)
require.Equal(t, expBlocks, len(actBlocks)-1, "block count should be the same as before the compaction") // -1 to exclude the corrupted block.
})
}
}
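
The metric assertions above lean on testutil.ToFloat64 from client_golang, which collapses a single-sample collector into its current value so require.Equal can pin it with a message naming the metric. A self-contained sketch with a hypothetical counter standing in for db.metrics.compactionsFailed:

package example

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/stretchr/testify/require"
)

func TestFailureCounter(t *testing.T) {
	// Hypothetical counter; the real one lives on the db metrics struct.
	failed := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "compactions_failed_total",
		Help: "hypothetical failure counter",
	})

	require.Equal(t, 0.0, prom_testutil.ToFloat64(failed), "initial `compactions failed` count metric mismatch")
	failed.Inc()
	require.Equal(t, 1.0, prom_testutil.ToFloat64(failed), "`compactions failed` count metric mismatch")
}
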

File diff suppressed because it is too large

View file

@ -19,7 +19,7 @@ import (
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/pkg/labels"
@ -27,13 +27,13 @@ import (
func BenchmarkHeadStripeSeriesCreate(b *testing.B) {
chunkDir, err := ioutil.TempDir("", "chunk_dir")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(chunkDir))
require.NoError(b, os.RemoveAll(chunkDir))
}()
// Put a series, select it. GC it and then access it.
h, err := NewHead(nil, nil, nil, 1000, chunkDir, nil, DefaultStripeSize, nil)
assert.NoError(b, err)
require.NoError(b, err)
defer h.Close()
for i := 0; i < b.N; i++ {
@ -43,13 +43,13 @@ func BenchmarkHeadStripeSeriesCreate(b *testing.B) {
func BenchmarkHeadStripeSeriesCreateParallel(b *testing.B) {
chunkDir, err := ioutil.TempDir("", "chunk_dir")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(chunkDir))
require.NoError(b, os.RemoveAll(chunkDir))
}()
// Put a series, select it. GC it and then access it.
h, err := NewHead(nil, nil, nil, 1000, chunkDir, nil, DefaultStripeSize, nil)
assert.NoError(b, err)
require.NoError(b, err)
defer h.Close()
var count atomic.Int64

File diff suppressed because it is too large

View file

@ -25,7 +25,7 @@ import (
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/pkg/labels"
@ -137,44 +137,44 @@ func (m mockIndex) Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta)
func TestIndexRW_Create_Open(t *testing.T) {
dir, err := ioutil.TempDir("", "test_index_create")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
fn := filepath.Join(dir, indexFilename)
// An empty index must still result in a readable file.
iw, err := NewWriter(context.Background(), fn)
assert.NoError(t, err)
assert.NoError(t, iw.Close())
require.NoError(t, err)
require.NoError(t, iw.Close())
ir, err := NewFileReader(fn)
assert.NoError(t, err)
assert.NoError(t, ir.Close())
require.NoError(t, err)
require.NoError(t, ir.Close())
// Modifying the magic header must cause open to fail.
f, err := os.OpenFile(fn, os.O_WRONLY, 0666)
assert.NoError(t, err)
require.NoError(t, err)
_, err = f.WriteAt([]byte{0, 0}, 0)
assert.NoError(t, err)
require.NoError(t, err)
f.Close()
_, err = NewFileReader(dir)
assert.Error(t, err)
require.Error(t, err)
}
func TestIndexRW_Postings(t *testing.T) {
dir, err := ioutil.TempDir("", "test_index_postings")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
fn := filepath.Join(dir, indexFilename)
iw, err := NewWriter(context.Background(), fn)
assert.NoError(t, err)
require.NoError(t, err)
series := []labels.Labels{
labels.FromStrings("a", "1", "b", "1"),
@ -183,27 +183,27 @@ func TestIndexRW_Postings(t *testing.T) {
labels.FromStrings("a", "1", "b", "4"),
}
assert.NoError(t, iw.AddSymbol("1"))
assert.NoError(t, iw.AddSymbol("2"))
assert.NoError(t, iw.AddSymbol("3"))
assert.NoError(t, iw.AddSymbol("4"))
assert.NoError(t, iw.AddSymbol("a"))
assert.NoError(t, iw.AddSymbol("b"))
require.NoError(t, iw.AddSymbol("1"))
require.NoError(t, iw.AddSymbol("2"))
require.NoError(t, iw.AddSymbol("3"))
require.NoError(t, iw.AddSymbol("4"))
require.NoError(t, iw.AddSymbol("a"))
require.NoError(t, iw.AddSymbol("b"))
// Postings lists are only written if a series with the respective
// reference was added before.
assert.NoError(t, iw.AddSeries(1, series[0]))
assert.NoError(t, iw.AddSeries(2, series[1]))
assert.NoError(t, iw.AddSeries(3, series[2]))
assert.NoError(t, iw.AddSeries(4, series[3]))
require.NoError(t, iw.AddSeries(1, series[0]))
require.NoError(t, iw.AddSeries(2, series[1]))
require.NoError(t, iw.AddSeries(3, series[2]))
require.NoError(t, iw.AddSeries(4, series[3]))
assert.NoError(t, iw.Close())
require.NoError(t, iw.Close())
ir, err := NewFileReader(fn)
assert.NoError(t, err)
require.NoError(t, err)
p, err := ir.Postings("a", "1")
assert.NoError(t, err)
require.NoError(t, err)
var l labels.Labels
var c []chunks.Meta
@ -211,15 +211,15 @@ func TestIndexRW_Postings(t *testing.T) {
for i := 0; p.Next(); i++ {
err := ir.Series(p.At(), &l, &c)
assert.NoError(t, err)
assert.Equal(t, 0, len(c))
assert.Equal(t, series[i], l)
require.NoError(t, err)
require.Equal(t, 0, len(c))
require.Equal(t, series[i], l)
}
assert.NoError(t, p.Err())
require.NoError(t, p.Err())
// The label indices are no longer used, so test them by hand here.
labelIndices := map[string][]string{}
assert.NoError(t, ReadOffsetTable(ir.b, ir.toc.LabelIndicesTable, func(key []string, off uint64, _ int) error {
require.NoError(t, ReadOffsetTable(ir.b, ir.toc.LabelIndicesTable, func(key []string, off uint64, _ int) error {
if len(key) != 1 {
return errors.Errorf("unexpected key length for label indices table %d", len(key))
}
@ -240,25 +240,25 @@ func TestIndexRW_Postings(t *testing.T) {
labelIndices[key[0]] = vals
return d.Err()
}))
assert.Equal(t, map[string][]string{
require.Equal(t, map[string][]string{
"a": {"1"},
"b": {"1", "2", "3", "4"},
}, labelIndices)
assert.NoError(t, ir.Close())
require.NoError(t, ir.Close())
}
func TestPostingsMany(t *testing.T) {
dir, err := ioutil.TempDir("", "test_postings_many")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
fn := filepath.Join(dir, indexFilename)
iw, err := NewWriter(context.Background(), fn)
assert.NoError(t, err)
require.NoError(t, err)
// Create a label in the index which has 999 values.
symbols := map[string]struct{}{}
@ -277,17 +277,17 @@ func TestPostingsMany(t *testing.T) {
}
sort.Strings(syms)
for _, s := range syms {
assert.NoError(t, iw.AddSymbol(s))
require.NoError(t, iw.AddSymbol(s))
}
for i, s := range series {
assert.NoError(t, iw.AddSeries(uint64(i), s))
require.NoError(t, iw.AddSeries(uint64(i), s))
}
assert.NoError(t, iw.Close())
require.NoError(t, iw.Close())
ir, err := NewFileReader(fn)
assert.NoError(t, err)
defer func() { assert.NoError(t, ir.Close()) }()
require.NoError(t, err)
defer func() { require.NoError(t, ir.Close()) }()
cases := []struct {
in []string
@ -322,36 +322,36 @@ func TestPostingsMany(t *testing.T) {
for _, c := range cases {
it, err := ir.Postings("i", c.in...)
assert.NoError(t, err)
require.NoError(t, err)
got := []string{}
var lbls labels.Labels
var metas []chunks.Meta
for it.Next() {
assert.NoError(t, ir.Series(it.At(), &lbls, &metas))
require.NoError(t, ir.Series(it.At(), &lbls, &metas))
got = append(got, lbls.Get("i"))
}
assert.NoError(t, it.Err())
require.NoError(t, it.Err())
exp := []string{}
for _, e := range c.in {
if _, ok := symbols[e]; ok && e != "l" {
exp = append(exp, e)
}
}
assert.Equal(t, exp, got, fmt.Sprintf("input: %v", c.in))
require.Equal(t, exp, got, fmt.Sprintf("input: %v", c.in))
}
}
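
A small note on the fmt.Sprintf in the check above: testify's msgAndArgs variadic already formats, so the context can be passed directly and is only rendered on failure. Both forms below are equivalent; the values are illustrative:

package example

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestMessageArgs(t *testing.T) {
	in := []string{"a", "b"}
	got, exp := 2, 2

	// Pre-formatted message, as in the test above.
	require.Equal(t, exp, got, fmt.Sprintf("input: %v", in))

	// Equivalent: a format string plus args, formatted only on failure.
	require.Equal(t, exp, got, "input: %v", in)
}
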
func TestPersistence_index_e2e(t *testing.T) {
dir, err := ioutil.TempDir("", "test_persistence_e2e")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
lbls, err := labels.ReadLabels(filepath.Join("..", "testdata", "20kseries.json"), 20000)
assert.NoError(t, err)
require.NoError(t, err)
// Sort labels as the index writer expects series in sorted order.
sort.Sort(labels.Slice(lbls))
@ -385,7 +385,7 @@ func TestPersistence_index_e2e(t *testing.T) {
}
iw, err := NewWriter(context.Background(), filepath.Join(dir, indexFilename))
assert.NoError(t, err)
require.NoError(t, err)
syms := []string{}
for s := range symbols {
@ -393,7 +393,7 @@ func TestPersistence_index_e2e(t *testing.T) {
}
sort.Strings(syms)
for _, s := range syms {
assert.NoError(t, iw.AddSymbol(s))
require.NoError(t, iw.AddSymbol(s))
}
// Population procedure as done by compaction.
@ -406,8 +406,8 @@ func TestPersistence_index_e2e(t *testing.T) {
for i, s := range input {
err = iw.AddSeries(uint64(i), s.labels, s.chunks...)
assert.NoError(t, err)
assert.NoError(t, mi.AddSeries(uint64(i), s.labels, s.chunks...))
require.NoError(t, err)
require.NoError(t, mi.AddSeries(uint64(i), s.labels, s.chunks...))
for _, l := range s.labels {
valset, ok := values[l.Name]
@ -421,36 +421,36 @@ func TestPersistence_index_e2e(t *testing.T) {
}
err = iw.Close()
assert.NoError(t, err)
require.NoError(t, err)
ir, err := NewFileReader(filepath.Join(dir, indexFilename))
assert.NoError(t, err)
require.NoError(t, err)
for p := range mi.postings {
gotp, err := ir.Postings(p.Name, p.Value)
assert.NoError(t, err)
require.NoError(t, err)
expp, err := mi.Postings(p.Name, p.Value)
assert.NoError(t, err)
require.NoError(t, err)
var lset, explset labels.Labels
var chks, expchks []chunks.Meta
for gotp.Next() {
assert.True(t, expp.Next())
require.True(t, expp.Next())
ref := gotp.At()
err := ir.Series(ref, &lset, &chks)
assert.NoError(t, err)
require.NoError(t, err)
err = mi.Series(expp.At(), &explset, &expchks)
assert.NoError(t, err)
assert.Equal(t, explset, lset)
assert.Equal(t, expchks, chks)
require.NoError(t, err)
require.Equal(t, explset, lset)
require.Equal(t, expchks, chks)
}
assert.False(t, expp.Next(), "Expected no more postings for %q=%q", p.Name, p.Value)
assert.NoError(t, gotp.Err())
require.False(t, expp.Next(), "Expected no more postings for %q=%q", p.Name, p.Value)
require.NoError(t, gotp.Err())
}
labelPairs := map[string][]string{}
@ -461,11 +461,11 @@ func TestPersistence_index_e2e(t *testing.T) {
sort.Strings(v)
res, err := ir.SortedLabelValues(k)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, len(v), len(res))
require.Equal(t, len(v), len(res))
for i := 0; i < len(v); i++ {
assert.Equal(t, v[i], res[i])
require.Equal(t, v[i], res[i])
}
}
@ -474,29 +474,29 @@ func TestPersistence_index_e2e(t *testing.T) {
for it.Next() {
gotSymbols = append(gotSymbols, it.At())
}
assert.NoError(t, it.Err())
require.NoError(t, it.Err())
expSymbols := []string{}
for s := range mi.symbols {
expSymbols = append(expSymbols, s)
}
sort.Strings(expSymbols)
assert.Equal(t, expSymbols, gotSymbols)
require.Equal(t, expSymbols, gotSymbols)
assert.NoError(t, ir.Close())
require.NoError(t, ir.Close())
}
func TestDecbufUvarintWithInvalidBuffer(t *testing.T) {
b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81})
db := encoding.NewDecbufUvarintAt(b, 0, castagnoliTable)
assert.Error(t, db.Err())
require.Error(t, db.Err())
}
func TestReaderWithInvalidBuffer(t *testing.T) {
b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81})
_, err := NewReader(b)
assert.Error(t, err)
require.Error(t, err)
}
// TestNewFileReaderErrorNoOpenFiles ensures that in case of an error no file remains open.
@ -505,10 +505,10 @@ func TestNewFileReaderErrorNoOpenFiles(t *testing.T) {
idxName := filepath.Join(dir.Path(), "index")
err := ioutil.WriteFile(idxName, []byte("corrupted contents"), 0666)
assert.NoError(t, err)
require.NoError(t, err)
_, err = NewFileReader(idxName)
assert.Error(t, err)
require.Error(t, err)
// dir.Close will fail on Win if idxName fd is not closed on error path.
dir.Close()
@ -531,32 +531,32 @@ func TestSymbols(t *testing.T) {
buf.PutBE32(checksum) // Check sum at the end.
s, err := NewSymbols(realByteSlice(buf.Get()), FormatV2, symbolsStart)
assert.NoError(t, err)
require.NoError(t, err)
// We store only 4 offsets to symbols.
assert.Equal(t, 32, s.Size())
require.Equal(t, 32, s.Size())
for i := 99; i >= 0; i-- {
s, err := s.Lookup(uint32(i))
assert.NoError(t, err)
assert.Equal(t, string(rune(i)), s)
require.NoError(t, err)
require.Equal(t, string(rune(i)), s)
}
_, err = s.Lookup(100)
assert.Error(t, err)
require.Error(t, err)
for i := 99; i >= 0; i-- {
r, err := s.ReverseLookup(string(rune(i)))
assert.NoError(t, err)
assert.Equal(t, uint32(i), r)
require.NoError(t, err)
require.Equal(t, uint32(i), r)
}
_, err = s.ReverseLookup(string(rune(100)))
assert.Error(t, err)
require.Error(t, err)
iter := s.Iter()
i := 0
for iter.Next() {
assert.Equal(t, string(rune(i)), iter.At())
require.Equal(t, string(rune(i)), iter.At())
i++
}
assert.NoError(t, iter.Err())
require.NoError(t, iter.Err())
}

View file

@ -20,7 +20,7 @@ import (
"sort"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
)
@ -32,7 +32,7 @@ func TestMemPostings_addFor(t *testing.T) {
p.addFor(5, allPostingsKey)
assert.Equal(t, []uint64{1, 2, 3, 4, 5, 6, 7, 8}, p.m[allPostingsKey.Name][allPostingsKey.Value])
require.Equal(t, []uint64{1, 2, 3, 4, 5, 6, 7, 8}, p.m[allPostingsKey.Name][allPostingsKey.Value])
}
func TestMemPostings_ensureOrder(t *testing.T) {
@ -161,12 +161,12 @@ func TestIntersect(t *testing.T) {
}
expected, err := ExpandPostings(c.res)
assert.NoError(t, err)
require.NoError(t, err)
i := Intersect(c.in...)
if c.res == EmptyPostings() {
assert.Equal(t, EmptyPostings(), i)
require.Equal(t, EmptyPostings(), i)
return
}
@ -175,8 +175,8 @@ func TestIntersect(t *testing.T) {
}
res, err := ExpandPostings(i)
assert.NoError(t, err)
assert.Equal(t, expected, res)
require.NoError(t, err)
require.Equal(t, expected, res)
})
}
}
@ -217,8 +217,8 @@ func TestMultiIntersect(t *testing.T) {
res, err := ExpandPostings(Intersect(ps...))
assert.NoError(t, err)
assert.Equal(t, c.res, res)
require.NoError(t, err)
require.Equal(t, c.res, res)
}
}
@ -315,8 +315,8 @@ func TestMultiMerge(t *testing.T) {
i3 := newListPostings(1, 2, 5, 6, 7, 8, 1001, 1200)
res, err := ExpandPostings(Merge(i1, i2, i3))
assert.NoError(t, err)
assert.Equal(t, []uint64{1, 2, 3, 4, 5, 6, 7, 8, 999, 1000, 1001, 1200}, res)
require.NoError(t, err)
require.Equal(t, []uint64{1, 2, 3, 4, 5, 6, 7, 8, 999, 1000, 1001, 1200}, res)
}
func TestMergedPostings(t *testing.T) {
@ -403,12 +403,12 @@ func TestMergedPostings(t *testing.T) {
}
expected, err := ExpandPostings(c.res)
assert.NoError(t, err)
require.NoError(t, err)
m := Merge(c.in...)
if c.res == EmptyPostings() {
assert.Equal(t, EmptyPostings(), m)
require.Equal(t, EmptyPostings(), m)
return
}
@ -417,8 +417,8 @@ func TestMergedPostings(t *testing.T) {
}
res, err := ExpandPostings(m)
assert.NoError(t, err)
assert.Equal(t, expected, res)
require.NoError(t, err)
require.Equal(t, expected, res)
})
}
}
@ -471,16 +471,16 @@ func TestMergedPostingsSeek(t *testing.T) {
p := Merge(a, b)
assert.Equal(t, c.success, p.Seek(c.seek))
require.Equal(t, c.success, p.Seek(c.seek))
// After Seek(), At() should be called.
if c.success {
start := p.At()
lst, err := ExpandPostings(p)
assert.NoError(t, err)
require.NoError(t, err)
lst = append([]uint64{start}, lst...)
assert.Equal(t, c.res, lst)
require.Equal(t, c.res, lst)
}
}
}
@ -532,8 +532,8 @@ func TestRemovedPostings(t *testing.T) {
b := newListPostings(c.b...)
res, err := ExpandPostings(newRemovedPostings(a, b))
assert.NoError(t, err)
assert.Equal(t, c.res, res)
require.NoError(t, err)
require.Equal(t, c.res, res)
}
}
@ -556,8 +556,8 @@ func TestRemovedNextStackoverflow(t *testing.T) {
gotElem = true
}
assert.NoError(t, rp.Err())
assert.False(t, gotElem)
require.NoError(t, rp.Err())
require.False(t, gotElem)
}
func TestRemovedPostingsSeek(t *testing.T) {
@ -632,16 +632,16 @@ func TestRemovedPostingsSeek(t *testing.T) {
p := newRemovedPostings(a, b)
assert.Equal(t, c.success, p.Seek(c.seek))
require.Equal(t, c.success, p.Seek(c.seek))
// After Seek(), At() should be called.
if c.success {
start := p.At()
lst, err := ExpandPostings(p)
assert.NoError(t, err)
require.NoError(t, err)
lst = append([]uint64{start}, lst...)
assert.Equal(t, c.res, lst)
require.Equal(t, c.res, lst)
}
}
}
@ -664,12 +664,12 @@ func TestBigEndian(t *testing.T) {
t.Run("Iteration", func(t *testing.T) {
bep := newBigEndianPostings(beLst)
for i := 0; i < num; i++ {
assert.True(t, bep.Next())
assert.Equal(t, uint64(ls[i]), bep.At())
require.True(t, bep.Next())
require.Equal(t, uint64(ls[i]), bep.At())
}
assert.False(t, bep.Next())
assert.NoError(t, bep.Err())
require.False(t, bep.Next())
require.NoError(t, bep.Err())
})
t.Run("Seek", func(t *testing.T) {
@ -713,9 +713,9 @@ func TestBigEndian(t *testing.T) {
bep := newBigEndianPostings(beLst)
for _, v := range table {
assert.Equal(t, v.found, bep.Seek(uint64(v.seek)))
assert.Equal(t, uint64(v.val), bep.At())
assert.NoError(t, bep.Err())
require.Equal(t, v.found, bep.Seek(uint64(v.seek)))
require.Equal(t, uint64(v.val), bep.At())
require.NoError(t, bep.Err())
}
})
}
@ -733,8 +733,8 @@ func TestIntersectWithMerge(t *testing.T) {
p := Intersect(a, b)
res, err := ExpandPostings(p)
assert.NoError(t, err)
assert.Equal(t, []uint64{30}, res)
require.NoError(t, err)
require.Equal(t, []uint64{30}, res)
}
func TestWithoutPostings(t *testing.T) {
@ -795,12 +795,12 @@ func TestWithoutPostings(t *testing.T) {
}
expected, err := ExpandPostings(c.res)
assert.NoError(t, err)
require.NoError(t, err)
w := Without(c.base, c.drop)
if c.res == EmptyPostings() {
assert.Equal(t, EmptyPostings(), w)
require.Equal(t, EmptyPostings(), w)
return
}
@ -809,8 +809,8 @@ func TestWithoutPostings(t *testing.T) {
}
res, err := ExpandPostings(w)
assert.NoError(t, err)
assert.Equal(t, expected, res)
require.NoError(t, err)
require.Equal(t, expected, res)
})
}
}
@ -860,17 +860,17 @@ func TestMemPostings_Delete(t *testing.T) {
// Make sure postings gotten before the delete have the old data when
// iterated over.
expanded, err := ExpandPostings(before)
assert.NoError(t, err)
assert.Equal(t, []uint64{1, 2, 3}, expanded)
require.NoError(t, err)
require.Equal(t, []uint64{1, 2, 3}, expanded)
// Make sure postings gotten after the delete have the new data when
// iterated over.
expanded, err = ExpandPostings(after)
assert.NoError(t, err)
assert.Equal(t, []uint64{1, 3}, expanded)
require.NoError(t, err)
require.Equal(t, []uint64{1, 3}, expanded)
deleted := p.Get("lbl1", "b")
expanded, err = ExpandPostings(deleted)
assert.NoError(t, err)
assert.Equal(t, 0, len(expanded), "expected empty postings, got %v", expanded)
require.NoError(t, err)
require.Equal(t, 0, len(expanded), "expected empty postings, got %v", expanded)
}

View file

@ -15,7 +15,7 @@ package index
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestPostingsStats(t *testing.T) {
@ -33,9 +33,9 @@ func TestPostingsStats(t *testing.T) {
stats.push(Stat{Name: "Stuff", Count: 3000000})
data := stats.get()
assert.Equal(t, 10, len(data))
require.Equal(t, 10, len(data))
for i := 0; i < heapLength; i++ {
assert.Equal(t, uint64(max-i), data[i].Count)
require.Equal(t, uint64(max-i), data[i].Count)
}
}
@ -52,8 +52,8 @@ func TestPostingsStats2(t *testing.T) {
data := stats.get()
assert.Equal(t, 4, len(data))
assert.Equal(t, uint64(11), data[0].Count)
require.Equal(t, 4, len(data))
require.Equal(t, uint64(11), data[0].Count)
}
func BenchmarkPostingStatsMaxHep(b *testing.B) {
stats := &maxHeap{}

View file

@ -21,7 +21,7 @@ import (
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
)
@ -33,14 +33,14 @@ const (
func BenchmarkPostingsForMatchers(b *testing.B) {
chunkDir, err := ioutil.TempDir("", "chunk_dir")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(chunkDir))
require.NoError(b, os.RemoveAll(chunkDir))
}()
h, err := NewHead(nil, nil, nil, 1000, chunkDir, nil, DefaultStripeSize, nil)
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, h.Close())
require.NoError(b, h.Close())
}()
app := h.Appender(context.Background())
@ -58,28 +58,28 @@ func BenchmarkPostingsForMatchers(b *testing.B) {
addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", "2_"+strconv.Itoa(n)+postingsBenchSuffix, "j", "foo"))
}
}
assert.NoError(b, app.Commit())
require.NoError(b, app.Commit())
ir, err := h.Index()
assert.NoError(b, err)
require.NoError(b, err)
b.Run("Head", func(b *testing.B) {
benchmarkPostingsForMatchers(b, ir)
})
tmpdir, err := ioutil.TempDir("", "test_benchpostingsformatchers")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(tmpdir))
require.NoError(b, os.RemoveAll(tmpdir))
}()
blockdir := createBlockFromHead(b, tmpdir, h)
block, err := OpenBlock(nil, blockdir, nil)
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, block.Close())
require.NoError(b, block.Close())
}()
ir, err = block.Index()
assert.NoError(b, err)
require.NoError(b, err)
defer ir.Close()
b.Run("Block", func(b *testing.B) {
benchmarkPostingsForMatchers(b, ir)
@ -134,7 +134,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
b.Run(c.name, func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := PostingsForMatchers(ir, c.matchers...)
assert.NoError(b, err)
require.NoError(b, err)
}
})
}
@ -142,33 +142,33 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
func BenchmarkQuerierSelect(b *testing.B) {
chunkDir, err := ioutil.TempDir("", "chunk_dir")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(chunkDir))
require.NoError(b, os.RemoveAll(chunkDir))
}()
h, err := NewHead(nil, nil, nil, 1000, chunkDir, nil, DefaultStripeSize, nil)
assert.NoError(b, err)
require.NoError(b, err)
defer h.Close()
app := h.Appender(context.Background())
numSeries := 1000000
for i := 0; i < numSeries; i++ {
app.Add(labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)), int64(i), 0)
}
assert.NoError(b, app.Commit())
require.NoError(b, app.Commit())
bench := func(b *testing.B, br BlockReader, sorted bool) {
matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")
for s := 1; s <= numSeries; s *= 10 {
b.Run(fmt.Sprintf("%dof%d", s, numSeries), func(b *testing.B) {
q, err := NewBlockQuerier(br, 0, int64(s-1))
assert.NoError(b, err)
require.NoError(b, err)
b.ResetTimer()
for i := 0; i < b.N; i++ {
ss := q.Select(sorted, nil, matcher)
for ss.Next() {
}
assert.NoError(b, ss.Err())
require.NoError(b, ss.Err())
}
q.Close()
})
@ -183,16 +183,16 @@ func BenchmarkQuerierSelect(b *testing.B) {
})
tmpdir, err := ioutil.TempDir("", "test_benchquerierselect")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(tmpdir))
require.NoError(b, os.RemoveAll(tmpdir))
}()
blockdir := createBlockFromHead(b, tmpdir, h)
block, err := OpenBlock(nil, blockdir, nil)
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, block.Close())
require.NoError(b, block.Close())
}()
b.Run("Block", func(b *testing.B) {

View file

@ -27,7 +27,7 @@ import (
"time"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage"
@ -139,7 +139,7 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe
chunkRef++
}
ls := labels.FromMap(s.lset)
assert.NoError(t, mi.AddSeries(uint64(i), ls, metas...))
require.NoError(t, mi.AddSeries(uint64(i), ls, metas...))
postings.Add(uint64(i), ls)
@ -153,7 +153,7 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe
}
}
assert.NoError(t, postings.Iter(func(l labels.Label, p index.Postings) error {
require.NoError(t, postings.Iter(func(l labels.Label, p index.Postings) error {
return mi.WritePostings(l.Name, l.Value, p)
}))
return mi, chkReader, blockMint, blockMaxt
@ -180,27 +180,27 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C
}
res := q.Select(false, nil, c.ms...)
defer func() { assert.NoError(t, q.Close()) }()
defer func() { require.NoError(t, q.Close()) }()
for {
eok, rok := c.exp.Next(), res.Next()
assert.Equal(t, eok, rok)
require.Equal(t, eok, rok)
if !eok {
assert.Equal(t, 0, len(res.Warnings()))
require.Equal(t, 0, len(res.Warnings()))
break
}
sexp := c.exp.At()
sres := res.At()
assert.Equal(t, sexp.Labels(), sres.Labels())
require.Equal(t, sexp.Labels(), sres.Labels())
smplExp, errExp := storage.ExpandSamples(sexp.Iterator(), nil)
smplRes, errRes := storage.ExpandSamples(sres.Iterator(), nil)
assert.Equal(t, errExp, errRes)
assert.Equal(t, smplExp, smplRes)
require.Equal(t, errExp, errRes)
require.Equal(t, smplExp, smplRes)
}
assert.NoError(t, res.Err())
require.NoError(t, res.Err())
})
t.Run("chunk", func(t *testing.T) {
@ -215,29 +215,29 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C
},
}
res := q.Select(false, nil, c.ms...)
defer func() { assert.NoError(t, q.Close()) }()
defer func() { require.NoError(t, q.Close()) }()
for {
eok, rok := c.expChks.Next(), res.Next()
assert.Equal(t, eok, rok)
require.Equal(t, eok, rok)
if !eok {
assert.Equal(t, 0, len(res.Warnings()))
require.Equal(t, 0, len(res.Warnings()))
break
}
sexpChks := c.expChks.At()
sres := res.At()
assert.Equal(t, sexpChks.Labels(), sres.Labels())
require.Equal(t, sexpChks.Labels(), sres.Labels())
chksExp, errExp := storage.ExpandChunks(sexpChks.Iterator())
rmChunkRefs(chksExp)
chksRes, errRes := storage.ExpandChunks(sres.Iterator())
rmChunkRefs(chksRes)
assert.Equal(t, errExp, errRes)
assert.Equal(t, chksExp, chksRes)
require.Equal(t, errExp, errRes)
require.Equal(t, chksExp, chksRes)
}
assert.NoError(t, res.Err())
require.NoError(t, res.Err())
})
}
@ -408,7 +408,7 @@ func TestBlockQuerier_AgainstHeadWithOpenChunks(t *testing.T) {
} {
t.Run("", func(t *testing.T) {
h, err := NewHead(nil, nil, nil, 2*time.Hour.Milliseconds(), "", nil, DefaultStripeSize, nil)
assert.NoError(t, err)
require.NoError(t, err)
defer h.Close()
app := h.Appender(context.Background())
@ -416,19 +416,19 @@ func TestBlockQuerier_AgainstHeadWithOpenChunks(t *testing.T) {
for _, chk := range s.chunks {
for _, sample := range chk {
_, err = app.Add(labels.FromMap(s.lset), sample.t, sample.v)
assert.NoError(t, err)
require.NoError(t, err)
}
}
}
assert.NoError(t, app.Commit())
require.NoError(t, app.Commit())
hr := NewRangeHead(h, c.mint, c.maxt)
ir, err := hr.Index()
assert.NoError(t, err)
require.NoError(t, err)
defer ir.Close()
cr, err := hr.Chunks()
assert.NoError(t, err)
require.NoError(t, err)
defer cr.Close()
testBlockQuerier(t, c, ir, cr, tombstones.NewMemTombstones())
@ -811,8 +811,8 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
var r []tsdbutil.Sample
if tc.seek != 0 {
assert.Equal(t, tc.seekSuccess, it.Seek(tc.seek))
assert.Equal(t, tc.seekSuccess, it.Seek(tc.seek)) // Next one should be noop.
require.Equal(t, tc.seekSuccess, it.Seek(tc.seek))
require.Equal(t, tc.seekSuccess, it.Seek(tc.seek)) // Next one should be noop.
if tc.seekSuccess {
// After a successful seek the iterator is ready. Grab the value.
@ -821,9 +821,9 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
}
}
expandedResult, err := storage.ExpandSamples(it, newSample)
assert.NoError(t, err)
require.NoError(t, err)
r = append(r, expandedResult...)
assert.Equal(t, tc.expected, r)
require.Equal(t, tc.expected, r)
})
t.Run("chunk", func(t *testing.T) {
f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...)
@ -834,12 +834,12 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
return
}
expandedResult, err := storage.ExpandChunks(it)
assert.NoError(t, err)
require.NoError(t, err)
// We don't care about ref IDs for comparison; only the chunks' samples matter.
rmChunkRefs(expandedResult)
rmChunkRefs(tc.expectedChks)
assert.Equal(t, tc.expectedChks, expandedResult)
require.Equal(t, tc.expectedChks, expandedResult)
})
})
}
@ -860,12 +860,12 @@ func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) {
)
it := newPopulateWithDelGenericSeriesIterator(f, chkMetas, nil).toSeriesIterator()
assert.True(t, it.Seek(1))
assert.True(t, it.Seek(2))
assert.True(t, it.Seek(2))
require.True(t, it.Seek(1))
require.True(t, it.Seek(2))
require.True(t, it.Seek(2))
ts, v := it.At()
assert.Equal(t, int64(2), ts)
assert.Equal(t, float64(2), v)
require.Equal(t, int64(2), ts)
require.Equal(t, float64(2), v)
}
// Regression when seeked chunks were still found via binary search and we always
@ -878,15 +878,15 @@ func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) {
)
it := newPopulateWithDelGenericSeriesIterator(f, chkMetas, nil).toSeriesIterator()
assert.True(t, it.Next())
require.True(t, it.Next())
ts, v := it.At()
assert.Equal(t, int64(1), ts)
assert.Equal(t, float64(2), v)
require.Equal(t, int64(1), ts)
require.Equal(t, float64(2), v)
assert.True(t, it.Seek(4))
require.True(t, it.Seek(4))
ts, v = it.At()
assert.Equal(t, int64(5), ts)
assert.Equal(t, float64(6), v)
require.Equal(t, int64(5), ts)
require.Equal(t, float64(6), v)
}
func TestPopulateWithDelSeriesIterator_SeekWithMinTime(t *testing.T) {
@ -895,8 +895,8 @@ func TestPopulateWithDelSeriesIterator_SeekWithMinTime(t *testing.T) {
)
it := newPopulateWithDelGenericSeriesIterator(f, chkMetas, nil).toSeriesIterator()
assert.Equal(t, false, it.Seek(7))
assert.Equal(t, true, it.Seek(3))
require.Equal(t, false, it.Seek(7))
require.Equal(t, true, it.Seek(3))
}
// Regression when calling Next() with a time bounded to fit within two samples.
@ -909,7 +909,7 @@ func TestPopulateWithDelSeriesIterator_NextWithMinTime(t *testing.T) {
it := newPopulateWithDelGenericSeriesIterator(
f, chkMetas, tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 4, Maxt: math.MaxInt64}),
).toSeriesIterator()
assert.Equal(t, false, it.Next())
require.Equal(t, false, it.Next())
}
// Test the cost of merging series sets for different number of merged sets and their size.
@ -929,7 +929,7 @@ func BenchmarkMergedSeriesSet(b *testing.B) {
for _, j := range []int{1, 2, 4, 8, 16, 32} {
b.Run(fmt.Sprintf("series=%d,blocks=%d", k, j), func(b *testing.B) {
lbls, err := labels.ReadLabels(filepath.Join("testdata", "20kseries.json"), k)
assert.NoError(b, err)
require.NoError(b, err)
sort.Sort(labels.Slice(lbls))
@ -955,8 +955,8 @@ func BenchmarkMergedSeriesSet(b *testing.B) {
for ms.Next() {
i++
}
assert.NoError(b, ms.Err())
assert.Equal(b, len(lbls), i)
require.NoError(b, ms.Err())
require.Equal(b, len(lbls), i)
}
})
}
@ -981,7 +981,7 @@ func (cr mockChunkReader) Close() error {
func TestDeletedIterator(t *testing.T) {
chk := chunkenc.NewXORChunk()
app, err := chk.Appender()
assert.NoError(t, err)
require.NoError(t, err)
// Insert random stuff from (0, 1000).
act := make([]sample, 1000)
for i := 0; i < 1000; i++ {
@ -1018,11 +1018,11 @@ func TestDeletedIterator(t *testing.T) {
}
}
assert.Less(t, i, int64(1000))
require.Less(t, i, int64(1000))
ts, v := it.At()
assert.Equal(t, act[i].t, ts)
assert.Equal(t, act[i].v, v)
require.Equal(t, act[i].t, ts)
require.Equal(t, act[i].v, v)
}
// There has been an extra call to Next().
i++
@ -1033,15 +1033,15 @@ func TestDeletedIterator(t *testing.T) {
}
}
assert.GreaterOrEqual(t, i, int64(1000))
assert.NoError(t, it.Err())
require.GreaterOrEqual(t, i, int64(1000))
require.NoError(t, it.Err())
}
}
func TestDeletedIterator_WithSeek(t *testing.T) {
chk := chunkenc.NewXORChunk()
app, err := chk.Appender()
assert.NoError(t, err)
require.NoError(t, err)
// Insert random stuff from (0, 1000).
act := make([]sample, 1000)
for i := 0; i < 1000; i++ {
@ -1071,10 +1071,10 @@ func TestDeletedIterator_WithSeek(t *testing.T) {
for _, c := range cases {
it := &deletedIterator{it: chk.Iterator(nil), intervals: c.r[:]}
assert.Equal(t, c.ok, it.Seek(c.seek))
require.Equal(t, c.ok, it.Seek(c.seek))
if c.ok {
ts, _ := it.At()
assert.Equal(t, c.seekedTs, ts)
require.Equal(t, c.seekedTs, ts)
}
}
}
@ -1228,9 +1228,9 @@ func BenchmarkQueryIterator(b *testing.B) {
b.Run(benchMsg, func(b *testing.B) {
dir, err := ioutil.TempDir("", "bench_query_iterator")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(dir))
require.NoError(b, os.RemoveAll(dir))
}()
var (
@ -1252,7 +1252,7 @@ func BenchmarkQueryIterator(b *testing.B) {
generatedSeries = populateSeries(prefilledLabels, mint, maxt)
}
block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil)
assert.NoError(b, err)
require.NoError(b, err)
blocks = append(blocks, block)
defer block.Close()
}
@ -1260,7 +1260,7 @@ func BenchmarkQueryIterator(b *testing.B) {
qblocks := make([]storage.Querier, 0, len(blocks))
for _, blk := range blocks {
q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64)
assert.NoError(b, err)
require.NoError(b, err)
qblocks = append(qblocks, q)
}
@ -1295,9 +1295,9 @@ func BenchmarkQuerySeek(b *testing.B) {
b.Run(benchMsg, func(b *testing.B) {
dir, err := ioutil.TempDir("", "bench_query_iterator")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(dir))
require.NoError(b, os.RemoveAll(dir))
}()
var (
@ -1319,7 +1319,7 @@ func BenchmarkQuerySeek(b *testing.B) {
generatedSeries = populateSeries(prefilledLabels, mint, maxt)
}
block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil)
assert.NoError(b, err)
require.NoError(b, err)
blocks = append(blocks, block)
defer block.Close()
}
@ -1327,7 +1327,7 @@ func BenchmarkQuerySeek(b *testing.B) {
qblocks := make([]storage.Querier, 0, len(blocks))
for _, blk := range blocks {
q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64)
assert.NoError(b, err)
require.NoError(b, err)
qblocks = append(qblocks, q)
}
@ -1346,11 +1346,11 @@ func BenchmarkQuerySeek(b *testing.B) {
for t := mint; t <= maxt; t++ {
it.Seek(t)
}
assert.NoError(b, it.Err())
require.NoError(b, it.Err())
}
assert.NoError(b, ss.Err())
assert.NoError(b, err)
assert.Equal(b, 0, len(ss.Warnings()))
require.NoError(b, ss.Err())
require.NoError(b, err)
require.Equal(b, 0, len(ss.Warnings()))
})
}
}
@ -1436,9 +1436,9 @@ func BenchmarkSetMatcher(b *testing.B) {
for _, c := range cases {
dir, err := ioutil.TempDir("", "bench_postings_for_matchers")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(dir))
require.NoError(b, os.RemoveAll(dir))
}()
var (
@ -1458,7 +1458,7 @@ func BenchmarkSetMatcher(b *testing.B) {
generatedSeries = populateSeries(prefilledLabels, mint, maxt)
}
block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil)
assert.NoError(b, err)
require.NoError(b, err)
blocks = append(blocks, block)
defer block.Close()
}
@ -1466,7 +1466,7 @@ func BenchmarkSetMatcher(b *testing.B) {
qblocks := make([]storage.Querier, 0, len(blocks))
for _, blk := range blocks {
q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64)
assert.NoError(b, err)
require.NoError(b, err)
qblocks = append(qblocks, q)
}
@ -1481,8 +1481,8 @@ func BenchmarkSetMatcher(b *testing.B) {
ss := sq.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "test", c.pattern))
for ss.Next() {
}
assert.NoError(b, ss.Err())
assert.Equal(b, 0, len(ss.Warnings()))
require.NoError(b, ss.Err())
require.Equal(b, 0, len(ss.Warnings()))
}
})
}
@ -1546,14 +1546,14 @@ func TestFindSetMatches(t *testing.T) {
func TestPostingsForMatchers(t *testing.T) {
chunkDir, err := ioutil.TempDir("", "chunk_dir")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(chunkDir))
require.NoError(t, os.RemoveAll(chunkDir))
}()
h, err := NewHead(nil, nil, nil, 1000, chunkDir, nil, DefaultStripeSize, nil)
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, h.Close())
require.NoError(t, h.Close())
}()
app := h.Appender(context.Background())
@ -1562,7 +1562,7 @@ func TestPostingsForMatchers(t *testing.T) {
app.Add(labels.FromStrings("n", "1", "i", "b"), 0, 0)
app.Add(labels.FromStrings("n", "2"), 0, 0)
app.Add(labels.FromStrings("n", "2.5"), 0, 0)
assert.NoError(t, app.Commit())
require.NoError(t, app.Commit())
cases := []struct {
matchers []*labels.Matcher
@ -1775,7 +1775,7 @@ func TestPostingsForMatchers(t *testing.T) {
}
ir, err := h.Index()
assert.NoError(t, err)
require.NoError(t, err)
for _, c := range cases {
exp := map[string]struct{}{}
@ -1783,18 +1783,18 @@ func TestPostingsForMatchers(t *testing.T) {
exp[l.String()] = struct{}{}
}
p, err := PostingsForMatchers(ir, c.matchers...)
assert.NoError(t, err)
require.NoError(t, err)
for p.Next() {
lbls := labels.Labels{}
assert.NoError(t, ir.Series(p.At(), &lbls, &[]chunks.Meta{}))
require.NoError(t, ir.Series(p.At(), &lbls, &[]chunks.Meta{}))
if _, ok := exp[lbls.String()]; !ok {
t.Errorf("Evaluating %v, unexpected result %s", c.matchers, lbls.String())
} else {
delete(exp, lbls.String())
}
}
assert.NoError(t, p.Err())
require.NoError(t, p.Err())
if len(exp) != 0 {
t.Errorf("Evaluating %v, missing results %+v", c.matchers, exp)
}
@ -1809,7 +1809,7 @@ func TestClose(t *testing.T) {
t.Fatalf("Opening test dir failed: %s", err)
}
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
createBlock(t, dir, genSeries(1, 1, 0, 10))
@ -1820,13 +1820,13 @@ func TestClose(t *testing.T) {
t.Fatalf("Opening test storage failed: %s", err)
}
defer func() {
assert.NoError(t, db.Close())
require.NoError(t, db.Close())
}()
q, err := db.Querier(context.TODO(), 0, 20)
assert.NoError(t, err)
assert.NoError(t, q.Close())
assert.Error(t, q.Close())
require.NoError(t, err)
require.NoError(t, q.Close())
require.Error(t, q.Close())
}
func BenchmarkQueries(b *testing.B) {
@ -1872,9 +1872,9 @@ func BenchmarkQueries(b *testing.B) {
for _, nSeries := range []int{10} {
for _, nSamples := range []int64{1000, 10000, 100000} {
dir, err := ioutil.TempDir("", "test_persisted_query")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(dir))
require.NoError(b, os.RemoveAll(dir))
}()
series := genSeries(nSeries, 5, 1, nSamples)
@ -1903,9 +1903,9 @@ func BenchmarkQueries(b *testing.B) {
qs := make([]storage.Querier, 0, 10)
for x := 0; x <= 10; x++ {
block, err := OpenBlock(nil, createBlock(b, dir, series), nil)
assert.NoError(b, err)
require.NoError(b, err)
q, err := NewBlockQuerier(block, 1, int64(nSamples))
assert.NoError(b, err)
require.NoError(b, err)
qs = append(qs, q)
}
@ -1914,23 +1914,23 @@ func BenchmarkQueries(b *testing.B) {
queryTypes["_10-Blocks"] = storage.NewMergeQuerier(qs, nil, storage.ChainedSeriesMerge)
chunkDir, err := ioutil.TempDir("", "chunk_dir")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(chunkDir))
require.NoError(b, os.RemoveAll(chunkDir))
}()
head := createHead(b, nil, series, chunkDir)
qHead, err := NewBlockQuerier(head, 1, nSamples)
assert.NoError(b, err)
require.NoError(b, err)
queryTypes["_Head"] = qHead
for qtype, querier := range queryTypes {
b.Run(title+qtype+"_nSeries:"+strconv.Itoa(nSeries)+"_nSamples:"+strconv.Itoa(int(nSamples)), func(b *testing.B) {
expExpansions, err := strconv.Atoi(string(title[len(title)-1]))
assert.NoError(b, err)
require.NoError(b, err)
benchQuery(b, expExpansions, querier, selectors)
})
}
assert.NoError(b, head.Close())
require.NoError(b, head.Close())
}
}
}
@ -1950,10 +1950,10 @@ func benchQuery(b *testing.B, expExpansions int, q storage.Querier, selectors la
}
actualExpansions++
}
assert.NoError(b, ss.Err())
assert.Equal(b, 0, len(ss.Warnings()))
assert.Equal(b, expExpansions, actualExpansions)
assert.NoError(b, ss.Err())
require.NoError(b, ss.Err())
require.Equal(b, 0, len(ss.Warnings()))
require.Equal(b, expExpansions, actualExpansions)
require.NoError(b, ss.Err())
}
}
@ -2018,9 +2018,9 @@ func TestPostingsForMatcher(t *testing.T) {
ir := &mockMatcherIndex{}
_, err := postingsForMatcher(ir, tc.matcher)
if tc.hasError {
assert.Error(t, err)
require.Error(t, err)
} else {
assert.NoError(t, err)
require.NoError(t, err)
}
}
}
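
The hasError branching above is the standard table-driven shape for error expectations: require keeps a failing case from running the success-path checks on garbage. A generic sketch, with strconv.Atoi standing in for postingsForMatcher:

package example

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestParseCases(t *testing.T) {
	cases := []struct {
		in       string
		hasError bool
		want     int
	}{
		{in: "42", want: 42},
		{in: "not-a-number", hasError: true},
	}
	for _, c := range cases {
		got, err := strconv.Atoi(c.in)
		if c.hasError {
			require.Error(t, err)
			continue
		}
		// Only reached when parsing succeeded, so got is safe to check.
		require.NoError(t, err)
		require.Equal(t, c.want, got)
	}
}
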
@ -2096,7 +2096,7 @@ func TestBlockBaseSeriesSet(t *testing.T) {
for _, tc := range cases {
mi := newMockIndex()
for _, s := range tc.series {
assert.NoError(t, mi.AddSeries(s.ref, s.lset, s.chunks...))
require.NoError(t, mi.AddSeries(s.ref, s.lset, s.chunks...))
}
bcs := &blockBaseSeriesSet{
@ -2110,12 +2110,12 @@ func TestBlockBaseSeriesSet(t *testing.T) {
chks := bcs.currIterFn().chks
idx := tc.expIdxs[i]
assert.Equal(t, tc.series[idx].lset, bcs.currLabels)
assert.Equal(t, tc.series[idx].chunks, chks)
require.Equal(t, tc.series[idx].lset, bcs.currLabels)
require.Equal(t, tc.series[idx].chunks, chks)
i++
}
assert.Equal(t, len(tc.expIdxs), i)
assert.NoError(t, bcs.Err())
require.Equal(t, len(tc.expIdxs), i)
require.NoError(t, bcs.Err())
}
}

View file

@ -18,7 +18,7 @@ import (
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/tsdb/encoding"
@ -42,8 +42,8 @@ func TestRecord_EncodeDecode(t *testing.T) {
},
}
decSeries, err := dec.Series(enc.Series(series, nil), nil)
assert.NoError(t, err)
assert.Equal(t, series, decSeries)
require.NoError(t, err)
require.Equal(t, series, decSeries)
samples := []RefSample{
{Ref: 0, T: 12423423, V: 1.2345},
@ -51,8 +51,8 @@ func TestRecord_EncodeDecode(t *testing.T) {
{Ref: 2, T: 0, V: 99999},
}
decSamples, err := dec.Samples(enc.Samples(samples, nil), nil)
assert.NoError(t, err)
assert.Equal(t, samples, decSamples)
require.NoError(t, err)
require.Equal(t, samples, decSamples)
// Intervals get split up into single entries. So we don't get back exactly
// what we put in.
@ -67,8 +67,8 @@ func TestRecord_EncodeDecode(t *testing.T) {
}},
}
decTstones, err := dec.Tombstones(enc.Tombstones(tstones, nil), nil)
assert.NoError(t, err)
assert.Equal(t, []tombstones.Stone{
require.NoError(t, err)
require.Equal(t, []tombstones.Stone{
{Ref: 123, Intervals: tombstones.Intervals{{Mint: -1000, Maxt: 1231231}}},
{Ref: 123, Intervals: tombstones.Intervals{{Mint: 5000, Maxt: 0}}},
{Ref: 13, Intervals: tombstones.Intervals{{Mint: -1000, Maxt: -11}}},
@ -92,7 +92,7 @@ func TestRecord_Corrupted(t *testing.T) {
corrupted := enc.Series(series, nil)[:8]
_, err := dec.Series(corrupted, nil)
assert.Equal(t, err, encoding.ErrInvalidSize)
require.Equal(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted sample record", func(t *testing.T) {
@ -102,7 +102,7 @@ func TestRecord_Corrupted(t *testing.T) {
corrupted := enc.Samples(samples, nil)[:8]
_, err := dec.Samples(corrupted, nil)
assert.Equal(t, errors.Cause(err), encoding.ErrInvalidSize)
require.Equal(t, errors.Cause(err), encoding.ErrInvalidSize)
})
t.Run("Test corrupted tombstone record", func(t *testing.T) {
@@ -115,7 +115,7 @@ func TestRecord_Corrupted(t *testing.T) {
corrupted := enc.Tombstones(tstones, nil)[:8]
_, err := dec.Tombstones(corrupted, nil)
assert.Equal(t, err, encoding.ErrInvalidSize)
require.Equal(t, err, encoding.ErrInvalidSize)
})
}
@@ -125,19 +125,19 @@ func TestRecord_Type(t *testing.T) {
series := []RefSeries{{Ref: 100, Labels: labels.FromStrings("abc", "123")}}
recordType := dec.Type(enc.Series(series, nil))
assert.Equal(t, Series, recordType)
require.Equal(t, Series, recordType)
samples := []RefSample{{Ref: 123, T: 12345, V: 1.2345}}
recordType = dec.Type(enc.Samples(samples, nil))
assert.Equal(t, Samples, recordType)
require.Equal(t, Samples, recordType)
tstones := []tombstones.Stone{{Ref: 1, Intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}}}}
recordType = dec.Type(enc.Tombstones(tstones, nil))
assert.Equal(t, Tombstones, recordType)
require.Equal(t, Tombstones, recordType)
recordType = dec.Type(nil)
assert.Equal(t, Unknown, recordType)
require.Equal(t, Unknown, recordType)
recordType = dec.Type([]byte{0})
assert.Equal(t, Unknown, recordType)
require.Equal(t, Unknown, recordType)
}
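
The encode/decode cases above all share one roundtrip shape. A small helper makes the pattern explicit; this is a sketch only, reusing the package's own Encoder, Decoder and RefSeries types the way the tests do:

package record

import (
    "testing"

    "github.com/stretchr/testify/require"
)

// roundtripSeries encodes a series record, decodes it back, and requires an
// exact roundtrip, mirroring the assertions in TestRecord_EncodeDecode.
func roundtripSeries(t *testing.T, series []RefSeries) {
    t.Helper()
    var enc Encoder
    var dec Decoder
    decSeries, err := dec.Series(enc.Series(series, nil), nil)
    require.NoError(t, err)
    require.Equal(t, series, decSeries)
}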


@@ -19,7 +19,7 @@ import (
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/tsdb/chunks"
@@ -29,9 +29,9 @@ import (
func TestRepairBadIndexVersion(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "test")
assert.NoError(t, err)
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, os.RemoveAll(tmpDir))
require.NoError(t, os.RemoveAll(tmpDir))
})
// The broken index used in this test was written by the following script
@@ -70,40 +70,40 @@ func TestRepairBadIndexVersion(t *testing.T) {
tmpDbDir := filepath.Join(tmpDir, "01BZJ9WJQPWHGNC2W4J9TA62KC")
// Create a copy DB to run test against.
assert.NoError(t, fileutil.CopyDirs(filepath.Join("testdata", "repair_index_version", "01BZJ9WJQPWHGNC2W4J9TA62KC"), tmpDbDir))
require.NoError(t, fileutil.CopyDirs(filepath.Join("testdata", "repair_index_version", "01BZJ9WJQPWHGNC2W4J9TA62KC"), tmpDbDir))
// Check the current db.
// In its current state, lookups should fail with the fixed code.
_, _, err = readMetaFile(tmpDbDir)
assert.Error(t, err)
require.Error(t, err)
// Touch chunks dir in block to imitate them.
assert.NoError(t, os.MkdirAll(filepath.Join(tmpDbDir, "chunks"), 0777))
require.NoError(t, os.MkdirAll(filepath.Join(tmpDbDir, "chunks"), 0777))
// Read current index to check integrity.
r, err := index.NewFileReader(filepath.Join(tmpDbDir, indexFilename))
assert.NoError(t, err)
require.NoError(t, err)
p, err := r.Postings("b", "1")
assert.NoError(t, err)
require.NoError(t, err)
for p.Next() {
t.Logf("next ID %d", p.At())
var lset labels.Labels
assert.Error(t, r.Series(p.At(), &lset, nil))
require.Error(t, r.Series(p.At(), &lset, nil))
}
assert.NoError(t, p.Err())
assert.NoError(t, r.Close())
require.NoError(t, p.Err())
require.NoError(t, r.Close())
// On DB opening all blocks in the base dir should be repaired.
db, err := Open(tmpDir, nil, nil, nil)
assert.NoError(t, err)
require.NoError(t, err)
db.Close()
r, err = index.NewFileReader(filepath.Join(tmpDbDir, indexFilename))
assert.NoError(t, err)
require.NoError(t, err)
defer r.Close()
p, err = r.Postings("b", "1")
assert.NoError(t, err)
require.NoError(t, err)
res := []labels.Labels{}
for p.Next() {
@@ -111,17 +111,17 @@ func TestRepairBadIndexVersion(t *testing.T) {
var lset labels.Labels
var chks []chunks.Meta
assert.NoError(t, r.Series(p.At(), &lset, &chks))
require.NoError(t, r.Series(p.At(), &lset, &chks))
res = append(res, lset)
}
assert.NoError(t, p.Err())
assert.Equal(t, []labels.Labels{
require.NoError(t, p.Err())
require.Equal(t, []labels.Labels{
{{Name: "a", Value: "1"}, {Name: "b", Value: "1"}},
{{Name: "a", Value: "2"}, {Name: "b", Value: "1"}},
}, res)
meta, _, err := readMetaFile(tmpDbDir)
assert.NoError(t, err)
assert.Equal(t, metaVersion1, meta.Version, "unexpected meta version %d", meta.Version)
require.NoError(t, err)
require.Equal(t, metaVersion1, meta.Version, "unexpected meta version %d", meta.Version)
}


@@ -23,7 +23,7 @@ import (
"time"
"github.com/go-kit/kit/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
)
@@ -34,7 +34,7 @@ func TestMain(m *testing.M) {
func TestWriteAndReadbackTombstones(t *testing.T) {
tmpdir, _ := ioutil.TempDir("", "test")
defer func() {
assert.NoError(t, os.RemoveAll(tmpdir))
require.NoError(t, os.RemoveAll(tmpdir))
}()
ref := uint64(0)
@@ -54,13 +54,13 @@ func TestWriteAndReadbackTombstones(t *testing.T) {
}
_, err := WriteFile(log.NewNopLogger(), tmpdir, stones)
assert.NoError(t, err)
require.NoError(t, err)
restr, _, err := ReadTombstones(tmpdir)
assert.NoError(t, err)
require.NoError(t, err)
// Compare the two readers.
assert.Equal(t, stones, restr)
require.Equal(t, stones, restr)
}
func TestAddingNewIntervals(t *testing.T) {
@@ -157,7 +157,7 @@ func TestAddingNewIntervals(t *testing.T) {
for _, c := range cases {
t.Run("", func(t *testing.T) {
assert.Equal(t, c.exp, c.exist.Add(c.new))
require.Equal(t, c.exp, c.exist.Add(c.new))
})
}
}
@@ -178,7 +178,7 @@ func TestMemTombstonesConcurrency(t *testing.T) {
go func() {
for x := 0; x < totalRuns; x++ {
_, err := tomb.Get(uint64(x))
assert.NoError(t, err)
require.NoError(t, err)
}
wg.Done()
}()


@@ -18,7 +18,7 @@ import (
"sort"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSampleRing(t *testing.T) {
@@ -92,12 +92,12 @@ func TestBufferedSeriesIterator(t *testing.T) {
t, v := bit.At()
b = append(b, sample{t: t, v: v})
}
assert.Equal(t, exp, b)
require.Equal(t, exp, b)
}
sampleEq := func(ets int64, ev float64) {
ts, v := it.At()
assert.Equal(t, ets, ts)
assert.Equal(t, ev, v)
require.Equal(t, ets, ts)
require.Equal(t, ev, v)
}
it = NewBuffer(newListSeriesIterator([]sample{
@@ -111,29 +111,29 @@
{t: 101, v: 10},
}), 2)
assert.True(t, it.Seek(-123), "seek failed")
require.True(t, it.Seek(-123), "seek failed")
sampleEq(1, 2)
bufferEq(nil)
assert.True(t, it.Next(), "next failed")
require.True(t, it.Next(), "next failed")
sampleEq(2, 3)
bufferEq([]sample{{t: 1, v: 2}})
assert.True(t, it.Next(), "next failed")
assert.True(t, it.Next(), "next failed")
assert.True(t, it.Next(), "next failed")
require.True(t, it.Next(), "next failed")
require.True(t, it.Next(), "next failed")
require.True(t, it.Next(), "next failed")
sampleEq(5, 6)
bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}})
assert.True(t, it.Seek(5), "seek failed")
require.True(t, it.Seek(5), "seek failed")
sampleEq(5, 6)
bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}})
assert.True(t, it.Seek(101), "seek failed")
require.True(t, it.Seek(101), "seek failed")
sampleEq(101, 10)
bufferEq([]sample{{t: 99, v: 8}, {t: 100, v: 9}})
assert.False(t, it.Next(), "next succeeded unexpectedly")
require.False(t, it.Next(), "next succeeded unexpectedly")
}
type listSeriesIterator struct {


@@ -24,7 +24,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/tsdb/record"
@@ -32,127 +32,127 @@ import (
func TestLastCheckpoint(t *testing.T) {
dir, err := ioutil.TempDir("", "test_checkpoint")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
_, _, err = LastCheckpoint(dir)
assert.Equal(t, record.ErrNotFound, err)
require.Equal(t, record.ErrNotFound, err)
assert.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.0000"), 0777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.0000"), 0777))
s, k, err := LastCheckpoint(dir)
assert.NoError(t, err)
assert.Equal(t, filepath.Join(dir, "checkpoint.0000"), s)
assert.Equal(t, 0, k)
require.NoError(t, err)
require.Equal(t, filepath.Join(dir, "checkpoint.0000"), s)
require.Equal(t, 0, k)
assert.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.xyz"), 0777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.xyz"), 0777))
s, k, err = LastCheckpoint(dir)
assert.NoError(t, err)
assert.Equal(t, filepath.Join(dir, "checkpoint.0000"), s)
assert.Equal(t, 0, k)
require.NoError(t, err)
require.Equal(t, filepath.Join(dir, "checkpoint.0000"), s)
require.Equal(t, 0, k)
assert.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1"), 0777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1"), 0777))
s, k, err = LastCheckpoint(dir)
assert.NoError(t, err)
assert.Equal(t, filepath.Join(dir, "checkpoint.1"), s)
assert.Equal(t, 1, k)
require.NoError(t, err)
require.Equal(t, filepath.Join(dir, "checkpoint.1"), s)
require.Equal(t, 1, k)
assert.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1000"), 0777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1000"), 0777))
s, k, err = LastCheckpoint(dir)
assert.NoError(t, err)
assert.Equal(t, filepath.Join(dir, "checkpoint.1000"), s)
assert.Equal(t, 1000, k)
require.NoError(t, err)
require.Equal(t, filepath.Join(dir, "checkpoint.1000"), s)
require.Equal(t, 1000, k)
assert.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0777))
s, k, err = LastCheckpoint(dir)
assert.NoError(t, err)
assert.Equal(t, filepath.Join(dir, "checkpoint.99999999"), s)
assert.Equal(t, 99999999, k)
require.NoError(t, err)
require.Equal(t, filepath.Join(dir, "checkpoint.99999999"), s)
require.Equal(t, 99999999, k)
assert.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0777))
s, k, err = LastCheckpoint(dir)
assert.NoError(t, err)
assert.Equal(t, filepath.Join(dir, "checkpoint.100000000"), s)
assert.Equal(t, 100000000, k)
require.NoError(t, err)
require.Equal(t, filepath.Join(dir, "checkpoint.100000000"), s)
require.Equal(t, 100000000, k)
}
func TestDeleteCheckpoints(t *testing.T) {
dir, err := ioutil.TempDir("", "test_checkpoint")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
assert.NoError(t, DeleteCheckpoints(dir, 0))
require.NoError(t, DeleteCheckpoints(dir, 0))
assert.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.00"), 0777))
assert.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.01"), 0777))
assert.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.02"), 0777))
assert.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.03"), 0777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.00"), 0777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.01"), 0777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.02"), 0777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.03"), 0777))
assert.NoError(t, DeleteCheckpoints(dir, 2))
require.NoError(t, DeleteCheckpoints(dir, 2))
files, err := ioutil.ReadDir(dir)
assert.NoError(t, err)
require.NoError(t, err)
fns := []string{}
for _, f := range files {
fns = append(fns, f.Name())
}
assert.Equal(t, []string{"checkpoint.02", "checkpoint.03"}, fns)
require.Equal(t, []string{"checkpoint.02", "checkpoint.03"}, fns)
assert.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0777))
assert.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0777))
assert.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000001"), 0777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0777))
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000001"), 0777))
assert.NoError(t, DeleteCheckpoints(dir, 100000000))
require.NoError(t, DeleteCheckpoints(dir, 100000000))
files, err = ioutil.ReadDir(dir)
assert.NoError(t, err)
require.NoError(t, err)
fns = []string{}
for _, f := range files {
fns = append(fns, f.Name())
}
assert.Equal(t, []string{"checkpoint.100000000", "checkpoint.100000001"}, fns)
require.Equal(t, []string{"checkpoint.100000000", "checkpoint.100000001"}, fns)
}
func TestCheckpoint(t *testing.T) {
for _, compress := range []bool{false, true} {
t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
dir, err := ioutil.TempDir("", "test_checkpoint")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
var enc record.Encoder
// Create a dummy segment to bump the initial number.
seg, err := CreateSegment(dir, 100)
assert.NoError(t, err)
assert.NoError(t, seg.Close())
require.NoError(t, err)
require.NoError(t, seg.Close())
// Manually create checkpoint for 99 and earlier.
w, err := New(nil, nil, filepath.Join(dir, "checkpoint.0099"), compress)
assert.NoError(t, err)
require.NoError(t, err)
// Add some data we expect to be around later.
err = w.Log(enc.Series([]record.RefSeries{
{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")},
{Ref: 1, Labels: labels.FromStrings("a", "b", "c", "1")},
}, nil))
assert.NoError(t, err)
require.NoError(t, err)
// Log an unknown record that might have come from a future Prometheus version.
assert.NoError(t, w.Log([]byte{255}))
assert.NoError(t, w.Close())
require.NoError(t, w.Log([]byte{255}))
require.NoError(t, w.Close())
// Start a WAL and write records to it as usual.
w, err = NewSize(nil, nil, dir, 64*1024, compress)
assert.NoError(t, err)
require.NoError(t, err)
var last int64
for i := 0; ; i++ {
_, n, err := Segments(w.Dir())
assert.NoError(t, err)
require.NoError(t, err)
if n >= 106 {
break
}
@@ -164,7 +164,7 @@ func TestCheckpoint(t *testing.T) {
{Ref: 4, Labels: labels.FromStrings("a", "b", "c", "4")},
{Ref: 5, Labels: labels.FromStrings("a", "b", "c", "5")},
}, nil)
assert.NoError(t, w.Log(b))
require.NoError(t, w.Log(b))
}
// Write samples until the WAL has enough segments.
// Make them have drifting timestamps within a record to see that they
@@ -175,27 +175,27 @@ func TestCheckpoint(t *testing.T) {
{Ref: 2, T: last + 20000, V: float64(i)},
{Ref: 3, T: last + 30000, V: float64(i)},
}, nil)
assert.NoError(t, w.Log(b))
require.NoError(t, w.Log(b))
last += 100
}
assert.NoError(t, w.Close())
require.NoError(t, w.Close())
_, err = Checkpoint(log.NewNopLogger(), w, 100, 106, func(x uint64) bool {
return x%2 == 0
}, last/2)
assert.NoError(t, err)
assert.NoError(t, w.Truncate(107))
assert.NoError(t, DeleteCheckpoints(w.Dir(), 106))
require.NoError(t, err)
require.NoError(t, w.Truncate(107))
require.NoError(t, DeleteCheckpoints(w.Dir(), 106))
// Only the new checkpoint should be left.
files, err := ioutil.ReadDir(dir)
assert.NoError(t, err)
assert.Equal(t, 1, len(files))
assert.Equal(t, "checkpoint.00000106", files[0].Name())
require.NoError(t, err)
require.Equal(t, 1, len(files))
require.Equal(t, "checkpoint.00000106", files[0].Name())
sr, err := NewSegmentsReader(filepath.Join(dir, "checkpoint.00000106"))
assert.NoError(t, err)
require.NoError(t, err)
defer sr.Close()
var dec record.Decoder
@@ -208,17 +208,17 @@ func TestCheckpoint(t *testing.T) {
switch dec.Type(rec) {
case record.Series:
series, err = dec.Series(rec, series)
assert.NoError(t, err)
require.NoError(t, err)
case record.Samples:
samples, err := dec.Samples(rec, nil)
assert.NoError(t, err)
require.NoError(t, err)
for _, s := range samples {
assert.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp")
require.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp")
}
}
}
assert.NoError(t, r.Err())
assert.Equal(t, []record.RefSeries{
require.NoError(t, r.Err())
require.Equal(t, []record.RefSeries{
{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")},
{Ref: 2, Labels: labels.FromStrings("a", "b", "c", "2")},
{Ref: 4, Labels: labels.FromStrings("a", "b", "c", "4")},
@@ -230,27 +230,27 @@ func TestCheckpoint(t *testing.T) {
func TestCheckpointNoTmpFolderAfterError(t *testing.T) {
// Create a new wal with invalid data.
dir, err := ioutil.TempDir("", "test_checkpoint")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
w, err := NewSize(nil, nil, dir, 64*1024, false)
assert.NoError(t, err)
require.NoError(t, err)
var enc record.Encoder
assert.NoError(t, w.Log(enc.Series([]record.RefSeries{
require.NoError(t, w.Log(enc.Series([]record.RefSeries{
{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "2")}}, nil)))
assert.NoError(t, w.Close())
require.NoError(t, w.Close())
// Corrupt data.
f, err := os.OpenFile(filepath.Join(w.Dir(), "00000000"), os.O_WRONLY, 0666)
assert.NoError(t, err)
require.NoError(t, err)
_, err = f.WriteAt([]byte{42}, 1)
assert.NoError(t, err)
assert.NoError(t, f.Close())
require.NoError(t, err)
require.NoError(t, f.Close())
// Run the checkpoint and since the wal contains corrupt data this should return an error.
_, err = Checkpoint(log.NewNopLogger(), w, 0, 1, nil, 0)
assert.Error(t, err)
require.Error(t, err)
// Walk the wal dir to make sure no tmp folders are left behind after the error.
err = filepath.Walk(w.Dir(), func(path string, info os.FileInfo, err error) error {
@@ -262,5 +262,5 @@ func TestCheckpointNoTmpFolderAfterError(t *testing.T) {
}
return nil
})
assert.NoError(t, err)
require.NoError(t, err)
}
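
TestLastCheckpoint above pins down a naming contract: among checkpoint.<index> directories, the highest numeric index wins, and names whose suffix does not parse (checkpoint.xyz) are ignored. The following is an illustrative reconstruction of that selection logic under those assumptions, not the actual implementation (which, as the test shows, reports the no-checkpoint case via record.ErrNotFound rather than a boolean):

package example

import (
    "io/ioutil"
    "path/filepath"
    "strconv"
    "strings"
)

// lastCheckpoint scans dir for "checkpoint.<index>" entries and returns the
// path and index of the highest-numbered one, skipping unparsable names.
func lastCheckpoint(dir string) (path string, index int, found bool, err error) {
    entries, err := ioutil.ReadDir(dir)
    if err != nil {
        return "", 0, false, err
    }
    for _, e := range entries {
        name := e.Name()
        if !strings.HasPrefix(name, "checkpoint.") {
            continue
        }
        idx, err := strconv.Atoi(strings.TrimPrefix(name, "checkpoint."))
        if err != nil {
            continue // e.g. "checkpoint.xyz" is skipped, as the test expects.
        }
        if !found || idx > index {
            path, index, found = filepath.Join(dir, name), idx, true
        }
    }
    return path, index, found, nil
}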


@@ -30,7 +30,7 @@ import (
"time"
"github.com/go-kit/kit/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/util/testutil"
@@ -183,7 +183,7 @@ func TestReader(t *testing.T) {
if j >= len(c.exp) {
t.Fatal("received more records than expected")
}
assert.Equal(t, c.exp[j], rec, "Bytes within record did not match expected Bytes")
require.Equal(t, c.exp[j], rec, "Bytes within record did not match expected Bytes")
}
if !c.fail && r.Err() != nil {
t.Fatalf("unexpected error: %s", r.Err())
@@ -202,14 +202,14 @@ func TestReader_Live(t *testing.T) {
for i := range testReaderCases {
t.Run(strconv.Itoa(i), func(t *testing.T) {
writeFd, err := ioutil.TempFile("", "TestReader_Live")
assert.NoError(t, err)
require.NoError(t, err)
defer os.Remove(writeFd.Name())
go func(i int) {
for _, rec := range testReaderCases[i].t {
rec := encodedRecord(rec.t, rec.b)
_, err := writeFd.Write(rec)
assert.NoError(t, err)
require.NoError(t, err)
runtime.Gosched()
}
writeFd.Close()
@@ -217,21 +217,21 @@
// Read from a second FD on the same file.
readFd, err := os.Open(writeFd.Name())
assert.NoError(t, err)
require.NoError(t, err)
reader := NewLiveReader(logger, NewLiveReaderMetrics(nil), readFd)
for _, exp := range testReaderCases[i].exp {
for !reader.Next() {
assert.Equal(t, io.EOF, reader.Err(), "expect EOF, got: %v", reader.Err())
require.Equal(t, io.EOF, reader.Err(), "expect EOF, got: %v", reader.Err())
runtime.Gosched()
}
actual := reader.Record()
assert.Equal(t, exp, actual, "read wrong record")
require.Equal(t, exp, actual, "read wrong record")
}
assert.False(t, reader.Next(), "unexpected record")
require.False(t, reader.Next(), "unexpected record")
if testReaderCases[i].fail {
assert.Error(t, reader.Err())
require.Error(t, reader.Err())
}
})
}
@@ -311,33 +311,33 @@ func TestReaderFuzz(t *testing.T) {
for _, compress := range []bool{false, true} {
t.Run(fmt.Sprintf("%s,compress=%t", name, compress), func(t *testing.T) {
dir, err := ioutil.TempDir("", "wal_fuzz_live")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
w, err := NewSize(nil, nil, dir, 128*pageSize, compress)
assert.NoError(t, err)
require.NoError(t, err)
// Buffering required as we're not reading concurrently.
input := make(chan []byte, fuzzLen)
err = generateRandomEntries(w, input)
assert.NoError(t, err)
require.NoError(t, err)
close(input)
err = w.Close()
assert.NoError(t, err)
require.NoError(t, err)
sr, err := allSegments(w.Dir())
assert.NoError(t, err)
require.NoError(t, err)
defer sr.Close()
reader := fn(sr)
for expected := range input {
assert.True(t, reader.Next(), "expected record: %v", reader.Err())
assert.Equal(t, expected, reader.Record(), "read wrong record")
require.True(t, reader.Next(), "expected record: %v", reader.Err())
require.Equal(t, expected, reader.Record(), "read wrong record")
}
assert.False(t, reader.Next(), "unexpected record")
require.False(t, reader.Next(), "unexpected record")
})
}
}
@@ -348,13 +348,13 @@ func TestReaderFuzz_Live(t *testing.T) {
for _, compress := range []bool{false, true} {
t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
dir, err := ioutil.TempDir("", "wal_fuzz_live")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
w, err := NewSize(nil, nil, dir, 128*pageSize, compress)
assert.NoError(t, err)
require.NoError(t, err)
defer w.Close()
// In the background, generate a stream of random records and write them
@@ -363,17 +363,17 @@ func TestReaderFuzz_Live(t *testing.T) {
done := make(chan struct{})
go func() {
err := generateRandomEntries(w, input)
assert.NoError(t, err)
require.NoError(t, err)
time.Sleep(100 * time.Millisecond)
close(done)
}()
// Tail the WAL and compare the results.
m, _, err := Segments(w.Dir())
assert.NoError(t, err)
require.NoError(t, err)
seg, err := OpenReadSegment(SegmentName(dir, m))
assert.NoError(t, err)
require.NoError(t, err)
defer seg.Close()
r := NewLiveReader(logger, nil, seg)
@@ -384,10 +384,10 @@ func TestReaderFuzz_Live(t *testing.T) {
for r.Next() {
rec := r.Record()
expected, ok := <-input
assert.True(t, ok, "unexpected record")
assert.Equal(t, expected, rec, "record does not match expected")
require.True(t, ok, "unexpected record")
require.Equal(t, expected, rec, "record does not match expected")
}
assert.Equal(t, io.EOF, r.Err(), "expected EOF, got: %v", r.Err())
require.Equal(t, io.EOF, r.Err(), "expected EOF, got: %v", r.Err())
return true
}
@@ -397,7 +397,7 @@ func TestReaderFuzz_Live(t *testing.T) {
case <-segmentTicker.C:
// Check if new segments exist.
_, last, err := Segments(w.Dir())
assert.NoError(t, err)
require.NoError(t, err)
if last <= seg.i {
continue
}
@@ -406,11 +406,11 @@ func TestReaderFuzz_Live(t *testing.T) {
readSegment(r)
fi, err := os.Stat(SegmentName(dir, seg.i))
assert.NoError(t, err)
assert.Equal(t, r.Offset(), fi.Size(), "expected to have read whole segment, but read %d of %d", r.Offset(), fi.Size())
require.NoError(t, err)
require.Equal(t, r.Offset(), fi.Size(), "expected to have read whole segment, but read %d of %d", r.Offset(), fi.Size())
seg, err = OpenReadSegment(SegmentName(dir, seg.i+1))
assert.NoError(t, err)
require.NoError(t, err)
defer seg.Close()
r = NewLiveReader(logger, nil, seg)
@@ -423,7 +423,7 @@ func TestReaderFuzz_Live(t *testing.T) {
}
}
assert.Equal(t, io.EOF, r.Err(), "expected EOF")
require.Equal(t, io.EOF, r.Err(), "expected EOF")
})
}
}
@@ -433,92 +433,92 @@ func TestLiveReaderCorrupt_ShortFile(t *testing.T) {
// but the segment is only half written.
logger := testutil.NewLogger(t)
dir, err := ioutil.TempDir("", "wal_live_corrupt")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
w, err := NewSize(nil, nil, dir, pageSize, false)
assert.NoError(t, err)
require.NoError(t, err)
rec := make([]byte, pageSize-recordHeaderSize)
_, err = rand.Read(rec)
assert.NoError(t, err)
require.NoError(t, err)
err = w.Log(rec)
assert.NoError(t, err)
require.NoError(t, err)
err = w.Close()
assert.NoError(t, err)
require.NoError(t, err)
segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0666)
assert.NoError(t, err)
require.NoError(t, err)
err = segmentFile.Truncate(pageSize / 2)
assert.NoError(t, err)
require.NoError(t, err)
err = segmentFile.Close()
assert.NoError(t, err)
require.NoError(t, err)
// Try and LiveReader it.
m, _, err := Segments(w.Dir())
assert.NoError(t, err)
require.NoError(t, err)
seg, err := OpenReadSegment(SegmentName(dir, m))
assert.NoError(t, err)
require.NoError(t, err)
defer seg.Close()
r := NewLiveReader(logger, nil, seg)
assert.False(t, r.Next(), "expected no records")
assert.Equal(t, io.EOF, r.Err(), "expected error, got: %v", r.Err())
require.False(t, r.Next(), "expected no records")
require.Equal(t, io.EOF, r.Err(), "expected error, got: %v", r.Err())
}
func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) {
// Write a corrupt WAL segment, when record len > page size.
logger := testutil.NewLogger(t)
dir, err := ioutil.TempDir("", "wal_live_corrupt")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
w, err := NewSize(nil, nil, dir, pageSize*2, false)
assert.NoError(t, err)
require.NoError(t, err)
rec := make([]byte, pageSize-recordHeaderSize)
_, err = rand.Read(rec)
assert.NoError(t, err)
require.NoError(t, err)
err = w.Log(rec)
assert.NoError(t, err)
require.NoError(t, err)
err = w.Close()
assert.NoError(t, err)
require.NoError(t, err)
segmentFile, err := os.OpenFile(filepath.Join(dir, "00000000"), os.O_RDWR, 0666)
assert.NoError(t, err)
require.NoError(t, err)
// Override the record length
buf := make([]byte, 3)
buf[0] = byte(recFull)
binary.BigEndian.PutUint16(buf[1:], 0xFFFF)
_, err = segmentFile.WriteAt(buf, 0)
assert.NoError(t, err)
require.NoError(t, err)
err = segmentFile.Close()
assert.NoError(t, err)
require.NoError(t, err)
// Try and LiveReader it.
m, _, err := Segments(w.Dir())
assert.NoError(t, err)
require.NoError(t, err)
seg, err := OpenReadSegment(SegmentName(dir, m))
assert.NoError(t, err)
require.NoError(t, err)
defer seg.Close()
r := NewLiveReader(logger, NewLiveReaderMetrics(nil), seg)
assert.False(t, r.Next(), "expected no records")
assert.EqualError(t, r.Err(), "record length greater than a single page: 65542 > 32768", "expected error, got: %v", r.Err())
require.False(t, r.Next(), "expected no records")
require.EqualError(t, r.Err(), "record length greater than a single page: 65542 > 32768", "expected error, got: %v", r.Err())
}
func TestReaderData(t *testing.T) {
@@ -530,18 +530,18 @@ func TestReaderData(t *testing.T) {
for name, fn := range readerConstructors {
t.Run(name, func(t *testing.T) {
w, err := New(nil, nil, dir, true)
assert.NoError(t, err)
require.NoError(t, err)
sr, err := allSegments(dir)
assert.NoError(t, err)
require.NoError(t, err)
reader := fn(sr)
for reader.Next() {
}
assert.NoError(t, reader.Err())
require.NoError(t, reader.Err())
err = w.Repair(reader.Err())
assert.NoError(t, err)
require.NoError(t, err)
})
}
}


@@ -24,7 +24,7 @@ import (
"testing"
client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/tsdb/fileutil"
@@ -47,9 +47,9 @@ func TestWALRepair_ReadingError(t *testing.T) {
2,
func(f *os.File) {
_, err := f.Seek(pageSize*2, 0)
assert.NoError(t, err)
require.NoError(t, err)
_, err = f.Write([]byte{byte(recFirst)})
assert.NoError(t, err)
require.NoError(t, err)
},
8,
},
@@ -60,9 +60,9 @@ func TestWALRepair_ReadingError(t *testing.T) {
1,
func(f *os.File) {
_, err := f.Seek(pageSize, 0)
assert.NoError(t, err)
require.NoError(t, err)
_, err = f.Write([]byte{byte(recPageTerm)})
assert.NoError(t, err)
require.NoError(t, err)
},
4,
},
@@ -70,9 +70,9 @@ func TestWALRepair_ReadingError(t *testing.T) {
1,
func(f *os.File) {
_, err := f.Seek(pageSize, 0)
assert.NoError(t, err)
require.NoError(t, err)
_, err = f.Write([]byte{byte(recLast)})
assert.NoError(t, err)
require.NoError(t, err)
},
4,
},
@@ -80,9 +80,9 @@ func TestWALRepair_ReadingError(t *testing.T) {
1,
func(f *os.File) {
_, err := f.Seek(pageSize, 0)
assert.NoError(t, err)
require.NoError(t, err)
_, err = f.Write([]byte{123})
assert.NoError(t, err)
require.NoError(t, err)
},
4,
},
@@ -90,9 +90,9 @@ func TestWALRepair_ReadingError(t *testing.T) {
1,
func(f *os.File) {
_, err := f.Seek(pageSize+4, 0)
assert.NoError(t, err)
require.NoError(t, err)
_, err = f.Write([]byte{0})
assert.NoError(t, err)
require.NoError(t, err)
},
4,
},
@@ -100,9 +100,9 @@ func TestWALRepair_ReadingError(t *testing.T) {
1,
func(f *os.File) {
_, err := f.Seek(pageSize+2, 0)
assert.NoError(t, err)
require.NoError(t, err)
_, err = f.Write([]byte{0})
assert.NoError(t, err)
require.NoError(t, err)
},
4,
},
@@ -110,18 +110,18 @@ func TestWALRepair_ReadingError(t *testing.T) {
1,
func(f *os.File) {
_, err := f.Seek(pageSize+100, 0)
assert.NoError(t, err)
require.NoError(t, err)
_, err = f.Write([]byte("beef"))
assert.NoError(t, err)
require.NoError(t, err)
},
4,
},
} {
t.Run(name, func(t *testing.T) {
dir, err := ioutil.TempDir("", "wal_repair")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
// We create 3 segments with 3 records each and
@@ -129,7 +129,7 @@ func TestWALRepair_ReadingError(t *testing.T) {
// As a result we want a repaired WAL with given intact records.
segSize := 3 * pageSize
w, err := NewSize(nil, nil, dir, segSize, false)
assert.NoError(t, err)
require.NoError(t, err)
var records [][]byte
@@ -137,36 +137,36 @@ func TestWALRepair_ReadingError(t *testing.T) {
b := make([]byte, pageSize-recordHeaderSize)
b[0] = byte(i)
records = append(records, b)
assert.NoError(t, w.Log(b))
require.NoError(t, w.Log(b))
}
first, last, err := Segments(w.Dir())
assert.NoError(t, err)
assert.Equal(t, 3, 1+last-first, "wal creation didn't result in expected number of segments")
require.NoError(t, err)
require.Equal(t, 3, 1+last-first, "wal creation didn't result in expected number of segments")
assert.NoError(t, w.Close())
require.NoError(t, w.Close())
f, err := os.OpenFile(SegmentName(dir, test.corrSgm), os.O_RDWR, 0666)
assert.NoError(t, err)
require.NoError(t, err)
// Apply corruption function.
test.corrFunc(f)
assert.NoError(t, f.Close())
require.NoError(t, f.Close())
w, err = NewSize(nil, nil, dir, segSize, false)
assert.NoError(t, err)
require.NoError(t, err)
defer w.Close()
first, last, err = Segments(w.Dir())
assert.NoError(t, err)
require.NoError(t, err)
// Backfill segments from the most recent checkpoint onwards.
for i := first; i <= last; i++ {
s, err := OpenReadSegment(SegmentName(w.Dir(), i))
assert.NoError(t, err)
require.NoError(t, err)
sr := NewSegmentBufReader(s)
assert.NoError(t, err)
require.NoError(t, err)
r := NewReader(sr)
for r.Next() {
}
@@ -178,12 +178,12 @@ func TestWALRepair_ReadingError(t *testing.T) {
if r.Err() == nil {
continue
}
assert.NoError(t, w.Repair(r.Err()))
require.NoError(t, w.Repair(r.Err()))
break
}
sr, err := NewSegmentsReader(dir)
assert.NoError(t, err)
require.NoError(t, err)
defer sr.Close()
r := NewReader(sr)
@@ -192,8 +192,8 @@ func TestWALRepair_ReadingError(t *testing.T) {
var b []byte
result = append(result, append(b, r.Record()...))
}
assert.NoError(t, r.Err())
assert.Equal(t, test.intactRecs, len(result), "Wrong number of intact records")
require.NoError(t, r.Err())
require.Equal(t, test.intactRecs, len(result), "Wrong number of intact records")
for i, r := range result {
if !bytes.Equal(records[i], r) {
@@ -203,11 +203,11 @@ func TestWALRepair_ReadingError(t *testing.T) {
// Make sure there is a new 0 size Segment after the corrupted Segment.
_, last, err = Segments(w.Dir())
assert.NoError(t, err)
assert.Equal(t, test.corrSgm+1, last)
require.NoError(t, err)
require.Equal(t, test.corrSgm+1, last)
fi, err := os.Stat(SegmentName(dir, last))
assert.NoError(t, err)
assert.Equal(t, int64(0), fi.Size())
require.NoError(t, err)
require.Equal(t, int64(0), fi.Size())
})
}
}
@@ -217,9 +217,9 @@ func TestWALRepair_ReadingError(t *testing.T) {
// moving to write more records to the WAL.
func TestCorruptAndCarryOn(t *testing.T) {
dir, err := ioutil.TempDir("", "wal_repair")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
var (
@@ -232,37 +232,37 @@ func TestCorruptAndCarryOn(t *testing.T) {
// so when we truncate the file we're guaranteed to split a record.
{
w, err := NewSize(logger, nil, dir, segmentSize, false)
assert.NoError(t, err)
require.NoError(t, err)
for i := 0; i < 18; i++ {
buf := make([]byte, recordSize)
_, err := rand.Read(buf)
assert.NoError(t, err)
require.NoError(t, err)
err = w.Log(buf)
assert.NoError(t, err)
require.NoError(t, err)
}
err = w.Close()
assert.NoError(t, err)
require.NoError(t, err)
}
// Check all the segments are the correct size.
{
segments, err := listSegments(dir)
assert.NoError(t, err)
require.NoError(t, err)
for _, segment := range segments {
f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", segment.index)), os.O_RDONLY, 0666)
assert.NoError(t, err)
require.NoError(t, err)
fi, err := f.Stat()
assert.NoError(t, err)
require.NoError(t, err)
t.Log("segment", segment.index, "size", fi.Size())
assert.Equal(t, int64(segmentSize), fi.Size())
require.Equal(t, int64(segmentSize), fi.Size())
err = f.Close()
assert.NoError(t, err)
require.NoError(t, err)
}
}
@@ -270,74 +270,74 @@ func TestCorruptAndCarryOn(t *testing.T) {
// page in half, leaving 4 valid records.
{
f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%08d", 0)), os.O_RDWR, 0666)
assert.NoError(t, err)
require.NoError(t, err)
fi, err := f.Stat()
assert.NoError(t, err)
assert.Equal(t, int64(segmentSize), fi.Size())
require.NoError(t, err)
require.Equal(t, int64(segmentSize), fi.Size())
err = f.Truncate(int64(segmentSize / 2))
assert.NoError(t, err)
require.NoError(t, err)
err = f.Close()
assert.NoError(t, err)
require.NoError(t, err)
}
// Now try and repair this WAL, and write 5 more records to it.
{
sr, err := NewSegmentsReader(dir)
assert.NoError(t, err)
require.NoError(t, err)
reader := NewReader(sr)
i := 0
for ; i < 4 && reader.Next(); i++ {
assert.Equal(t, recordSize, len(reader.Record()))
require.Equal(t, recordSize, len(reader.Record()))
}
assert.Equal(t, 4, i, "not enough records")
assert.False(t, reader.Next(), "unexpected record")
require.Equal(t, 4, i, "not enough records")
require.False(t, reader.Next(), "unexpected record")
corruptionErr := reader.Err()
assert.Error(t, corruptionErr)
require.Error(t, corruptionErr)
err = sr.Close()
assert.NoError(t, err)
require.NoError(t, err)
w, err := NewSize(logger, nil, dir, segmentSize, false)
assert.NoError(t, err)
require.NoError(t, err)
err = w.Repair(corruptionErr)
assert.NoError(t, err)
require.NoError(t, err)
// Ensure that we have a completely clean slate after repairing.
assert.Equal(t, w.segment.Index(), 1) // We corrupted segment 0.
assert.Equal(t, w.donePages, 0)
require.Equal(t, w.segment.Index(), 1) // We corrupted segment 0.
require.Equal(t, w.donePages, 0)
for i := 0; i < 5; i++ {
buf := make([]byte, recordSize)
_, err := rand.Read(buf)
assert.NoError(t, err)
require.NoError(t, err)
err = w.Log(buf)
assert.NoError(t, err)
require.NoError(t, err)
}
err = w.Close()
assert.NoError(t, err)
require.NoError(t, err)
}
// Replay the WAL. Should get 9 records.
{
sr, err := NewSegmentsReader(dir)
assert.NoError(t, err)
require.NoError(t, err)
reader := NewReader(sr)
i := 0
for ; i < 9 && reader.Next(); i++ {
assert.Equal(t, recordSize, len(reader.Record()))
require.Equal(t, recordSize, len(reader.Record()))
}
assert.Equal(t, 9, i, "wrong number of records")
assert.False(t, reader.Next(), "unexpected record")
assert.Equal(t, nil, reader.Err())
require.Equal(t, 9, i, "wrong number of records")
require.False(t, reader.Next(), "unexpected record")
require.Equal(t, nil, reader.Err())
sr.Close()
}
}
@@ -345,14 +345,14 @@ func TestCorruptAndCarryOn(t *testing.T) {
// TestClose ensures that calling Close more than once doesn't panic and doesn't block.
func TestClose(t *testing.T) {
dir, err := ioutil.TempDir("", "wal_repair")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
w, err := NewSize(nil, nil, dir, pageSize, false)
assert.NoError(t, err)
assert.NoError(t, w.Close())
assert.Error(t, w.Close())
require.NoError(t, err)
require.NoError(t, w.Close())
require.Error(t, w.Close())
}
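
TestClose fixes the double-Close contract: the first Close succeeds and every later Close returns an error instead of panicking or blocking. A toy sketch of that contract; illustrative only, since the real WAL tracks far more state than a single flag:

package example

import "errors"

// closeOnce returns nil from the first Close and an error from every
// subsequent one, matching the behaviour TestClose requires of the WAL.
type closeOnce struct {
    closed bool
}

func (c *closeOnce) Close() error {
    if c.closed {
        return errors.New("already closed")
    }
    c.closed = true
    return nil
}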
func TestSegmentMetric(t *testing.T) {
@@ -362,12 +362,12 @@ func TestSegmentMetric(t *testing.T) {
)
dir, err := ioutil.TempDir("", "segment_metric")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
w, err := NewSize(nil, nil, dir, segmentSize, false)
assert.NoError(t, err)
require.NoError(t, err)
initialSegment := client_testutil.ToFloat64(w.metrics.currentSegment)
@ -375,13 +375,13 @@ func TestSegmentMetric(t *testing.T) {
for i := 0; i < 3; i++ {
buf := make([]byte, recordSize)
_, err := rand.Read(buf)
assert.NoError(t, err)
require.NoError(t, err)
err = w.Log(buf)
assert.NoError(t, err)
require.NoError(t, err)
}
assert.Equal(t, initialSegment+1, client_testutil.ToFloat64(w.metrics.currentSegment), "segment metric did not increment after segment rotation")
assert.NoError(t, w.Close())
require.Equal(t, initialSegment+1, client_testutil.ToFloat64(w.metrics.currentSegment), "segment metric did not increment after segment rotation")
require.NoError(t, w.Close())
}
func TestCompression(t *testing.T) {
@@ -393,48 +393,48 @@ func TestCompression(t *testing.T) {
)
dirPath, err := ioutil.TempDir("", fmt.Sprintf("TestCompression_%t", compressed))
assert.NoError(t, err)
require.NoError(t, err)
w, err := NewSize(nil, nil, dirPath, segmentSize, compressed)
assert.NoError(t, err)
require.NoError(t, err)
buf := make([]byte, recordSize)
for i := 0; i < records; i++ {
assert.NoError(t, w.Log(buf))
require.NoError(t, w.Log(buf))
}
assert.NoError(t, w.Close())
require.NoError(t, w.Close())
return dirPath
}
dirCompressed := bootstrap(true)
defer func() {
assert.NoError(t, os.RemoveAll(dirCompressed))
require.NoError(t, os.RemoveAll(dirCompressed))
}()
dirUnCompressed := bootstrap(false)
defer func() {
assert.NoError(t, os.RemoveAll(dirUnCompressed))
require.NoError(t, os.RemoveAll(dirUnCompressed))
}()
uncompressedSize, err := fileutil.DirSize(dirUnCompressed)
assert.NoError(t, err)
require.NoError(t, err)
compressedSize, err := fileutil.DirSize(dirCompressed)
assert.NoError(t, err)
require.NoError(t, err)
assert.Greater(t, float64(uncompressedSize)*0.75, float64(compressedSize), "Compressing zeroes should save at least 25%% space - uncompressedSize: %d, compressedSize: %d", uncompressedSize, compressedSize)
require.Greater(t, float64(uncompressedSize)*0.75, float64(compressedSize), "Compressing zeroes should save at least 25%% space - uncompressedSize: %d, compressedSize: %d", uncompressedSize, compressedSize)
}
func BenchmarkWAL_LogBatched(b *testing.B) {
for _, compress := range []bool{true, false} {
b.Run(fmt.Sprintf("compress=%t", compress), func(b *testing.B) {
dir, err := ioutil.TempDir("", "bench_logbatch")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(dir))
require.NoError(b, os.RemoveAll(dir))
}()
w, err := New(nil, nil, dir, compress)
assert.NoError(b, err)
require.NoError(b, err)
defer w.Close()
var buf [2048]byte
@@ -447,7 +447,7 @@ func BenchmarkWAL_LogBatched(b *testing.B) {
continue
}
err := w.Log(recs...)
assert.NoError(b, err)
require.NoError(b, err)
recs = recs[:0]
}
// Stop timer to not count fsync time on close.
@@ -462,13 +462,13 @@ func BenchmarkWAL_Log(b *testing.B) {
for _, compress := range []bool{true, false} {
b.Run(fmt.Sprintf("compress=%t", compress), func(b *testing.B) {
dir, err := ioutil.TempDir("", "bench_logsingle")
assert.NoError(b, err)
require.NoError(b, err)
defer func() {
assert.NoError(b, os.RemoveAll(dir))
require.NoError(b, os.RemoveAll(dir))
}()
w, err := New(nil, nil, dir, compress)
assert.NoError(b, err)
require.NoError(b, err)
defer w.Close()
var buf [2048]byte
@@ -476,7 +476,7 @@ func BenchmarkWAL_Log(b *testing.B) {
for i := 0; i < b.N; i++ {
err := w.Log(buf[:])
assert.NoError(b, err)
require.NoError(b, err)
}
// Stop timer to not count fsync time on close.
// If it's counted batched vs. single benchmarks are very similar but


@@ -24,7 +24,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/tsdb/record"
@@ -100,20 +100,20 @@ func TestTailSamples(t *testing.T) {
now := time.Now()
dir, err := ioutil.TempDir("", "readCheckpoint")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
wdir := path.Join(dir, "wal")
err = os.Mkdir(wdir, 0777)
assert.NoError(t, err)
require.NoError(t, err)
enc := record.Encoder{}
w, err := NewSize(nil, nil, wdir, 128*pageSize, compress)
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, w.Close())
require.NoError(t, w.Close())
}()
// Write to the initial segment then checkpoint.
@@ -125,7 +125,7 @@ func TestTailSamples(t *testing.T) {
Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
},
}, nil)
assert.NoError(t, w.Log(series))
require.NoError(t, w.Log(series))
for j := 0; j < samplesCount; j++ {
inner := rand.Intn(ref + 1)
@@ -136,13 +136,13 @@ func TestTailSamples(t *testing.T) {
V: float64(i),
},
}, nil)
assert.NoError(t, w.Log(sample))
require.NoError(t, w.Log(sample))
}
}
// Start read after checkpoint, no more data written.
first, last, err := Segments(w.Dir())
assert.NoError(t, err)
require.NoError(t, err)
wt := newWriteToMock()
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir)
@@ -152,7 +152,7 @@ func TestTailSamples(t *testing.T) {
watcher.setMetrics()
for i := first; i <= last; i++ {
segment, err := OpenReadSegment(SegmentName(watcher.walDir, i))
assert.NoError(t, err)
require.NoError(t, err)
defer segment.Close()
reader := NewLiveReader(nil, NewLiveReaderMetrics(nil), segment)
@@ -165,8 +165,8 @@ func TestTailSamples(t *testing.T) {
retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumLabels() >= expectedSeries
})
assert.Equal(t, expectedSeries, wt.checkNumLabels())
assert.Equal(t, expectedSamples, wt.samplesAppended)
require.Equal(t, expectedSeries, wt.checkNumLabels())
require.Equal(t, expectedSamples, wt.samplesAppended)
})
}
}
@@ -179,18 +179,18 @@ func TestReadToEndNoCheckpoint(t *testing.T) {
for _, compress := range []bool{false, true} {
t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
dir, err := ioutil.TempDir("", "readToEnd_noCheckpoint")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
wdir := path.Join(dir, "wal")
err = os.Mkdir(wdir, 0777)
assert.NoError(t, err)
require.NoError(t, err)
w, err := NewSize(nil, nil, wdir, 128*pageSize, compress)
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, w.Close())
require.NoError(t, w.Close())
}()
var recs [][]byte
@@ -218,15 +218,15 @@ func TestReadToEndNoCheckpoint(t *testing.T) {
// Randomly batch up records.
if rand.Intn(4) < 3 {
assert.NoError(t, w.Log(recs...))
require.NoError(t, w.Log(recs...))
recs = recs[:0]
}
}
}
assert.NoError(t, w.Log(recs...))
require.NoError(t, w.Log(recs...))
_, _, err = Segments(w.Dir())
assert.NoError(t, err)
require.NoError(t, err)
wt := newWriteToMock()
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir)
@@ -237,7 +237,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) {
return wt.checkNumLabels() >= expected
})
watcher.Stop()
assert.Equal(t, expected, wt.checkNumLabels())
require.Equal(t, expected, wt.checkNumLabels())
})
}
}
@@ -252,20 +252,20 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
for _, compress := range []bool{false, true} {
t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
dir, err := ioutil.TempDir("", "readToEnd_withCheckpoint")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
wdir := path.Join(dir, "wal")
err = os.Mkdir(wdir, 0777)
assert.NoError(t, err)
require.NoError(t, err)
enc := record.Encoder{}
w, err := NewSize(nil, nil, wdir, segmentSize, compress)
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, w.Close())
require.NoError(t, w.Close())
}()
// Write to the initial segment then checkpoint.
@@ -277,9 +277,9 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
},
}, nil)
assert.NoError(t, w.Log(series))
require.NoError(t, w.Log(series))
// Add in an unknown record type, which should be ignored.
assert.NoError(t, w.Log([]byte{255}))
require.NoError(t, w.Log([]byte{255}))
for j := 0; j < samplesCount; j++ {
inner := rand.Intn(ref + 1)
@@ -290,7 +290,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
V: float64(i),
},
}, nil)
assert.NoError(t, w.Log(sample))
require.NoError(t, w.Log(sample))
}
}
@@ -305,7 +305,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
},
}, nil)
assert.NoError(t, w.Log(series))
require.NoError(t, w.Log(series))
for j := 0; j < samplesCount; j++ {
sample := enc.Samples([]record.RefSample{
@@ -315,12 +315,12 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
V: float64(i),
},
}, nil)
assert.NoError(t, w.Log(sample))
require.NoError(t, w.Log(sample))
}
}
_, _, err = Segments(w.Dir())
assert.NoError(t, err)
require.NoError(t, err)
wt := newWriteToMock()
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir)
go watcher.Start()
@@ -330,7 +330,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
return wt.checkNumLabels() >= expected
})
watcher.Stop()
assert.Equal(t, expected, wt.checkNumLabels())
require.Equal(t, expected, wt.checkNumLabels())
})
}
}
@@ -343,22 +343,22 @@ func TestReadCheckpoint(t *testing.T) {
for _, compress := range []bool{false, true} {
t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
dir, err := ioutil.TempDir("", "readCheckpoint")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
wdir := path.Join(dir, "wal")
err = os.Mkdir(wdir, 0777)
assert.NoError(t, err)
require.NoError(t, err)
os.Create(SegmentName(wdir, 30))
enc := record.Encoder{}
w, err := NewSize(nil, nil, wdir, 128*pageSize, compress)
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, w.Close())
require.NoError(t, w.Close())
}()
// Write to the initial segment then checkpoint.
@@ -370,7 +370,7 @@ func TestReadCheckpoint(t *testing.T) {
Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
},
}, nil)
assert.NoError(t, w.Log(series))
require.NoError(t, w.Log(series))
for j := 0; j < samplesCount; j++ {
inner := rand.Intn(ref + 1)
@@ -381,7 +381,7 @@ func TestReadCheckpoint(t *testing.T) {
V: float64(i),
},
}, nil)
assert.NoError(t, w.Log(sample))
require.NoError(t, w.Log(sample))
}
}
Checkpoint(log.NewNopLogger(), w, 30, 31, func(x uint64) bool { return true }, 0)
@@ -389,7 +389,7 @@ func TestReadCheckpoint(t *testing.T) {
// Start read after checkpoint, no more data written.
_, _, err = Segments(w.Dir())
assert.NoError(t, err)
require.NoError(t, err)
wt := newWriteToMock()
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir)
@@ -400,7 +400,7 @@ func TestReadCheckpoint(t *testing.T) {
return wt.checkNumLabels() >= expectedSeries
})
watcher.Stop()
assert.Equal(t, expectedSeries, wt.checkNumLabels())
require.Equal(t, expectedSeries, wt.checkNumLabels())
})
}
}
@@ -415,18 +415,18 @@ func TestReadCheckpointMultipleSegments(t *testing.T) {
for _, compress := range []bool{false, true} {
t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
dir, err := ioutil.TempDir("", "readCheckpoint")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
wdir := path.Join(dir, "wal")
err = os.Mkdir(wdir, 0777)
assert.NoError(t, err)
require.NoError(t, err)
enc := record.Encoder{}
w, err := NewSize(nil, nil, wdir, pageSize, compress)
assert.NoError(t, err)
require.NoError(t, err)
// Write a bunch of data.
for i := 0; i < segments; i++ {
@@ -438,7 +438,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) {
Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", j)}},
},
}, nil)
assert.NoError(t, w.Log(series))
require.NoError(t, w.Log(series))
for k := 0; k < samplesCount; k++ {
inner := rand.Intn(ref + 1)
@@ -449,19 +449,19 @@ func TestReadCheckpointMultipleSegments(t *testing.T) {
V: float64(i),
},
}, nil)
assert.NoError(t, w.Log(sample))
require.NoError(t, w.Log(sample))
}
}
}
assert.NoError(t, w.Close())
require.NoError(t, w.Close())
// At this point we should have at least 6 segments; let's create a checkpoint dir of the first 5.
checkpointDir := dir + "/wal/checkpoint.000004"
err = os.Mkdir(checkpointDir, 0777)
assert.NoError(t, err)
require.NoError(t, err)
for i := 0; i <= 4; i++ {
err := os.Rename(SegmentName(dir+"/wal", i), SegmentName(checkpointDir, i))
assert.NoError(t, err)
require.NoError(t, err)
}
wt := newWriteToMock()
@@ -472,10 +472,10 @@ func TestReadCheckpointMultipleSegments(t *testing.T) {
watcher.setMetrics()
lastCheckpoint, _, err := LastCheckpoint(watcher.walDir)
assert.NoError(t, err)
require.NoError(t, err)
err = watcher.readCheckpoint(lastCheckpoint)
assert.NoError(t, err)
require.NoError(t, err)
})
}
}
@@ -497,20 +497,20 @@ func TestCheckpointSeriesReset(t *testing.T) {
for _, tc := range testCases {
t.Run(fmt.Sprintf("compress=%t", tc.compress), func(t *testing.T) {
dir, err := ioutil.TempDir("", "seriesReset")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
wdir := path.Join(dir, "wal")
err = os.Mkdir(wdir, 0777)
assert.NoError(t, err)
require.NoError(t, err)
enc := record.Encoder{}
w, err := NewSize(nil, nil, wdir, segmentSize, tc.compress)
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, w.Close())
require.NoError(t, w.Close())
}()
// Write to the initial segment, then checkpoint later.
@@ -522,7 +522,7 @@ func TestCheckpointSeriesReset(t *testing.T) {
Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
},
}, nil)
assert.NoError(t, w.Log(series))
require.NoError(t, w.Log(series))
for j := 0; j < samplesCount; j++ {
inner := rand.Intn(ref + 1)
@@ -533,12 +533,12 @@ func TestCheckpointSeriesReset(t *testing.T) {
V: float64(i),
},
}, nil)
assert.NoError(t, w.Log(sample))
require.NoError(t, w.Log(sample))
}
}
_, _, err = Segments(w.Dir())
assert.NoError(t, err)
require.NoError(t, err)
wt := newWriteToMock()
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir)
@@ -549,24 +549,24 @@ func TestCheckpointSeriesReset(t *testing.T) {
retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumLabels() >= expected
})
assert.Equal(t, seriesCount, wt.checkNumLabels())
require.Equal(t, seriesCount, wt.checkNumLabels())
_, err = Checkpoint(log.NewNopLogger(), w, 2, 4, func(x uint64) bool { return true }, 0)
assert.NoError(t, err)
require.NoError(t, err)
err = w.Truncate(5)
assert.NoError(t, err)
require.NoError(t, err)
_, cpi, err := LastCheckpoint(path.Join(dir, "wal"))
assert.NoError(t, err)
require.NoError(t, err)
err = watcher.garbageCollectSeries(cpi + 1)
assert.NoError(t, err)
require.NoError(t, err)
watcher.Stop()
// If you modify the checkpoint and truncate segment #'s, run the test to see how
// many series records you end up with, and change the last Equals check accordingly,
// or modify the Equals to Assert(len(wt.seriesLabels) < seriesCount*10).
assert.Equal(t, tc.segments, wt.checkNumLabels())
require.Equal(t, tc.segments, wt.checkNumLabels())
})
}
}
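
The watcher tests above poll with retry(t, defaultRetryInterval, defaultRetries, f) and only then assert the final state with require. The helper (and both constants) are defined elsewhere in the package; the sketch below is a plausible reconstruction under that assumption. Note that it only polls and logs, it does not fail the test itself; the require calls that follow it do:

package example

import (
    "testing"
    "time"
)

// retry calls f up to maxRetries times, sleeping interval between attempts,
// and returns as soon as f reports true.
func retry(t *testing.T, interval time.Duration, maxRetries int, f func() bool) {
    t.Helper()
    for i := 0; i < maxRetries; i++ {
        if f() {
            return
        }
        time.Sleep(interval)
    }
    t.Log("retry limit reached without the condition becoming true")
}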


@@ -27,7 +27,7 @@ import (
"time"
"github.com/go-kit/kit/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/tsdb/record"
@@ -37,44 +37,44 @@ import (
func TestSegmentWAL_cut(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test_wal_cut")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(tmpdir))
require.NoError(t, os.RemoveAll(tmpdir))
}()
// This calls cut() implicitly the first time without a previous tail.
w, err := OpenSegmentWAL(tmpdir, nil, 0, nil)
assert.NoError(t, err)
require.NoError(t, err)
assert.NoError(t, w.write(WALEntrySeries, 1, []byte("Hello World!!")))
require.NoError(t, w.write(WALEntrySeries, 1, []byte("Hello World!!")))
assert.NoError(t, w.cut())
require.NoError(t, w.cut())
// Cutting creates a new file.
assert.Equal(t, 2, len(w.files))
require.Equal(t, 2, len(w.files))
assert.NoError(t, w.write(WALEntrySeries, 1, []byte("Hello World!!")))
require.NoError(t, w.write(WALEntrySeries, 1, []byte("Hello World!!")))
assert.NoError(t, w.Close())
require.NoError(t, w.Close())
for _, of := range w.files {
f, err := os.Open(of.Name())
assert.NoError(t, err)
require.NoError(t, err)
// Verify header data.
metab := make([]byte, 8)
_, err = f.Read(metab)
assert.NoError(t, err)
assert.Equal(t, WALMagic, binary.BigEndian.Uint32(metab[:4]))
assert.Equal(t, WALFormatDefault, metab[4])
require.NoError(t, err)
require.Equal(t, WALMagic, binary.BigEndian.Uint32(metab[:4]))
require.Equal(t, WALFormatDefault, metab[4])
// We cannot actually check for correct pre-allocation as it is
// optional per filesystem and handled transparently.
et, flag, b, err := newWALReader(nil, nil).entry(f)
assert.NoError(t, err)
assert.Equal(t, WALEntrySeries, et)
assert.Equal(t, byte(walSeriesSimple), flag)
assert.Equal(t, []byte("Hello World!!"), b)
require.NoError(t, err)
require.Equal(t, WALEntrySeries, et)
require.Equal(t, byte(walSeriesSimple), flag)
require.Equal(t, []byte("Hello World!!"), b)
}
}
@@ -84,17 +84,17 @@ func TestSegmentWAL_Truncate(t *testing.T) {
batch = 100
)
series, err := labels.ReadLabels(filepath.Join("testdata", "20kseries.json"), numMetrics)
assert.NoError(t, err)
require.NoError(t, err)
dir, err := ioutil.TempDir("", "test_wal_log_truncate")
assert.NoError(t, err)
require.NoError(t, err)
defer func() {
assert.NoError(t, os.RemoveAll(dir))
require.NoError(t, os.RemoveAll(dir))
}()
w, err := OpenSegmentWAL(dir, nil, 0, nil)
assert.NoError(t, err)
defer func(wal *SegmentWAL) { assert.NoError(t, wal.Close()) }(w)
require.NoError(t, err)
defer func(wal *SegmentWAL) { require.NoError(t, wal.Close()) }(w)
w.segmentSize = 10000
for i := 0; i < numMetrics; i += batch {
@@ -104,7 +104,7 @@ func TestSegmentWAL_Truncate(t *testing.T) {
rs = append(rs, record.RefSeries{Labels: s, Ref: uint64(i+j) + 1})
}
err := w.LogSeries(rs)
assert.NoError(t, err)
require.NoError(t, err)
}
// We mark the 2nd half of the files with a min timestamp that should discard
@@ -126,7 +126,7 @@ func TestSegmentWAL_Truncate(t *testing.T) {
}
err = w.Truncate(1000, keepf)
assert.NoError(t, err)
require.NoError(t, err)
var expected []record.RefSeries
@@ -139,22 +139,22 @@ func TestSegmentWAL_Truncate(t *testing.T) {
// Call Truncate once again to see whether we can read the written file without
// creating a new WAL.
err = w.Truncate(1000, keepf)
assert.NoError(t, err)
assert.NoError(t, w.Close())
require.NoError(t, err)
require.NoError(t, w.Close())
// The same again with a new WAL.
w, err = OpenSegmentWAL(dir, nil, 0, nil)
assert.NoError(t, err)
defer func(wal *SegmentWAL) { assert.NoError(t, wal.Close()) }(w)
require.NoError(t, err)
defer func(wal *SegmentWAL) { require.NoError(t, wal.Close()) }(w)
var readSeries []record.RefSeries
r := w.Reader()
assert.NoError(t, r.Read(func(s []record.RefSeries) {
require.NoError(t, r.Read(func(s []record.RefSeries) {
readSeries = append(readSeries, s...)
}, nil, nil))
assert.Equal(t, expected, readSeries)
require.Equal(t, expected, readSeries)
}
// Symmetrical test of reading and writing to the WAL via its main interface.
@@ -167,12 +167,12 @@ func TestSegmentWAL_Log_Restore(t *testing.T) {
// Generate testing data. It does not make semantic sense but
// for the purpose of this test.
series, err := labels.ReadLabels(filepath.Join("testdata", "20kseries.json"), numMetrics)
require.NoError(t, err)
dir, err := ioutil.TempDir("", "test_wal_log_restore")
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
var (
@ -186,7 +186,7 @@ func TestSegmentWAL_Log_Restore(t *testing.T) {
// write more data to it, close it.
for k := 0; k < numMetrics; k += numMetrics / iterations {
w, err := OpenSegmentWAL(dir, nil, 0, nil)
require.NoError(t, err)
// Set smaller segment size so we can actually write several files.
w.segmentSize = 1000 * 1000
@ -222,11 +222,11 @@ func TestSegmentWAL_Log_Restore(t *testing.T) {
}
}
require.NoError(t, r.Read(serf, smplf, delf))
require.Equal(t, recordedSamples, resultSamples)
require.Equal(t, recordedSeries, resultSeries)
require.Equal(t, recordedDeletes, resultDeletes)
series := series[k : k+(numMetrics/iterations)]
@ -257,9 +257,9 @@ func TestSegmentWAL_Log_Restore(t *testing.T) {
})
}
require.NoError(t, w.LogSeries(series))
require.NoError(t, w.LogSamples(samples))
require.NoError(t, w.LogDeletes(stones))
if len(lbls) > 0 {
recordedSeries = append(recordedSeries, series)
@ -273,47 +273,47 @@ func TestSegmentWAL_Log_Restore(t *testing.T) {
}
}
require.NoError(t, w.Close())
}
}
func TestWALRestoreCorrupted_invalidSegment(t *testing.T) {
dir, err := ioutil.TempDir("", "test_wal_log_restore")
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
wal, err := OpenSegmentWAL(dir, nil, 0, nil)
require.NoError(t, err)
defer func(wal *SegmentWAL) { require.NoError(t, wal.Close()) }(wal)
_, err = wal.createSegmentFile(filepath.Join(dir, "000000"))
require.NoError(t, err)
f, err := wal.createSegmentFile(filepath.Join(dir, "000001"))
require.NoError(t, err)
f2, err := wal.createSegmentFile(filepath.Join(dir, "000002"))
require.NoError(t, err)
require.NoError(t, f2.Close())
// Make header of second segment invalid.
_, err = f.WriteAt([]byte{1, 2, 3, 4}, 0)
require.NoError(t, err)
require.NoError(t, f.Close())
require.NoError(t, wal.Close())
wal, err = OpenSegmentWAL(dir, log.NewLogfmtLogger(os.Stderr), 0, nil)
require.NoError(t, err)
defer func(wal *SegmentWAL) { require.NoError(t, wal.Close()) }(wal)
files, err := ioutil.ReadDir(dir)
require.NoError(t, err)
fns := []string{}
for _, f := range files {
fns = append(fns, f.Name())
}
require.Equal(t, []string{"000000"}, fns)
}
// Test reading from a WAL that has been corrupted through various means.
@ -326,56 +326,56 @@ func TestWALRestoreCorrupted(t *testing.T) {
name: "truncate_checksum",
f: func(t *testing.T, w *SegmentWAL) {
f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666)
require.NoError(t, err)
defer f.Close()
off, err := f.Seek(0, io.SeekEnd)
require.NoError(t, err)
require.NoError(t, f.Truncate(off-1))
},
},
{
name: "truncate_body",
f: func(t *testing.T, w *SegmentWAL) {
f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666)
require.NoError(t, err)
defer f.Close()
off, err := f.Seek(0, io.SeekEnd)
require.NoError(t, err)
require.NoError(t, f.Truncate(off-8))
},
},
{
name: "body_content",
f: func(t *testing.T, w *SegmentWAL) {
f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666)
require.NoError(t, err)
defer f.Close()
off, err := f.Seek(0, io.SeekEnd)
require.NoError(t, err)
// Write junk before checksum starts.
_, err = f.WriteAt([]byte{1, 2, 3, 4}, off-8)
require.NoError(t, err)
},
},
{
name: "checksum",
f: func(t *testing.T, w *SegmentWAL) {
f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0666)
require.NoError(t, err)
defer f.Close()
off, err := f.Seek(0, io.SeekEnd)
require.NoError(t, err)
// Write junk into checksum
_, err = f.WriteAt([]byte{1, 2, 3, 4}, off-4)
require.NoError(t, err)
},
},
}
@ -384,29 +384,29 @@ func TestWALRestoreCorrupted(t *testing.T) {
// Generate testing data. It does not make semantic sense, but it is
// sufficient for the purpose of this test.
dir, err := ioutil.TempDir("", "test_corrupted")
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
w, err := OpenSegmentWAL(dir, nil, 0, nil)
require.NoError(t, err)
defer func(wal *SegmentWAL) { require.NoError(t, wal.Close()) }(w)
require.NoError(t, w.LogSamples([]record.RefSample{{T: 1, V: 2}}))
require.NoError(t, w.LogSamples([]record.RefSample{{T: 2, V: 3}}))
require.NoError(t, w.cut())
// Sleep 2 seconds to avoid a race where cut and the test "cases" function may
// write to or truncate the file out of order, since "cases" is not synchronized
// with cut. Hopefully cut completes within those 2 seconds.
time.Sleep(2 * time.Second)
require.NoError(t, w.LogSamples([]record.RefSample{{T: 3, V: 4}}))
require.NoError(t, w.LogSamples([]record.RefSample{{T: 5, V: 6}}))
require.NoError(t, w.Close())
// cut() truncates and fsyncs the first segment async. If it happens after
// the corruption we apply below, the corruption will be overwritten again.
@ -421,41 +421,41 @@ func TestWALRestoreCorrupted(t *testing.T) {
logger := log.NewLogfmtLogger(os.Stderr)
w2, err := OpenSegmentWAL(dir, logger, 0, nil)
require.NoError(t, err)
defer func(wal *SegmentWAL) { require.NoError(t, wal.Close()) }(w2)
r := w2.Reader()
serf := func(l []record.RefSeries) {
require.Equal(t, 0, len(l))
}
// Weird hack to check order of reads.
i := 0
samplef := func(s []record.RefSample) {
if i == 0 {
require.Equal(t, []record.RefSample{{T: 1, V: 2}}, s)
i++
} else {
require.Equal(t, []record.RefSample{{T: 99, V: 100}}, s)
}
}
require.NoError(t, r.Read(serf, samplef, nil))
require.NoError(t, w2.LogSamples([]record.RefSample{{T: 99, V: 100}}))
require.NoError(t, w2.Close())
// We should see the first valid entry and the new one, everything after
// is truncated.
w3, err := OpenSegmentWAL(dir, logger, 0, nil)
require.NoError(t, err)
defer func(wal *SegmentWAL) { require.NoError(t, wal.Close()) }(w3)
r = w3.Reader()
i = 0
require.NoError(t, r.Read(serf, samplef, nil))
})
}
}
@ -464,75 +464,75 @@ func TestMigrateWAL_Empty(t *testing.T) {
// The migration procedure must properly deal with a zero-length segment,
// which is valid in the new format.
dir, err := ioutil.TempDir("", "walmigrate")
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
wdir := path.Join(dir, "wal")
// Initialize empty WAL.
w, err := wal.New(nil, nil, wdir, false)
require.NoError(t, err)
require.NoError(t, w.Close())
require.NoError(t, MigrateWAL(nil, wdir))
}
func TestMigrateWAL_Fuzz(t *testing.T) {
dir, err := ioutil.TempDir("", "walmigrate")
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
wdir := path.Join(dir, "wal")
// Should pass if no WAL exists yet.
require.NoError(t, MigrateWAL(nil, wdir))
oldWAL, err := OpenSegmentWAL(wdir, nil, time.Minute, nil)
require.NoError(t, err)
// Write some data.
require.NoError(t, oldWAL.LogSeries([]record.RefSeries{
{Ref: 100, Labels: labels.FromStrings("abc", "def", "123", "456")},
{Ref: 1, Labels: labels.FromStrings("abc", "def2", "1234", "4567")},
}))
require.NoError(t, oldWAL.LogSamples([]record.RefSample{
{Ref: 1, T: 100, V: 200},
{Ref: 2, T: 300, V: 400},
}))
require.NoError(t, oldWAL.LogSeries([]record.RefSeries{
{Ref: 200, Labels: labels.FromStrings("xyz", "def", "foo", "bar")},
}))
require.NoError(t, oldWAL.LogSamples([]record.RefSample{
{Ref: 3, T: 100, V: 200},
{Ref: 4, T: 300, V: 400},
}))
require.NoError(t, oldWAL.LogDeletes([]tombstones.Stone{
{Ref: 1, Intervals: []tombstones.Interval{{Mint: 100, Maxt: 200}}},
}))
require.NoError(t, oldWAL.Close())
// Perform migration.
require.NoError(t, MigrateWAL(nil, wdir))
w, err := wal.New(nil, nil, wdir, false)
require.NoError(t, err)
// We can properly write some new data after migration.
var enc record.Encoder
require.NoError(t, w.Log(enc.Samples([]record.RefSample{
{Ref: 500, T: 1, V: 1},
}, nil)))
require.NoError(t, w.Close())
// Read back all data.
sr, err := wal.NewSegmentsReader(wdir)
require.NoError(t, err)
r := wal.NewReader(sr)
var res []interface{}
@ -544,23 +544,23 @@ func TestMigrateWAL_Fuzz(t *testing.T) {
switch dec.Type(rec) {
case record.Series:
s, err := dec.Series(rec, nil)
require.NoError(t, err)
res = append(res, s)
case record.Samples:
s, err := dec.Samples(rec, nil)
require.NoError(t, err)
res = append(res, s)
case record.Tombstones:
s, err := dec.Tombstones(rec, nil)
require.NoError(t, err)
res = append(res, s)
default:
t.Fatalf("unknown record type %d", dec.Type(rec))
}
}
require.NoError(t, r.Err())
require.Equal(t, []interface{}{
[]record.RefSeries{
{Ref: 100, Labels: labels.FromStrings("abc", "def", "123", "456")},
{Ref: 1, Labels: labels.FromStrings("abc", "def2", "1234", "4567")},
@ -575,5 +575,5 @@ func TestMigrateWAL_Fuzz(t *testing.T) {
}, res)
// Migrating an already migrated WAL shouldn't do anything.
require.NoError(t, MigrateWAL(nil, wdir))
}

View file

@ -22,7 +22,7 @@ import (
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
@ -139,32 +139,32 @@ func NewTemporaryDirectory(name string, t T) (handler TemporaryDirectory) {
func DirHash(t *testing.T, path string) []byte {
hash := sha256.New()
err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
require.NoError(t, err)
if info.IsDir() {
return nil
}
f, err := os.Open(path)
require.NoError(t, err)
defer f.Close()
_, err = io.Copy(hash, f)
require.NoError(t, err)
_, err = io.WriteString(hash, strconv.Itoa(int(info.Size())))
require.NoError(t, err)
_, err = io.WriteString(hash, info.Name())
require.NoError(t, err)
modTime, err := info.ModTime().GobEncode()
require.NoError(t, err)
_, err = io.WriteString(hash, string(modTime))
require.NoError(t, err)
return nil
})
require.NoError(t, err)
return hash.Sum(nil)
}
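A typical use of DirHash is to assert that an operation left a directory's contents untouched by comparing hashes taken before and after. A minimal sketch, assuming the helper lives at util/testutil as in this repository and Go 1.15+ for t.TempDir (the test name and the no-op under test are invented):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/util/testutil"
)

func TestNoopLeavesDirUnchanged(t *testing.T) {
	dir := t.TempDir() // hypothetical fixture directory

	before := testutil.DirHash(t, dir)
	// ...run the operation that must not modify dir...
	after := testutil.DirHash(t, dir)

	// require stops the test immediately on a mismatch.
	require.Equal(t, before, after, "directory contents changed unexpectedly")
}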

vendor/github.com/stretchr/testify/require/doc.go (generated, vendored, new file)
View file

@ -0,0 +1,28 @@
// Package require implements the same assertions as the `assert` package but
// stops test execution when a test fails.
//
// Example Usage
//
// The following is a complete example using require in a standard test function:
// import (
// "testing"
// "github.com/stretchr/testify/require"
// )
//
// func TestSomething(t *testing.T) {
//
// var a string = "Hello"
// var b string = "Hello"
//
// require.Equal(t, a, b, "The two words should be the same.")
//
// }
//
// Assertions
//
// The `require` package has the same global functions as the `assert` package,
// but instead of returning a boolean result they call `t.FailNow()`.
//
// Every assertion function also takes an optional string message as the final argument,
// allowing custom error messages to be appended to the message the assertion method outputs.
package require
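This difference in control flow is exactly what the commit is after: assert marks the test failed and keeps executing, while require fails fast. A minimal sketch contrasting the two (test name and values invented):

package example_test

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestControlFlow(t *testing.T) {
	err := errors.New("boom")

	assert.NoError(t, err) // marks the test failed; execution continues
	t.Log("still reached") // runs despite the failure above

	require.NoError(t, err) // marks the test failed, then calls t.FailNow()
	t.Log("never reached")  // unreachable: FailNow aborted the test
}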

View file

@ -0,0 +1,16 @@
package require
// Assertions provides assertion methods around the
// TestingT interface.
type Assertions struct {
t TestingT
}
// New makes a new Assertions object for the specified TestingT.
func New(t TestingT) *Assertions {
return &Assertions{
t: t,
}
}
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require_forward.go.tmpl -include-format-funcs"
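For callers that prefer not to pass t into every call, require.New binds it once and the methods forward to the package-level functions. A short usage sketch (test name invented):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestWithBoundT(t *testing.T) {
	r := require.New(t)

	r.Equal("abc", "abc") // forwards to require.Equal(t, "abc", "abc")
	r.NoError(nil)        // forwards to require.NoError(t, nil)
}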

vendor/github.com/stretchr/testify/require/require.go (generated, vendored, new file)

File diff suppressed because it is too large.

View file

@ -0,0 +1,6 @@
{{.Comment}}
func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
if h, ok := t.(tHelper); ok { h.Helper() }
if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return }
t.FailNow()
}
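Expanded for a concrete assertion such as NoError, the template above yields a wrapper of roughly this shape (a hand-expanded sketch, not the vendored output):

// NoError wraps assert.NoError and aborts the test on failure.
func NoError(t TestingT, err error, msgAndArgs ...interface{}) {
	if h, ok := t.(tHelper); ok {
		h.Helper()
	}
	if assert.NoError(t, err, msgAndArgs...) {
		return
	}
	t.FailNow()
}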

File diff suppressed because it is too large.

View file

@ -0,0 +1,5 @@
{{.CommentWithoutT "a"}}
func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) {
if h, ok := a.t.(tHelper); ok { h.Helper() }
{{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
}
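Expanded the same way, each method on *Assertions is a thin forwarder to the package-level function (again a sketch):

// NoError forwards to the package-level NoError with the stored TestingT.
func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) {
	if h, ok := a.t.(tHelper); ok {
		h.Helper()
	}
	NoError(a.t, err, msgAndArgs...)
}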

View file

@ -0,0 +1,29 @@
package require
// TestingT is an interface wrapper around *testing.T
type TestingT interface {
Errorf(format string, args ...interface{})
FailNow()
}
type tHelper interface {
Helper()
}
// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful
// for table driven tests.
type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{})
// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful
// for table driven tests.
type ValueAssertionFunc func(TestingT, interface{}, ...interface{})
// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful
// for table driven tests.
type BoolAssertionFunc func(TestingT, bool, ...interface{})
// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful
// for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{})
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require.go.tmpl -include-format-funcs"
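These function types let table-driven tests carry the expected outcome in the table itself. A minimal sketch using ErrorAssertionFunc (test name and inputs invented):

package example_test

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestParseInt(t *testing.T) {
	tests := []struct {
		input     string
		assertErr require.ErrorAssertionFunc
	}{
		{"1582468023986", require.NoError}, // valid int64
		{"not-a-number", require.Error},    // parse failure expected
	}
	for _, tc := range tests {
		_, err := strconv.ParseInt(tc.input, 10, 64)
		tc.assertErr(t, err, "input=%q", tc.input)
	}
}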

vendor/modules.txt (vendored)
View file

@ -411,6 +411,7 @@ github.com/sirupsen/logrus
# github.com/stretchr/testify v1.6.1
## explicit
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# github.com/uber/jaeger-client-go v2.25.0+incompatible
## explicit
github.com/uber/jaeger-client-go

View file

@ -41,7 +41,7 @@ import (
"github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
"github.com/prometheus/common/route"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/pkg/gate"
@ -309,10 +309,10 @@ func TestEndpoints(t *testing.T) {
test_metric4{foo="boo", dup="1"} 1+0x100
test_metric4{foo="boo"} 1+0x100
`)
require.NoError(t, err)
defer suite.Close()
require.NoError(t, suite.Run())
now := time.Now()
@ -349,13 +349,13 @@ func TestEndpoints(t *testing.T) {
defer server.Close()
u, err := url.Parse(server.URL)
require.NoError(t, err)
al := promlog.AllowedLevel{}
require.NoError(t, al.Set("debug"))
af := promlog.AllowedFormat{}
require.NoError(t, af.Set("logfmt"))
promlogConfig := promlog.Config{
Level: &al,
@ -363,7 +363,7 @@ func TestEndpoints(t *testing.T) {
}
dbDir, err := ioutil.TempDir("", "tsdb-api-ready")
require.NoError(t, err)
defer os.RemoveAll(dbDir)
remote := remote.NewStorage(promlog.New(&promlogConfig), prometheus.DefaultRegisterer, nil, dbDir, 1*time.Second)
@ -377,7 +377,7 @@ func TestEndpoints(t *testing.T) {
},
},
})
require.NoError(t, err)
var algr rulesRetrieverMock
algr.testing = t
@ -415,9 +415,9 @@ func TestLabelNames(t *testing.T) {
test_metric2{foo="boo"} 1+0x100
test_metric2{foo="boo", xyz="qwerty"} 1+0x100
`)
require.NoError(t, err)
defer suite.Close()
require.NoError(t, suite.Run())
api := &API{
Queryable: suite.Storage(),
@ -433,7 +433,7 @@ func TestLabelNames(t *testing.T) {
for _, method := range []string{http.MethodGet, http.MethodPost} {
ctx := context.Background()
req, err := request(method)
require.NoError(t, err)
res := api.labelNames(req.WithContext(ctx))
assertAPIError(t, res.err, "")
assertAPIResponse(t, res.data, []string{"__name__", "baz", "foo", "foo1", "foo2", "xyz"})
@ -1784,7 +1784,7 @@ func assertAPIError(t *testing.T, got *apiError, exp errorType) {
func assertAPIResponse(t *testing.T, got interface{}, exp interface{}) {
t.Helper()
require.Equal(t, exp, got)
}
func assertAPIResponseLength(t *testing.T, got interface{}, expLen int) {
@ -1805,12 +1805,12 @@ func TestSampledReadEndpoint(t *testing.T) {
load 1m
test_metric1{foo="bar",baz="qux"} 1
`)
require.NoError(t, err)
defer suite.Close()
err = suite.Run()
require.NoError(t, err)
api := &API{
Queryable: suite.Storage(),
@ -1833,21 +1833,21 @@ func TestSampledReadEndpoint(t *testing.T) {
// Encode the request.
matcher1, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_metric1")
require.NoError(t, err)
matcher2, err := labels.NewMatcher(labels.MatchEqual, "d", "e")
require.NoError(t, err)
query, err := remote.ToQuery(0, 1, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{Step: 0, Func: "avg"})
require.NoError(t, err)
req := &prompb.ReadRequest{Queries: []*prompb.Query{query}}
data, err := proto.Marshal(req)
require.NoError(t, err)
compressed := snappy.Encode(nil, data)
request, err := http.NewRequest("POST", "", bytes.NewBuffer(compressed))
require.NoError(t, err)
recorder := httptest.NewRecorder()
api.remoteRead(recorder, request)
@ -1856,25 +1856,25 @@ func TestSampledReadEndpoint(t *testing.T) {
t.Fatal(recorder.Code)
}
require.Equal(t, "application/x-protobuf", recorder.Result().Header.Get("Content-Type"))
require.Equal(t, "snappy", recorder.Result().Header.Get("Content-Encoding"))
// Decode the response.
compressed, err = ioutil.ReadAll(recorder.Result().Body)
require.NoError(t, err)
uncompressed, err := snappy.Decode(nil, compressed)
require.NoError(t, err)
var resp prompb.ReadResponse
err = proto.Unmarshal(uncompressed, &resp)
require.NoError(t, err)
if len(resp.Results) != 1 {
t.Fatalf("Expected 1 result, got %d", len(resp.Results))
}
require.Equal(t, &prompb.QueryResult{
Timeseries: []*prompb.TimeSeries{
{
Labels: []prompb.Label{
@ -1900,11 +1900,11 @@ func TestStreamReadEndpoint(t *testing.T) {
test_metric1{foo="bar2",baz="qux"} 0+100x120
test_metric1{foo="bar3",baz="qux"} 0+100x240
`)
require.NoError(t, err)
defer suite.Close()
require.NoError(t, suite.Run())
api := &API{
Queryable: suite.Storage(),
@ -1929,13 +1929,13 @@ func TestStreamReadEndpoint(t *testing.T) {
// Encode the request.
matcher1, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_metric1")
require.NoError(t, err)
matcher2, err := labels.NewMatcher(labels.MatchEqual, "d", "e")
require.NoError(t, err)
matcher3, err := labels.NewMatcher(labels.MatchEqual, "foo", "bar1")
require.NoError(t, err)
query1, err := remote.ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{
Step: 1,
@ -1943,7 +1943,7 @@ func TestStreamReadEndpoint(t *testing.T) {
Start: 0,
End: 14400001,
})
require.NoError(t, err)
query2, err := remote.ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher3}, &storage.SelectHints{
Step: 1,
@ -1951,18 +1951,18 @@ func TestStreamReadEndpoint(t *testing.T) {
Start: 0,
End: 14400001,
})
require.NoError(t, err)
req := &prompb.ReadRequest{
Queries: []*prompb.Query{query1, query2},
AcceptedResponseTypes: []prompb.ReadRequest_ResponseType{prompb.ReadRequest_STREAMED_XOR_CHUNKS},
}
data, err := proto.Marshal(req)
require.NoError(t, err)
compressed := snappy.Encode(nil, data)
request, err := http.NewRequest("POST", "", bytes.NewBuffer(compressed))
require.NoError(t, err)
recorder := httptest.NewRecorder()
api.remoteRead(recorder, request)
@ -1971,8 +1971,8 @@ func TestStreamReadEndpoint(t *testing.T) {
t.Fatal(recorder.Code)
}
require.Equal(t, "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse", recorder.Result().Header.Get("Content-Type"))
require.Equal(t, "", recorder.Result().Header.Get("Content-Encoding"))
var results []*prompb.ChunkedReadResponse
stream := remote.NewChunkedReader(recorder.Result().Body, remote.DefaultChunkedReadLimit, nil)
@ -1982,7 +1982,7 @@ func TestStreamReadEndpoint(t *testing.T) {
if err == io.EOF {
break
}
require.NoError(t, err)
results = append(results, res)
}
@ -1990,7 +1990,7 @@ func TestStreamReadEndpoint(t *testing.T) {
t.Fatalf("Expected 5 results, got %d", len(results))
}
require.Equal(t, []*prompb.ChunkedReadResponse{
{
ChunkedSeries: []*prompb.ChunkedSeries{
{
@ -2294,7 +2294,7 @@ func TestAdminEndpoints(t *testing.T) {
tc := tc
t.Run("", func(t *testing.T) {
dir, _ := ioutil.TempDir("", "fakeDB")
defer func() { require.NoError(t, os.RemoveAll(dir)) }()
api := &API{
db: tc.db,
@ -2305,7 +2305,7 @@ func TestAdminEndpoints(t *testing.T) {
endpoint := tc.endpoint(api)
req, err := http.NewRequest(tc.method, fmt.Sprintf("?%s", tc.values.Encode()), nil)
require.NoError(t, err)
res := setUnavailStatusOnTSDBNotReady(endpoint(req))
assertAPIError(t, res.err, tc.errType)
@ -2346,7 +2346,7 @@ func TestRespondSuccess(t *testing.T) {
Status: statusSuccess,
Data: "test",
}
require.Equal(t, exp, &res)
}
func TestRespondError(t *testing.T) {
@ -2384,7 +2384,7 @@ func TestRespondError(t *testing.T) {
ErrorType: errorTimeout,
Error: "message",
}
require.Equal(t, exp, &res)
}
func TestParseTimeParam(t *testing.T) {
@ -2394,7 +2394,7 @@ func TestParseTimeParam(t *testing.T) {
}
ts, err := parseTime("1582468023986")
require.NoError(t, err)
var tests = []struct {
paramName string
@ -2436,15 +2436,15 @@ func TestParseTimeParam(t *testing.T) {
for _, test := range tests {
req, err := http.NewRequest("GET", "localhost:42/foo?"+test.paramName+"="+test.paramValue, nil)
require.NoError(t, err)
result := test.result
asTime, err := parseTimeParam(req, test.paramName, test.defaultValue)
if err != nil {
require.EqualError(t, err, result.asError().Error())
} else {
require.True(t, asTime.Equal(result.asTime), "time as return value: %s not parsed correctly. Expected %s. Actual %s", test.paramValue, result.asTime, asTime)
}
}
}
@ -2756,8 +2756,8 @@ func TestReturnAPIError(t *testing.T) {
for _, c := range cases {
actual := returnAPIError(c.err)
require.Error(t, actual)
require.Equal(t, c.expected, actual.typ)
}
}

View file

@ -25,7 +25,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/pkg/labels"
@ -221,8 +221,8 @@ func TestFederation(t *testing.T) {
res := httptest.NewRecorder()
h.federation(res, req)
require.Equal(t, scenario.code, res.Code)
require.Equal(t, scenario.body, normalizeBody(res.Body))
})
}
}
@ -263,10 +263,10 @@ func TestFederation_NotReady(t *testing.T) {
h.federation(res, req)
if scenario.code == http.StatusBadRequest {
// Request are expected to be checked before DB readiness.
require.Equal(t, http.StatusBadRequest, res.Code)
return
}
require.Equal(t, http.StatusServiceUnavailable, res.Code)
})
}
}

View file

@ -32,7 +32,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/notifier"
@ -86,12 +86,12 @@ func TestGlobalURL(t *testing.T) {
for _, test := range tests {
inURL, err := url.Parse(test.inURL)
require.NoError(t, err)
globalURL := tmplFuncs("", opts)["globalURL"].(func(u *url.URL) *url.URL)
outURL := globalURL(inURL)
require.Equal(t, test.outURL, outURL.String())
}
}
@ -107,11 +107,11 @@ func TestReadyAndHealthy(t *testing.T) {
t.Parallel()
dbDir, err := ioutil.TempDir("", "tsdb-ready")
require.NoError(t, err)
defer func() { require.NoError(t, os.RemoveAll(dbDir)) }()
db, err := tsdb.Open(dbDir, nil, nil, nil)
require.NoError(t, err)
opts := &Options{
ListenAddress: ":9090",
@ -157,8 +157,8 @@ func TestReadyAndHealthy(t *testing.T) {
time.Sleep(5 * time.Second)
resp, err := http.Get("http://localhost:9090/-/healthy")
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
cleanupTestResponse(t, resp)
for _, u := range []string{
@ -173,19 +173,19 @@ func TestReadyAndHealthy(t *testing.T) {
"http://localhost:9090/config",
} {
resp, err = http.Get(u)
require.NoError(t, err)
require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
cleanupTestResponse(t, resp)
}
resp, err = http.Post("http://localhost:9090/api/v1/admin/tsdb/snapshot", "", strings.NewReader(""))
require.NoError(t, err)
require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
cleanupTestResponse(t, resp)
resp, err = http.Post("http://localhost:9090/api/v1/admin/tsdb/delete_series", "", strings.NewReader("{}"))
require.NoError(t, err)
require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
cleanupTestResponse(t, resp)
// Set to ready.
@ -204,31 +204,31 @@ func TestReadyAndHealthy(t *testing.T) {
"http://localhost:9090/config",
} {
resp, err = http.Get(u)
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
cleanupTestResponse(t, resp)
}
resp, err = http.Post("http://localhost:9090/api/v1/admin/tsdb/snapshot", "", strings.NewReader(""))
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
cleanupSnapshot(t, dbDir, resp)
cleanupTestResponse(t, resp)
resp, err = http.Post("http://localhost:9090/api/v1/admin/tsdb/delete_series?match[]=up", "", nil)
require.NoError(t, err)
require.Equal(t, http.StatusNoContent, resp.StatusCode)
cleanupTestResponse(t, resp)
}
func TestRoutePrefix(t *testing.T) {
t.Parallel()
dbDir, err := ioutil.TempDir("", "tsdb-ready")
require.NoError(t, err)
defer func() { require.NoError(t, os.RemoveAll(dbDir)) }()
db, err := tsdb.Open(dbDir, nil, nil, nil)
require.NoError(t, err)
opts := &Options{
ListenAddress: ":9091",
@ -267,57 +267,57 @@ func TestRoutePrefix(t *testing.T) {
time.Sleep(5 * time.Second)
resp, err := http.Get("http://localhost:9091" + opts.RoutePrefix + "/-/healthy")
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
cleanupTestResponse(t, resp)
resp, err = http.Get("http://localhost:9091" + opts.RoutePrefix + "/-/ready")
require.NoError(t, err)
require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
cleanupTestResponse(t, resp)
resp, err = http.Get("http://localhost:9091" + opts.RoutePrefix + "/version")
require.NoError(t, err)
require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
cleanupTestResponse(t, resp)
resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v1/admin/tsdb/snapshot", "", strings.NewReader(""))
require.NoError(t, err)
require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
cleanupTestResponse(t, resp)
resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v1/admin/tsdb/delete_series", "", strings.NewReader("{}"))
require.NoError(t, err)
require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
cleanupTestResponse(t, resp)
// Set to ready.
webHandler.Ready()
resp, err = http.Get("http://localhost:9091" + opts.RoutePrefix + "/-/healthy")
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
cleanupTestResponse(t, resp)
resp, err = http.Get("http://localhost:9091" + opts.RoutePrefix + "/-/ready")
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
cleanupTestResponse(t, resp)
resp, err = http.Get("http://localhost:9091" + opts.RoutePrefix + "/version")
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
cleanupTestResponse(t, resp)
resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v1/admin/tsdb/snapshot", "", strings.NewReader(""))
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
cleanupSnapshot(t, dbDir, resp)
cleanupTestResponse(t, resp)
resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v1/admin/tsdb/delete_series?match[]=up", "", nil)
require.NoError(t, err)
require.Equal(t, http.StatusNoContent, resp.StatusCode)
cleanupTestResponse(t, resp)
}
@ -350,11 +350,11 @@ func TestDebugHandler(t *testing.T) {
req, err := http.NewRequest("GET", tc.url, nil)
require.NoError(t, err)
handler.router.ServeHTTP(w, req)
require.Equal(t, tc.code, w.Code)
}
}
@ -373,33 +373,33 @@ func TestHTTPMetrics(t *testing.T) {
w := httptest.NewRecorder()
req, err := http.NewRequest("GET", "/-/ready", nil)
require.NoError(t, err)
handler.router.ServeHTTP(w, req)
return w.Code
}
code := getReady()
require.Equal(t, http.StatusServiceUnavailable, code)
counter := handler.metrics.requestCounter
require.Equal(t, 1, int(prom_testutil.ToFloat64(counter.WithLabelValues("/-/ready", strconv.Itoa(http.StatusServiceUnavailable)))))
handler.Ready()
for range [2]int{} {
code = getReady()
require.Equal(t, http.StatusOK, code)
}
require.Equal(t, 2, int(prom_testutil.ToFloat64(counter.WithLabelValues("/-/ready", strconv.Itoa(http.StatusOK)))))
require.Equal(t, 1, int(prom_testutil.ToFloat64(counter.WithLabelValues("/-/ready", strconv.Itoa(http.StatusServiceUnavailable)))))
}
func TestShutdownWithStaleConnection(t *testing.T) {
dbDir, err := ioutil.TempDir("", "tsdb-ready")
require.NoError(t, err)
defer func() { require.NoError(t, os.RemoveAll(dbDir)) }()
db, err := tsdb.Open(dbDir, nil, nil, nil)
require.NoError(t, err)
timeout := 10 * time.Second
@ -450,8 +450,8 @@ func TestShutdownWithStaleConnection(t *testing.T) {
// Open a socket, and don't use it. This connection should then be closed
// after the ReadTimeout.
c, err := net.Dial("tcp", "localhost:9090")
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, c.Close()) })
// Stop the web handler.
cancel()
@ -465,8 +465,8 @@ func TestShutdownWithStaleConnection(t *testing.T) {
func cleanupTestResponse(t *testing.T, resp *http.Response) {
_, err := io.Copy(ioutil.Discard, resp.Body)
require.NoError(t, err)
require.NoError(t, resp.Body.Close())
}
func cleanupSnapshot(t *testing.T, dbDir string, resp *http.Response) {
@ -476,9 +476,9 @@ func cleanupSnapshot(t *testing.T, dbDir string, resp *http.Response) {
} `json:"data"`
}{}
b, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
require.NoError(t, json.Unmarshal(b, snapshot))
require.NotZero(t, snapshot.Data.Name, "snapshot directory not returned")
require.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots", snapshot.Data.Name)))
require.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots")))
}