diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index 463b01ea8..7246f3012 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -16,6 +16,7 @@ package main
 import (
 	"context"
+	"errors"
 	"fmt"
 	"math"
 	"math/bits"
@@ -38,7 +39,6 @@ import (
 	"github.com/grafana/regexp"
 	conntrack "github.com/mwitkow/go-conntrack"
 	"github.com/oklog/run"
-	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/promlog"
@@ -397,7 +397,7 @@ func main() {
 	_, err := a.Parse(os.Args[1:])
 	if err != nil {
-		fmt.Fprintln(os.Stderr, errors.Wrapf(err, "Error parsing commandline arguments"))
+		fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing commandline arguments: %w", err))
 		a.Usage(os.Args[1:])
 		os.Exit(2)
 	}
@@ -405,7 +405,7 @@ func main() {
 	logger := promlog.New(&cfg.promlogConfig)
 	if err := cfg.setFeatureListOptions(logger); err != nil {
-		fmt.Fprintln(os.Stderr, errors.Wrapf(err, "Error parsing feature list"))
+		fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing feature list: %w", err))
 		os.Exit(1)
 	}
@@ -426,13 +426,13 @@ func main() {
 	cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddress)
 	if err != nil {
-		fmt.Fprintln(os.Stderr, errors.Wrapf(err, "parse external URL %q", cfg.prometheusURL))
+		fmt.Fprintln(os.Stderr, fmt.Errorf("parse external URL %q: %w", cfg.prometheusURL, err))
 		os.Exit(2)
 	}
 	cfg.web.CORSOrigin, err = compileCORSRegexString(cfg.corsRegexString)
 	if err != nil {
-		fmt.Fprintln(os.Stderr, errors.Wrapf(err, "could not compile CORS regex string %q", cfg.corsRegexString))
+		fmt.Fprintln(os.Stderr, fmt.Errorf("could not compile CORS regex string %q: %w", cfg.corsRegexString, err))
 		os.Exit(2)
 	}
@@ -732,7 +732,7 @@ func main() {
 			fs, err := filepath.Glob(pat)
 			if err != nil {
 				// The only error can be a bad pattern.
-				return errors.Wrapf(err, "error retrieving rule files for %s", pat)
+				return fmt.Errorf("error retrieving rule files for %s: %w", pat, err)
 			}
 			files = append(files, fs...)
 		}
@@ -940,7 +940,7 @@ func main() {
 				}
 				if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil {
-					return errors.Wrapf(err, "error loading config from %q", cfg.configFile)
+					return fmt.Errorf("error loading config from %q: %w", cfg.configFile, err)
 				}
 				reloadReady.Close()
@@ -975,7 +975,7 @@ func main() {
 				db, err := openDBWithMetrics(localStoragePath, logger, prometheus.DefaultRegisterer, &opts, localStorage.getStats())
 				if err != nil {
-					return errors.Wrapf(err, "opening storage failed")
+					return fmt.Errorf("opening storage failed: %w", err)
 				}
 				switch fsType := prom_runtime.Statfs(localStoragePath); fsType {
@@ -1031,7 +1031,7 @@ func main() {
 					&opts,
 				)
 				if err != nil {
-					return errors.Wrap(err, "opening storage failed")
+					return fmt.Errorf("opening storage failed: %w", err)
 				}
 				switch fsType := prom_runtime.Statfs(localStoragePath); fsType {
@@ -1069,7 +1069,7 @@ func main() {
 		g.Add(
 			func() error {
 				if err := webHandler.Run(ctxWeb, listener, *webConfig); err != nil {
-					return errors.Wrapf(err, "error starting web server")
+					return fmt.Errorf("error starting web server: %w", err)
 				}
 				return nil
 			},
@@ -1179,7 +1179,7 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
 	conf, err := config.LoadFile(filename, agentMode, expandExternalLabels, logger)
 	if err != nil {
-		return errors.Wrapf(err, "couldn't load configuration (--config.file=%q)", filename)
+		return fmt.Errorf("couldn't load configuration (--config.file=%q): %w", filename, err)
 	}
 	if enableExemplarStorage {
@@ -1198,7 +1198,7 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
 		timings = append(timings, rl.name, time.Since(rstart))
 	}
 	if failed {
-		return errors.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
+		return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
 	}
 	noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go
index cfdb5abdf..9a6dc6671 100644
--- a/cmd/prometheus/main_test.go
+++ b/cmd/prometheus/main_test.go
@@ -16,6 +16,7 @@ package main
 import (
 	"bytes"
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -121,7 +122,8 @@ func TestFailedStartupExitCode(t *testing.T) {
 	err := prom.Run()
 	require.Error(t, err)
-	if exitError, ok := err.(*exec.ExitError); ok {
+	var exitError *exec.ExitError
+	if errors.As(err, &exitError) {
 		status := exitError.Sys().(syscall.WaitStatus)
 		require.Equal(t, expectedExitStatus, status.ExitStatus())
 	} else {
@@ -233,7 +235,8 @@ func TestWALSegmentSizeBounds(t *testing.T) {
 		err = prom.Wait()
 		require.Error(t, err)
-		if exitError, ok := err.(*exec.ExitError); ok {
+		var exitError *exec.ExitError
+		if errors.As(err, &exitError) {
 			status := exitError.Sys().(syscall.WaitStatus)
 			require.Equal(t, expectedExitStatus, status.ExitStatus())
 		} else {
@@ -278,7 +281,8 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
 		err = prom.Wait()
 		require.Error(t, err)
-		if exitError, ok := err.(*exec.ExitError); ok {
+		var exitError *exec.ExitError
+		if errors.As(err, &exitError) {
 			status := exitError.Sys().(syscall.WaitStatus)
 			require.Equal(t, expectedExitStatus, status.ExitStatus())
 		} else {
@@ -467,7 +471,8 @@ func TestModeSpecificFlags(t *testing.T) {
 			err = prom.Wait()
 			require.Error(t, err)
-			if exitError, ok := err.(*exec.ExitError); ok {
+			var exitError *exec.ExitError
+			if errors.As(err, &exitError) {
 				status := exitError.Sys().(syscall.WaitStatus)
 				require.Equal(t, tc.exitStatus, status.ExitStatus())
 			} else {
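Note on the errors.As changes in main_test.go above: a direct type assertion such as err.(*exec.ExitError) only matches when the returned error is exactly that concrete type, while errors.As also walks any chain built with fmt.Errorf("...: %w", err). A minimal, self-contained sketch of the difference (not code from this diff; it assumes a `false` binary is on PATH, e.g. on Linux/macOS):

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	// "false" exits non-zero, so Output returns an *exec.ExitError.
	_, runErr := exec.Command("false").Output()

	// Wrap it with %w, the style the migrated code now uses for its own errors.
	wrapped := fmt.Errorf("running helper command: %w", runErr)

	// The old-style type assertion no longer matches once the error is wrapped.
	if _, ok := wrapped.(*exec.ExitError); !ok {
		fmt.Println("type assertion: no match")
	}

	// errors.As unwraps the chain and still finds the *exec.ExitError.
	var exitError *exec.ExitError
	if errors.As(wrapped, &exitError) {
		fmt.Println("errors.As: exit code", exitError.ExitCode())
	}
}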
diff --git a/cmd/promtool/archive.go b/cmd/promtool/archive.go
index cca148cb4..6edb741ac 100644
--- a/cmd/promtool/archive.go
+++ b/cmd/promtool/archive.go
@@ -16,9 +16,8 @@ package main
 import (
 	"archive/tar"
 	"compress/gzip"
+	"fmt"
 	"os"
-
-	"github.com/pkg/errors"
 )
 const filePerm = 0o666
@@ -32,7 +31,7 @@ type tarGzFileWriter struct {
 func newTarGzFileWriter(archiveName string) (*tarGzFileWriter, error) {
 	file, err := os.Create(archiveName)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error creating archive %q", archiveName)
+		return nil, fmt.Errorf("error creating archive %q: %w", archiveName, err)
 	}
 	gzw := gzip.NewWriter(file)
 	tw := tar.NewWriter(gzw)
diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go
index d1c8f9633..3c23d2c03 100644
--- a/cmd/promtool/backfill.go
+++ b/cmd/promtool/backfill.go
@@ -15,12 +15,13 @@ package main
 import (
 	"context"
+	"errors"
+	"fmt"
 	"io"
 	"math"
 	"time"
 	"github.com/go-kit/log"
-	"github.com/pkg/errors"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/textparse"
@@ -33,11 +34,11 @@ func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) {
 	for {
 		entry, err := p.Next()
-		if err == io.EOF {
+		if errors.Is(err, io.EOF) {
 			break
 		}
 		if err != nil {
-			return 0, 0, errors.Wrap(err, "next")
+			return 0, 0, fmt.Errorf("next: %w", err)
 		}
 		if entry != textparse.EntrySeries {
@@ -46,7 +47,7 @@ func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) {
 		_, ts, _ := p.Series()
 		if ts == nil {
-			return 0, 0, errors.Errorf("expected timestamp for series got none")
+			return 0, 0, fmt.Errorf("expected timestamp for series got none")
 		}
 		if *ts > maxt {
@@ -118,7 +119,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 		// original interval later.
 		w, err := tsdb.NewBlockWriter(log.NewNopLogger(), outputDir, 2*blockDuration)
 		if err != nil {
-			return errors.Wrap(err, "block writer")
+			return fmt.Errorf("block writer: %w", err)
 		}
 		defer func() {
 			err = tsdb_errors.NewMulti(err, w.Close()).Err()
@@ -130,11 +131,11 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 		samplesCount := 0
 		for {
 			e, err := p.Next()
-			if err == io.EOF {
+			if errors.Is(err, io.EOF) {
 				break
 			}
 			if err != nil {
-				return errors.Wrap(err, "parse")
+				return fmt.Errorf("parse: %w", err)
 			}
 			if e != textparse.EntrySeries {
 				continue
 			}
@@ -144,7 +145,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 			if ts == nil {
 				l := labels.Labels{}
 				p.Metric(&l)
-				return errors.Errorf("expected timestamp for series %v, got none", l)
+				return fmt.Errorf("expected timestamp for series %v, got none", l)
 			}
 			if *ts < t {
 				continue
 			}
@@ -160,7 +161,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 			p.Metric(&l)
 			if _, err := app.Append(0, l, *ts, v); err != nil {
-				return errors.Wrap(err, "add sample")
+				return fmt.Errorf("add sample: %w", err)
 			}
 			samplesCount++
@@ -172,7 +173,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 			// Therefore the old appender is committed and a new one is created.
 			// This prevents keeping too many samples lined up in an appender and thus in RAM.
 			if err := app.Commit(); err != nil {
-				return errors.Wrap(err, "commit")
+				return fmt.Errorf("commit: %w", err)
 			}
 			app = w.Appender(ctx)
@@ -180,18 +181,18 @@
 		}
 		if err := app.Commit(); err != nil {
-			return errors.Wrap(err, "commit")
+			return fmt.Errorf("commit: %w", err)
 		}
 		block, err := w.Flush(ctx)
-		switch err {
-		case nil:
+		switch {
+		case err == nil:
 			if quiet {
 				break
 			}
 			blocks, err := db.Blocks()
 			if err != nil {
-				return errors.Wrap(err, "get blocks")
+				return fmt.Errorf("get blocks: %w", err)
 			}
 			for _, b := range blocks {
 				if b.Meta().ULID == block {
@@ -200,15 +201,15 @@
 					break
 				}
 			}
-		case tsdb.ErrNoSeriesAppended:
+		case errors.Is(err, tsdb.ErrNoSeriesAppended):
 		default:
-			return errors.Wrap(err, "flush")
+			return fmt.Errorf("flush: %w", err)
 		}
 		return nil
 	}()
 	if err != nil {
-		return errors.Wrap(err, "process blocks")
+		return fmt.Errorf("process blocks: %w", err)
 	}
 	}
 	return nil
@@ -218,7 +219,10 @@ func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanRea
 	p := textparse.NewOpenMetricsParser(input)
 	maxt, mint, err := getMinAndMaxTimestamps(p)
 	if err != nil {
-		return errors.Wrap(err, "getting min and max timestamp")
+		return fmt.Errorf("getting min and max timestamp: %w", err)
 	}
-	return errors.Wrap(createBlocks(input, mint, maxt, int64(maxBlockDuration/time.Millisecond), maxSamplesInAppender, outputDir, humanReadable, quiet), "block creation")
+	if err = createBlocks(input, mint, maxt, int64(maxBlockDuration/time.Millisecond), maxSamplesInAppender, outputDir, humanReadable, quiet); err != nil {
+		return fmt.Errorf("block creation: %w", err)
+	}
+	return nil
 }
diff --git a/cmd/promtool/debug.go b/cmd/promtool/debug.go
index 310004480..6383aafac 100644
--- a/cmd/promtool/debug.go
+++ b/cmd/promtool/debug.go
@@ -17,8 +17,6 @@ import (
 	"fmt"
 	"io"
 	"net/http"
-
-	"github.com/pkg/errors"
 )
 type debugWriterConfig struct {
@@ -30,7 +28,7 @@ type debugWriterConfig struct {
 func debugWrite(cfg debugWriterConfig) error {
 	archiver, err := newTarGzFileWriter(cfg.tarballName)
 	if err != nil {
-		return errors.Wrap(err, "error creating a new archiver")
+		return fmt.Errorf("error creating a new archiver: %w", err)
 	}
 	for _, endPointGroup := range cfg.endPointGroups {
@@ -39,28 +37,28 @@ func debugWrite(cfg debugWriterConfig) error {
 			fmt.Println("collecting:", url)
 			res, err := http.Get(url)
 			if err != nil {
-				return errors.Wrap(err, "error executing HTTP request")
+				return fmt.Errorf("error executing HTTP request: %w", err)
 			}
 			body, err := io.ReadAll(res.Body)
 			res.Body.Close()
 			if err != nil {
-				return errors.Wrap(err, "error reading the response body")
+				return fmt.Errorf("error reading the response body: %w", err)
 			}
 			if endPointGroup.postProcess != nil {
 				body, err = endPointGroup.postProcess(body)
 				if err != nil {
-					return errors.Wrap(err, "error post-processing HTTP response body")
+					return fmt.Errorf("error post-processing HTTP response body: %w", err)
 				}
 			}
 			if err := archiver.write(filename, body); err != nil {
-				return errors.Wrap(err, "error writing into the archive")
+				return fmt.Errorf("error writing into the archive: %w", err)
 			}
 		}
 	}
 	if err := archiver.close(); err != nil {
-		return errors.Wrap(err, "error closing archive writer")
+		return fmt.Errorf("error closing archive writer: %w", err)
 	}
 	fmt.Printf("Compiling debug information complete, all files written in %q.\n", cfg.tarballName)
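Note on the err == io.EOF to errors.Is(err, io.EOF) change in backfill.go above: once errors can arrive wrapped with %w, a direct comparison against a sentinel no longer matches, while errors.Is still does because it walks the unwrap chain. A minimal standalone sketch (the readAll helper is illustrative, not code from this diff):

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// readAll drains r and wraps any error with context, mirroring the
// fmt.Errorf("...: %w", err) style adopted throughout this diff.
func readAll(r io.Reader) error {
	buf := make([]byte, 8)
	for {
		if _, err := r.Read(buf); err != nil {
			return fmt.Errorf("read: %w", err)
		}
	}
}

func main() {
	err := readAll(strings.NewReader("abc"))

	fmt.Println(err == io.EOF)          // false: the sentinel is hidden behind the wrapper
	fmt.Println(errors.Is(err, io.EOF)) // true: errors.Is unwraps the %w chain
}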
diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go
index 5098a1903..01d953d20 100644
--- a/cmd/promtool/main.go
+++ b/cmd/promtool/main.go
@@ -17,6 +17,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -32,7 +33,6 @@ import (
 	"github.com/go-kit/log"
 	"github.com/google/pprof/profile"
-	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/api"
 	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
@@ -414,10 +414,10 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
 			// If an explicit file was given, error if it is not accessible.
 			if !strings.Contains(rf, "*") {
 				if len(rfs) == 0 {
-					return nil, errors.Errorf("%q does not point to an existing file", rf)
+					return nil, fmt.Errorf("%q does not point to an existing file", rf)
 				}
 				if err := checkFileExists(rfs[0]); err != nil {
-					return nil, errors.Wrapf(err, "error checking rule file %q", rfs[0])
+					return nil, fmt.Errorf("error checking rule file %q: %w", rfs[0], err)
 				}
 			}
 			ruleFiles = append(ruleFiles, rfs...)
@@ -427,7 +427,7 @@
 	for _, scfg := range cfg.ScrapeConfigs {
 		if !checkSyntaxOnly && scfg.HTTPClientConfig.Authorization != nil {
 			if err := checkFileExists(scfg.HTTPClientConfig.Authorization.CredentialsFile); err != nil {
-				return nil, errors.Wrapf(err, "error checking authorization credentials or bearer token file %q", scfg.HTTPClientConfig.Authorization.CredentialsFile)
+				return nil, fmt.Errorf("error checking authorization credentials or bearer token file %q: %w", scfg.HTTPClientConfig.Authorization.CredentialsFile, err)
 			}
 		}
@@ -455,7 +455,7 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
 				var targetGroups []*targetgroup.Group
 				targetGroups, err = checkSDFile(f)
 				if err != nil {
-					return nil, errors.Errorf("checking SD file %q: %v", file, err)
+					return nil, fmt.Errorf("checking SD file %q: %w", file, err)
 				}
 				if err := checkTargetGroupsForScrapeConfig(targetGroups, scfg); err != nil {
 					return nil, err
@@ -491,7 +491,7 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
 				var targetGroups []*targetgroup.Group
 				targetGroups, err = checkSDFile(f)
 				if err != nil {
-					return nil, errors.Errorf("checking SD file %q: %v", file, err)
+					return nil, fmt.Errorf("checking SD file %q: %w", file, err)
 				}
 				if err := checkTargetGroupsForAlertmanager(targetGroups, amcfg); err != nil {
@@ -514,10 +514,10 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
 func checkTLSConfig(tlsConfig config_util.TLSConfig, checkSyntaxOnly bool) error {
 	if len(tlsConfig.CertFile) > 0 && len(tlsConfig.KeyFile) == 0 {
-		return errors.Errorf("client cert file %q specified without client key file", tlsConfig.CertFile)
+		return fmt.Errorf("client cert file %q specified without client key file", tlsConfig.CertFile)
 	}
 	if len(tlsConfig.KeyFile) > 0 && len(tlsConfig.CertFile) == 0 {
-		return errors.Errorf("client key file %q specified without client cert file", tlsConfig.KeyFile)
+		return fmt.Errorf("client key file %q specified without client cert file", tlsConfig.KeyFile)
 	}
 	if checkSyntaxOnly {
@@ -525,10 +525,10 @@ func checkTLSConfig(tlsConfig config_util.TLSConfig, checkSyntaxOnly bool) error
 	}
 	if err := checkFileExists(tlsConfig.CertFile); err != nil {
-		return errors.Wrapf(err, "error checking client cert file %q", tlsConfig.CertFile)
+		return fmt.Errorf("error checking client cert file %q: %w", tlsConfig.CertFile, err)
 	}
 	if err := checkFileExists(tlsConfig.KeyFile); err != nil {
-		return errors.Wrapf(err, "error checking client key file %q", tlsConfig.KeyFile)
+		return fmt.Errorf("error checking client key file %q: %w", tlsConfig.KeyFile, err)
 	}
 	return nil
@@ -558,12 +558,12 @@ func checkSDFile(filename string) ([]*targetgroup.Group, error) {
 			return nil, err
 		}
 	default:
-		return nil, errors.Errorf("invalid file extension: %q", ext)
+		return nil, fmt.Errorf("invalid file extension: %q", ext)
 	}
 	for i, tg := range targetGroups {
 		if tg == nil {
-			return nil, errors.Errorf("nil target group item found (index %d)", i)
+			return nil, fmt.Errorf("nil target group item found (index %d)", i)
 		}
 	}
@@ -992,14 +992,14 @@ func parseStartTimeAndEndTime(start, end string) (time.Time, time.Time, error) {
 	if start != "" {
 		stime, err = parseTime(start)
 		if err != nil {
-			return stime, etime, errors.Wrap(err, "error parsing start time")
+			return stime, etime, fmt.Errorf("error parsing start time: %w", err)
 		}
 	}
 	if end != "" {
 		etime, err = parseTime(end)
 		if err != nil {
-			return stime, etime, errors.Wrap(err, "error parsing end time")
+			return stime, etime, fmt.Errorf("error parsing end time: %w", err)
 		}
 	}
 	return stime, etime, nil
@@ -1013,7 +1013,7 @@ func parseTime(s string) (time.Time, error) {
 	if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
 		return t, nil
 	}
-	return time.Time{}, errors.Errorf("cannot parse %q to a valid timestamp", s)
+	return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s)
 }
 type endpointsGroup struct {
@@ -1039,7 +1039,7 @@ var (
 			}
 			var buf bytes.Buffer
 			if err := p.WriteUncompressed(&buf); err != nil {
-				return nil, errors.Wrap(err, "writing the profile to the buffer")
+				return nil, fmt.Errorf("writing the profile to the buffer: %w", err)
 			}
 			return buf.Bytes(), nil
@@ -1149,13 +1149,13 @@ func importRules(url *url.URL, start, end, outputDir string, evalInterval, maxBl
 	} else {
 		etime, err = parseTime(end)
 		if err != nil {
-			return fmt.Errorf("error parsing end time: %v", err)
+			return fmt.Errorf("error parsing end time: %w", err)
 		}
 	}
 	stime, err = parseTime(start)
 	if err != nil {
-		return fmt.Errorf("error parsing start time: %v", err)
+		return fmt.Errorf("error parsing start time: %w", err)
 	}
 	if !stime.Before(etime) {
@@ -1173,14 +1173,14 @@ func importRules(url *url.URL, start, end, outputDir string, evalInterval, maxBl
 		Address: url.String(),
 	})
 	if err != nil {
-		return fmt.Errorf("new api client error: %v", err)
+		return fmt.Errorf("new api client error: %w", err)
 	}
 	ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, v1.NewAPI(client))
 	errs := ruleImporter.loadGroups(ctx, files)
 	for _, err := range errs {
 		if err != nil {
-			return fmt.Errorf("rule importer parse error: %v", err)
+			return fmt.Errorf("rule importer parse error: %w", err)
 		}
 	}
diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go
index 760f92833..20d9fde9a 100644
--- a/cmd/promtool/rules.go
+++ b/cmd/promtool/rules.go
@@ -20,7 +20,6 @@ import (
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
-	"github.com/pkg/errors"
 	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
 	"github.com/prometheus/common/model"
@@ -122,7 +121,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
 		},
 	)
 	if err != nil {
-		return errors.Wrap(err, "query range")
+		return fmt.Errorf("query range: %w", err)
 	}
 	if warnings != nil {
 		level.Warn(importer.logger).Log("msg", "Range query returned warnings.", "warnings", warnings)
@@ -136,7 +135,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
 	// original interval later.
 	w, err := tsdb.NewBlockWriter(log.NewNopLogger(), importer.config.outputDir, 2*blockDuration)
 	if err != nil {
-		return errors.Wrap(err, "new block writer")
+		return fmt.Errorf("new block writer: %w", err)
 	}
 	var closed bool
 	defer func() {
@@ -167,7 +166,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
 			for _, value := range sample.Values {
 				if err := app.add(ctx, lb.Labels(), timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil {
-					return errors.Wrap(err, "add")
+					return fmt.Errorf("add: %w", err)
 				}
 			}
 		}
@@ -176,7 +175,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
 	}
 	if err := app.flushAndCommit(ctx); err != nil {
-		return errors.Wrap(err, "flush and commit")
+		return fmt.Errorf("flush and commit: %w", err)
 	}
 	err = tsdb_errors.NewMulti(err, w.Close()).Err()
 	closed = true
@@ -204,7 +203,7 @@ type multipleAppender struct {
 func (m *multipleAppender) add(ctx context.Context, l labels.Labels, t int64, v float64) error {
 	if _, err := m.appender.Append(0, l, t, v); err != nil {
-		return errors.Wrap(err, "multiappender append")
+		return fmt.Errorf("multiappender append: %w", err)
 	}
 	m.currentSampleCount++
 	if m.currentSampleCount >= m.maxSamplesInMemory {
@@ -218,7 +217,7 @@ func (m *multipleAppender) commit(ctx context.Context) error {
 		return nil
 	}
 	if err := m.appender.Commit(); err != nil {
-		return errors.Wrap(err, "multiappender commit")
+		return fmt.Errorf("multiappender commit: %w", err)
 	}
 	m.appender = m.writer.Appender(ctx)
 	m.currentSampleCount = 0
@@ -230,7 +229,7 @@ func (m *multipleAppender) flushAndCommit(ctx context.Context) error {
 		return err
 	}
 	if _, err := m.writer.Flush(ctx); err != nil {
-		return errors.Wrap(err, "multiappender flush")
+		return fmt.Errorf("multiappender flush: %w", err)
 	}
 	return nil
 }
diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go
index 565751f72..7707a9904 100644
--- a/cmd/promtool/tsdb.go
+++ b/cmd/promtool/tsdb.go
@@ -35,7 +35,6 @@ import (
 	"github.com/alecthomas/units"
 	"github.com/go-kit/log"
-	"github.com/pkg/errors"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/tsdb"
@@ -236,29 +235,29 @@ func (b *writeBenchmark) startProfiling() error {
 	// Start CPU profiling.
 	b.cpuprof, err = os.Create(filepath.Join(b.outPath, "cpu.prof"))
 	if err != nil {
-		return fmt.Errorf("bench: could not create cpu profile: %v", err)
+		return fmt.Errorf("bench: could not create cpu profile: %w", err)
 	}
 	if err := pprof.StartCPUProfile(b.cpuprof); err != nil {
-		return fmt.Errorf("bench: could not start CPU profile: %v", err)
+		return fmt.Errorf("bench: could not start CPU profile: %w", err)
 	}
 	// Start memory profiling.
 	b.memprof, err = os.Create(filepath.Join(b.outPath, "mem.prof"))
 	if err != nil {
-		return fmt.Errorf("bench: could not create memory profile: %v", err)
+		return fmt.Errorf("bench: could not create memory profile: %w", err)
 	}
 	runtime.MemProfileRate = 64 * 1024
 	// Start fatal profiling.
 	b.blockprof, err = os.Create(filepath.Join(b.outPath, "block.prof"))
 	if err != nil {
-		return fmt.Errorf("bench: could not create block profile: %v", err)
+		return fmt.Errorf("bench: could not create block profile: %w", err)
 	}
 	runtime.SetBlockProfileRate(20)
 	b.mtxprof, err = os.Create(filepath.Join(b.outPath, "mutex.prof"))
 	if err != nil {
-		return fmt.Errorf("bench: could not create mutex profile: %v", err)
+		return fmt.Errorf("bench: could not create mutex profile: %w", err)
 	}
 	runtime.SetMutexProfileFraction(20)
 	return nil
@@ -272,14 +271,14 @@ func (b *writeBenchmark) stopProfiling() error {
 	}
 	if b.memprof != nil {
 		if err := pprof.Lookup("heap").WriteTo(b.memprof, 0); err != nil {
-			return fmt.Errorf("error writing mem profile: %v", err)
+			return fmt.Errorf("error writing mem profile: %w", err)
 		}
 		b.memprof.Close()
 		b.memprof = nil
 	}
 	if b.blockprof != nil {
 		if err := pprof.Lookup("block").WriteTo(b.blockprof, 0); err != nil {
-			return fmt.Errorf("error writing block profile: %v", err)
+			return fmt.Errorf("error writing block profile: %w", err)
 		}
 		b.blockprof.Close()
 		b.blockprof = nil
@@ -287,7 +286,7 @@
 	}
 	if b.mtxprof != nil {
 		if err := pprof.Lookup("mutex").WriteTo(b.mtxprof, 0); err != nil {
-			return fmt.Errorf("error writing mutex profile: %v", err)
+			return fmt.Errorf("error writing mutex profile: %w", err)
 		}
 		b.mtxprof.Close()
 		b.mtxprof = nil
@@ -680,7 +679,7 @@ func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxB
 	defer inputFile.Close()
 	if err := os.MkdirAll(outputDir, 0o777); err != nil {
-		return checkErr(errors.Wrap(err, "create output dir"))
+		return checkErr(fmt.Errorf("create output dir: %w", err))
 	}
 	return checkErr(backfill(5000, inputFile.Bytes(), outputDir, humanReadable, quiet, maxBlockDuration))
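Note on the %v to %w changes in tsdb.go and the importRules code above: both verbs produce the same message text, but only %w records the wrapped error, so callers can still match it with errors.Is or errors.As. A small standalone sketch (the file path is hypothetical, not taken from this diff):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("/path/that/does/not/exist") // hypothetical missing path

	withV := fmt.Errorf("bench: could not open file: %v", err) // message only
	withW := fmt.Errorf("bench: could not open file: %w", err) // message plus wrapped cause

	fmt.Println(errors.Is(withV, fs.ErrNotExist)) // false: %v breaks the error chain
	fmt.Println(errors.Is(withW, fs.ErrNotExist)) // true: %w keeps the chain intact
}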
diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go
index fd06f0e2d..f03906089 100644
--- a/cmd/promtool/unittest.go
+++ b/cmd/promtool/unittest.go
@@ -15,6 +15,7 @@ package main
 import (
 	"context"
+	"errors"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -25,7 +26,6 @@ import (
 	"time"
 	"github.com/go-kit/log"
-	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
 	yaml "gopkg.in/yaml.v2"
@@ -87,7 +87,7 @@ func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts) []error {
 	groupOrderMap := make(map[string]int)
 	for i, gn := range unitTestInp.GroupEvalOrder {
 		if _, ok := groupOrderMap[gn]; ok {
-			return []error{errors.Errorf("group name repeated in evaluation order: %s", gn)}
+			return []error{fmt.Errorf("group name repeated in evaluation order: %s", gn)}
 		}
 		groupOrderMap[gn] = i
 	}
@@ -195,7 +195,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
 			if tg.TestGroupName != "" {
 				testGroupLog = fmt.Sprintf(" (in TestGroup %s)", tg.TestGroupName)
 			}
-			return []error{errors.Errorf("an item under alert_rule_test misses required attribute alertname at eval_time %v%s", alert.EvalTime, testGroupLog)}
+			return []error{fmt.Errorf("an item under alert_rule_test misses required attribute alertname at eval_time %v%s", alert.EvalTime, testGroupLog)}
 		}
 		alertEvalTimesMap[alert.EvalTime] = struct{}{}
@@ -240,7 +240,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
 		g.Eval(suite.Context(), ts)
 		for _, r := range g.Rules() {
 			if r.LastError() != nil {
-				evalErrs = append(evalErrs, errors.Errorf(" rule: %s, time: %s, err: %v",
+				evalErrs = append(evalErrs, fmt.Errorf(" rule: %s, time: %s, err: %v",
 					r.Name(), ts.Sub(time.Unix(0, 0).UTC()), r.LastError()))
 			}
 		}
@@ -323,7 +323,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
 			}
 			expString := indentLines(expAlerts.String(), " ")
 			gotString := indentLines(gotAlerts.String(), " ")
-			errs = append(errs, errors.Errorf("%s alertname: %s, time: %s, \n exp:%v, \n got:%v",
+			errs = append(errs, fmt.Errorf("%s alertname: %s, time: %s, \n exp:%v, \n got:%v",
 				testName, testcase.Alertname, testcase.EvalTime.String(), expString, gotString))
 		}
 	}
@@ -338,7 +338,7 @@ Outer:
 		got, err := query(suite.Context(), testCase.Expr, mint.Add(time.Duration(testCase.EvalTime)), suite.QueryEngine(), suite.Queryable())
 		if err != nil {
-			errs = append(errs, errors.Errorf(" expr: %q, time: %s, err: %s", testCase.Expr,
+			errs = append(errs, fmt.Errorf(" expr: %q, time: %s, err: %s", testCase.Expr,
 				testCase.EvalTime.String(), err.Error()))
 			continue
 		}
@@ -355,9 +355,9 @@ Outer:
 		for _, s := range testCase.ExpSamples {
 			lb, err := parser.ParseMetric(s.Labels)
 			if err != nil {
-				err = errors.Wrapf(err, "labels %q", s.Labels)
-				errs = append(errs, errors.Errorf(" expr: %q, time: %s, err: %s", testCase.Expr,
-					testCase.EvalTime.String(), err.Error()))
+				err = fmt.Errorf("labels %q: %w", s.Labels, err)
+				errs = append(errs, fmt.Errorf(" expr: %q, time: %s, err: %w", testCase.Expr,
+					testCase.EvalTime.String(), err))
 				continue Outer
 			}
 			expSamples = append(expSamples, parsedSample{
@@ -373,7 +373,7 @@ Outer:
 			return labels.Compare(gotSamples[i].Labels, gotSamples[j].Labels) <= 0
 		})
 		if !reflect.DeepEqual(expSamples, gotSamples) {
-			errs = append(errs, errors.Errorf(" expr: %q, time: %s,\n exp: %v\n got: %v", testCase.Expr,
+			errs = append(errs, fmt.Errorf(" expr: %q, time: %s,\n exp: %v\n got: %v", testCase.Expr,
 				testCase.EvalTime.String(), parsedSamplesString(expSamples), parsedSamplesString(gotSamples)))
 		}
 	}
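A closing note on the pattern across these files: errors.Errorf calls become plain fmt.Errorf when the error is newly constructed from values, and become fmt.Errorf with %w only when there is an underlying error worth keeping in the chain. A minimal sketch of the two cases (validate and load are illustrative names, not functions from this diff):

package main

import (
	"errors"
	"fmt"
)

func validate(name string) error {
	if name == "" {
		// No underlying cause to preserve: a plain fmt.Errorf (or errors.New) is enough.
		return fmt.Errorf("an item is missing the required attribute alertname")
	}
	return nil
}

func load(name string) error {
	if err := validate(name); err != nil {
		// There is an underlying cause: wrap it with %w so it stays matchable.
		return fmt.Errorf("loading test case %q: %w", name, err)
	}
	return nil
}

func main() {
	err := load("")
	fmt.Println(err)
	// The wrapped validation error is still reachable through the chain.
	fmt.Println(errors.Unwrap(err) != nil)
}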