refactor (rules): move from github.com/pkg/errors to 'errors' and 'fmt' (#10855)

* refactor (rules): move from github.com/pkg/errors to 'errors' and 'fmt'

Signed-off-by: Matthieu MOREL <mmorel-35@users.noreply.github.com>
Matthieu MOREL 2022-06-17 09:54:25 +02:00 committed by GitHub
parent 03a2313f7a
commit ddfa9a7cc5
3 changed files with 20 additions and 17 deletions
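
The migration follows the usual mapping from github.com/pkg/errors to the standard library: errors.Errorf becomes fmt.Errorf, errors.Wrap(err, msg) becomes fmt.Errorf("%s: %w", msg, err), errors.Cause comparisons become errors.Is, and type assertions on errors become errors.As. The one behavioural difference to keep in mind is that pkg/errors attached a stack trace to the errors it created, which the standard library does not. A minimal standalone sketch of the mapping (not code from this commit; errSentinel and queryError are invented for illustration):

package main

import (
	"errors"
	"fmt"
)

// errSentinel and queryError are illustrative stand-ins, not Prometheus types.
var errSentinel = errors.New("sentinel")

type queryError struct{ msg string }

func (e *queryError) Error() string { return e.msg }

func main() {
	// errors.Errorf(...)          -> fmt.Errorf(...)
	plain := fmt.Errorf("unknown alert state: %d", 42)
	fmt.Println(plain)

	// errors.Wrap(err, "context") -> fmt.Errorf("context: %w", err)
	wrapped := fmt.Errorf("loading rules: %w", errSentinel)

	// errors.Cause(err) == target -> errors.Is(err, target)
	fmt.Println(errors.Is(wrapped, errSentinel)) // true

	// _, ok := err.(*queryError)  -> errors.As(err, &target)
	var qe *queryError
	fmt.Println(errors.As(fmt.Errorf("eval: %w", &queryError{msg: "canceled"}), &qe)) // true
}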


@@ -23,7 +23,6 @@ import (
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
-	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
 	yaml "gopkg.in/yaml.v2"
@@ -69,7 +68,7 @@ func (s AlertState) String() string {
 	case StateFiring:
 		return "firing"
 	}
-	panic(errors.Errorf("unknown alert state: %d", s))
+	panic(fmt.Errorf("unknown alert state: %d", s))
 }
 // Alert is the user-level representation of a single instance of an alerting rule.
@@ -450,7 +449,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
 	if limit > 0 && numActivePending > limit {
 		r.active = map[uint64]*Alert{}
-		return nil, errors.Errorf("exceeded limit of %d with %d alerts", limit, numActivePending)
+		return nil, fmt.Errorf("exceeded limit of %d with %d alerts", limit, numActivePending)
 	}
 	return vec, nil
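
Both replacements in this file use fmt.Errorf without the %w verb, so the new errors carry the same message as before but wrap nothing: errors.Unwrap on them returns nil. A quick standalone check (the values are invented for illustration):

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := fmt.Errorf("exceeded limit of %d with %d alerts", 5, 7)
	fmt.Println(err)                // exceeded limit of 5 with 7 alerts
	fmt.Println(errors.Unwrap(err)) // <nil> – without %w nothing is wrapped
}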


@@ -15,11 +15,11 @@ package rules
 import (
 	"context"
+	"errors"
 	"testing"
 	"time"
 	"github.com/go-kit/log"
-	"github.com/pkg/errors"
 	"github.com/stretchr/testify/require"
 	"github.com/prometheus/prometheus/model/labels"


@@ -15,6 +15,8 @@ package rules
 import (
 	"context"
+	"errors"
+	"fmt"
 	"math"
 	"net/url"
 	"sort"
@@ -23,7 +25,6 @@ import (
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
-	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"go.opentelemetry.io/otel"
@@ -630,7 +631,8 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
 				// Canceled queries are intentional termination of queries. This normally
 				// happens on shutdown and thus we skip logging of any errors here.
-				if _, ok := err.(promql.ErrQueryCanceled); !ok {
+				var eqc promql.ErrQueryCanceled
+				if !errors.As(err, &eqc) {
 					level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Evaluating rule failed", "rule", rule, "err", err)
 				}
 				return
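
The switch from a type assertion to errors.As is slightly more than mechanical: err.(promql.ErrQueryCanceled) only matches when err itself has that concrete type, while errors.As also inspects any errors it wraps. A standalone sketch of the difference, using a made-up queryCanceled type as a stand-in for promql.ErrQueryCanceled:

package main

import (
	"errors"
	"fmt"
)

// queryCanceled is an illustrative stand-in for promql.ErrQueryCanceled.
type queryCanceled string

func (e queryCanceled) Error() string { return "query canceled: " + string(e) }

func main() {
	wrapped := fmt.Errorf("rule evaluation: %w", queryCanceled("shutdown"))

	_, ok := wrapped.(queryCanceled) // the assertion only sees the outer wrapper
	fmt.Println(ok)                  // false

	var qc queryCanceled
	fmt.Println(errors.As(wrapped, &qc)) // true – errors.As unwraps until it finds a match
}
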
@@ -667,12 +669,12 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
 					rule.SetHealth(HealthBad)
 					rule.SetLastError(err)
 					sp.SetStatus(codes.Error, err.Error())
+					unwrappedErr := errors.Unwrap(err)
-					switch errors.Cause(err) {
-					case storage.ErrOutOfOrderSample:
+					switch {
+					case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample):
 						numOutOfOrder++
 						level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
-					case storage.ErrDuplicateSampleForTimestamp:
+					case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
 						numDuplicates++
 						level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
 					default:
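
One semantic detail in this hunk: errors.Cause from pkg/errors unwrapped repeatedly down to the root error, whereas the standard library's errors.Unwrap peels off exactly one layer; the new code then relies on errors.Is, which walks the remaining chain, to match the storage sentinels. A small standalone illustration, with errOutOfOrder as an invented stand-in for storage.ErrOutOfOrderSample and assuming the appender wraps the sentinel in one layer of context:

package main

import (
	"errors"
	"fmt"
)

// errOutOfOrder is an illustrative stand-in for storage.ErrOutOfOrderSample.
var errOutOfOrder = errors.New("out of order sample")

func main() {
	err := fmt.Errorf("append failed: %w", errOutOfOrder)

	unwrapped := errors.Unwrap(err)         // removes exactly one layer
	fmt.Println(unwrapped == errOutOfOrder) // true for a single level of wrapping

	// errors.Is matches at any depth of the chain it is given.
	fmt.Println(errors.Is(err, errOutOfOrder))                          // true
	fmt.Println(errors.Is(fmt.Errorf("outer: %w", err), errOutOfOrder)) // true
}
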
@@ -694,9 +696,10 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
 				if _, ok := seriesReturned[metric]; !ok {
 					// Series no longer exposed, mark it stale.
 					_, err = app.Append(0, lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
-					switch errors.Cause(err) {
-					case nil:
-					case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
+					unwrappedErr := errors.Unwrap(err)
+					switch {
+					case unwrappedErr == nil:
+					case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
 						// Do not count these in logging, as this is expected if series
 						// is exposed from a different rule.
 					default:
@@ -720,9 +723,10 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) {
 	for _, s := range g.staleSeries {
 		// Rule that produced series no longer configured, mark it stale.
 		_, err := app.Append(0, s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
-		switch errors.Cause(err) {
-		case nil:
-		case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
+		unwrappedErr := errors.Unwrap(err)
+		switch {
+		case unwrappedErr == nil:
+		case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
 			// Do not count these in logging, as this is expected if series
 			// is exposed from a different rule.
 		default:
@@ -1074,7 +1078,7 @@ func (m *Manager) LoadGroups(
 		for _, r := range rg.Rules {
 			expr, err := m.opts.GroupLoader.Parse(r.Expr.Value)
 			if err != nil {
-				return nil, []error{errors.Wrap(err, fn)}
+				return nil, []error{fmt.Errorf("%s: %w", fn, err)}
 			}
 			if r.Alert.Value != "" {
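
The last hunk swaps errors.Wrap(err, fn) for fmt.Errorf("%s: %w", fn, err), which produces the same "<fn>: <original message>" text while keeping the underlying parse error reachable through errors.Is and errors.Unwrap. A standalone sketch (the parse error and file name are invented for illustration):

package main

import (
	"errors"
	"fmt"
)

func main() {
	parseErr := errors.New("unexpected character: '@'") // stand-in for a PromQL parse error
	fn := "rules/example.yaml"                          // stand-in for the rule file being loaded

	err := fmt.Errorf("%s: %w", fn, parseErr)

	fmt.Println(err)                            // rules/example.yaml: unexpected character: '@'
	fmt.Println(errors.Is(err, parseErr))       // true – %w keeps the original error in the chain
	fmt.Println(errors.Unwrap(err) == parseErr) // true
}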