enable errorf rule from perfsprint linter

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Matthieu MOREL 2024-11-03 13:15:51 +01:00 committed by Ayoub Mrini
parent 37f3f3f2db
commit af1a19fc78
64 changed files with 164 additions and 149 deletions
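The perfsprint errorf rule flags fmt.Errorf calls whose message contains no format verbs. Since fmt.Errorf ultimately builds such an error from the formatted string anyway, a constant message can go straight to errors.New and skip the formatting pass. A minimal sketch of the rewrite this commit applies throughout (hypothetical function names, not taken from the diff):

    package main

    import (
        "errors"
        "fmt"
    )

    // Before: a constant message still pays for a format-string parse.
    func openBefore() error { return fmt.Errorf("file not found") }

    // After: errors.New takes the string as-is; this is what the rule enforces.
    func openAfter() error { return errors.New("file not found") }

    // Untouched by the rule: messages with verbs or %w wrapping keep fmt.Errorf.
    func wrap(path string, err error) error {
        return fmt.Errorf("open %s: %w", path, err)
    }

    func main() {
        fmt.Println(openBefore(), openAfter(), wrap("/tmp/x", errors.New("denied")))
    }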

@@ -109,7 +109,7 @@ linters-settings:
extra-rules: true
perfsprint:
# Optimizes `fmt.Errorf`.
-errorf: false
+errorf: true
revive:
# By default, revive will enable only the linting rules that are named in the configuration file.
# So, it's needed to explicitly enable all required rules here.

@@ -34,8 +34,8 @@ import (
)
var (
-errNotNativeHistogram = fmt.Errorf("not a native histogram")
-errNotEnoughData = fmt.Errorf("not enough data")
+errNotNativeHistogram = errors.New("not a native histogram")
+errNotEnoughData = errors.New("not enough data")
outputHeader = `Bucket stats for each histogram series over time
------------------------------------------------
@@ -169,7 +169,7 @@ func querySamples(ctx context.Context, api v1.API, query string, end time.Time)
matrix, ok := values.(model.Matrix)
if !ok {
-return nil, fmt.Errorf("query of buckets resulted in non-Matrix")
+return nil, errors.New("query of buckets resulted in non-Matrix")
}
return matrix, nil
@@ -259,7 +259,7 @@ func getBucketCountsAtTime(matrix model.Matrix, numBuckets, timeIdx int) ([]int,
prev := matrix[i].Values[timeIdx]
// Assume the results are nicely aligned.
if curr.Timestamp != prev.Timestamp {
-return counts, fmt.Errorf("matrix result is not time aligned")
+return counts, errors.New("matrix result is not time aligned")
}
counts[i+1] = int(curr.Value - prev.Value)
}

@@ -49,7 +49,7 @@ func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) {
_, ts, _ := p.Series()
if ts == nil {
-return 0, 0, fmt.Errorf("expected timestamp for series got none")
+return 0, 0, errors.New("expected timestamp for series got none")
}
if *ts > maxt {

@@ -444,7 +444,7 @@ func checkExperimental(f bool) {
}
}
-var errLint = fmt.Errorf("lint error")
+var errLint = errors.New("lint error")
type lintConfig struct {
all bool

@@ -662,7 +662,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
fhchk, ok := chk.(*chunkenc.FloatHistogramChunk)
if !ok {
-return fmt.Errorf("chunk is not FloatHistogramChunk")
+return errors.New("chunk is not FloatHistogramChunk")
}
it := fhchk.Iterator(nil)
bucketCount := 0
@@ -677,7 +677,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
hchk, ok := chk.(*chunkenc.HistogramChunk)
if !ok {
-return fmt.Errorf("chunk is not HistogramChunk")
+return errors.New("chunk is not HistogramChunk")
}
it := hchk.Iterator(nil)
bucketCount := 0

@@ -1072,7 +1072,7 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
if httpClientConfigAuthEnabled && c.SigV4Config != nil {
-return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
+return errors.New("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
}
// Check for users putting URLs in target groups.
@@ -1420,7 +1420,7 @@ func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
for i, attr := range c.PromoteResourceAttributes {
attr = strings.TrimSpace(attr)
if attr == "" {
-err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute"))
+err = errors.Join(err, errors.New("empty promoted OTel resource attribute"))
continue
}
if _, exists := seen[attr]; exists {

@@ -161,7 +161,7 @@ type EC2Discovery struct {
func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
m, ok := metrics.(*ec2Metrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

@@ -134,7 +134,7 @@ type LightsailDiscovery struct {
func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
m, ok := metrics.(*lightsailMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

@@ -186,7 +186,7 @@ type Discovery struct {
func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*azureMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

@@ -189,7 +189,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*consulMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

@@ -15,6 +15,7 @@ package digitalocean
import (
"context"
"errors"
"fmt"
"log/slog"
"net"
@@ -114,7 +115,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*digitaloceanMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

@@ -121,7 +121,7 @@ type Discovery struct {
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*dnsMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

@@ -15,7 +15,7 @@ package dns
import (
"context"
"fmt"
"errors"
"log/slog"
"net"
"testing"
@@ -53,7 +53,7 @@ func TestDNS(t *testing.T) {
Type: "A",
},
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
-return nil, fmt.Errorf("some error")
+return nil, errors.New("some error")
},
expected: []*targetgroup.Group{},
},

@@ -16,7 +16,6 @@ package eureka
import (
"context"
"errors"
"fmt"
"log/slog"
"net"
"net/http"
@@ -129,7 +128,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*eurekaMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd")

@@ -184,7 +184,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
fm, ok := metrics.(*fileMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

@@ -132,7 +132,7 @@ type Discovery struct {
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*gceMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

@@ -138,7 +138,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*hetznerMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
r, err := newRefresher(conf, logger)

@@ -86,17 +86,17 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
if c.URL == "" {
-return fmt.Errorf("URL is missing")
+return errors.New("URL is missing")
}
parsedURL, err := url.Parse(c.URL)
if err != nil {
return err
}
if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
-return fmt.Errorf("URL scheme must be 'http' or 'https'")
+return errors.New("URL scheme must be 'http' or 'https'")
}
if parsedURL.Host == "" {
-return fmt.Errorf("host is missing in URL")
+return errors.New("host is missing in URL")
}
return c.HTTPClientConfig.Validate()
}
@@ -118,7 +118,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*httpMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

@@ -15,7 +15,6 @@ package ionos
import (
"errors"
"fmt"
"log/slog"
"time"
@@ -46,7 +45,7 @@ type Discovery struct{}
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*ionosMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
if conf.ionosEndpoint == "" {

@@ -173,7 +173,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
if c.Role == "" {
-return fmt.Errorf("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
+return errors.New("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
}
err = c.HTTPClientConfig.Validate()
if err != nil {
@@ -181,20 +181,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
if c.APIServer.URL != nil && c.KubeConfig != "" {
// Api-server and kubeconfig_file are mutually exclusive
-return fmt.Errorf("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
+return errors.New("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
}
if c.KubeConfig != "" && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
// Kubeconfig_file and custom http config are mutually exclusive
-return fmt.Errorf("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
+return errors.New("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
}
if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
-return fmt.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
+return errors.New("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
}
if c.APIServer.URL != nil && c.NamespaceDiscovery.IncludeOwnNamespace {
-return fmt.Errorf("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
+return errors.New("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
}
if c.KubeConfig != "" && c.NamespaceDiscovery.IncludeOwnNamespace {
-return fmt.Errorf("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
+return errors.New("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
}
foundSelectorRoles := make(map[Role]struct{})
@@ -288,7 +288,7 @@ func (d *Discovery) getNamespaces() []string {
func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
m, ok := metrics.(*kubernetesMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
if l == nil {
@@ -672,7 +672,7 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
pod, ok := obj.(*apiv1.Pod)
if !ok {
-return nil, fmt.Errorf("object is not a pod")
+return nil, errors.New("object is not a pod")
}
return []string{pod.Spec.NodeName}, nil
}
@@ -686,7 +686,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
indexers[podIndex] = func(obj interface{}) ([]string, error) {
e, ok := obj.(*apiv1.Endpoints)
if !ok {
-return nil, fmt.Errorf("object is not endpoints")
+return nil, errors.New("object is not endpoints")
}
var pods []string
for _, target := range e.Subsets {
@@ -705,7 +705,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
e, ok := obj.(*apiv1.Endpoints)
if !ok {
-return nil, fmt.Errorf("object is not endpoints")
+return nil, errors.New("object is not endpoints")
}
var nodes []string
for _, target := range e.Subsets {
@@ -751,7 +751,7 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
}
}
default:
-return nil, fmt.Errorf("object is not an endpointslice")
+return nil, errors.New("object is not an endpointslice")
}
return nodes, nil

@@ -141,7 +141,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*linodeMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

@@ -15,6 +15,7 @@ package discovery
import (
"context"
"errors"
"fmt"
"sort"
"strconv"
@@ -1209,9 +1210,9 @@ func TestGaugeFailedConfigs(t *testing.T) {
c := map[string]Configs{
"prometheus": {
-errorConfig{fmt.Errorf("tests error 0")},
-errorConfig{fmt.Errorf("tests error 1")},
-errorConfig{fmt.Errorf("tests error 2")},
+errorConfig{errors.New("tests error 0")},
+errorConfig{errors.New("tests error 1")},
+errorConfig{errors.New("tests error 2")},
},
}
discoveryManager.ApplyConfig(c)

@@ -143,7 +143,7 @@ type Discovery struct {
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*marathonMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd")

@@ -15,6 +15,7 @@ package moby
import (
"context"
"errors"
"fmt"
"log/slog"
"net"
@@ -110,7 +111,7 @@ func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error
return err
}
if c.Host == "" {
-return fmt.Errorf("host missing")
+return errors.New("host missing")
}
if _, err = url.Parse(c.Host); err != nil {
return err
@@ -131,7 +132,7 @@ type DockerDiscovery struct {
func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
m, ok := metrics.(*dockerMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
d := &DockerDiscovery{

@@ -15,6 +15,7 @@ package moby
import (
"context"
"errors"
"fmt"
"log/slog"
"net/http"
@@ -99,7 +100,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e
return err
}
if c.Host == "" {
-return fmt.Errorf("host missing")
+return errors.New("host missing")
}
if _, err = url.Parse(c.Host); err != nil {
return err
@@ -107,7 +108,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e
switch c.Role {
case "services", "nodes", "tasks":
case "":
-return fmt.Errorf("role missing (one of: tasks, services, nodes)")
+return errors.New("role missing (one of: tasks, services, nodes)")
default:
return fmt.Errorf("invalid role %s, expected tasks, services, or nodes", c.Role)
}
@@ -128,7 +129,7 @@ type Discovery struct {
func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*dockerswarmMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

@@ -124,7 +124,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*nomadMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

@@ -145,7 +145,7 @@ type refresher interface {
func NewDiscovery(conf *SDConfig, l *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*openstackMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
r, err := newRefresher(conf, l)

@@ -151,7 +151,7 @@ func newRefresher(conf *SDConfig, logger *slog.Logger) (refresher, error) {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*ovhcloudMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
r, err := newRefresher(conf, logger)

@@ -17,6 +17,7 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log/slog"
@@ -109,20 +110,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
if c.URL == "" {
-return fmt.Errorf("URL is missing")
+return errors.New("URL is missing")
}
parsedURL, err := url.Parse(c.URL)
if err != nil {
return err
}
if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
-return fmt.Errorf("URL scheme must be 'http' or 'https'")
+return errors.New("URL scheme must be 'http' or 'https'")
}
if parsedURL.Host == "" {
-return fmt.Errorf("host is missing in URL")
+return errors.New("host is missing in URL")
}
if c.Query == "" {
-return fmt.Errorf("query missing")
+return errors.New("query missing")
}
return c.HTTPClientConfig.Validate()
}
@@ -142,7 +143,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*puppetdbMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

@@ -15,7 +15,7 @@ package refresh
import (
"context"
"fmt"
"errors"
"testing"
"time"
@@ -64,7 +64,7 @@ func TestRefresh(t *testing.T) {
case 2:
return tg2, nil
}
-return nil, fmt.Errorf("some error")
+return nil, errors.New("some error")
}
interval := time.Millisecond

@@ -267,7 +267,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManager) (map[string]DiscovererMetrics, error) {
err := rmm.Register()
if err != nil {
-return nil, fmt.Errorf("failed to create service discovery refresh metrics")
+return nil, errors.New("failed to create service discovery refresh metrics")
}
metrics := make(map[string]DiscovererMetrics)
@@ -275,7 +275,7 @@ func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManag
currentSdMetrics := conf.NewDiscovererMetrics(registerer, rmm)
err = currentSdMetrics.Register()
if err != nil {
-return nil, fmt.Errorf("failed to create service discovery metrics")
+return nil, errors.New("failed to create service discovery metrics")
}
metrics[conf.Name()] = currentSdMetrics
}

@@ -188,7 +188,7 @@ type Discovery struct{}
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*scalewayMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
r, err := newRefresher(conf)

@@ -149,7 +149,7 @@ type Discovery struct {
func New(logger *slog.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*tritonMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
tls, err := config.NewTLSConfig(&conf.TLSConfig)

@@ -215,7 +215,7 @@ func getEndpointInfoForSystems(
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*uyuniMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
apiURL, err := url.Parse(conf.Server)

@@ -15,6 +15,7 @@ package vultr
import (
"context"
"errors"
"fmt"
"log/slog"
"net"
@@ -117,7 +118,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*vultrMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

@@ -14,6 +14,7 @@
package xds
import (
"errors"
"fmt"
"log/slog"
"net/url"
@@ -161,7 +162,7 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L
func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) {
m, ok := metrics.(*xdsMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}
// Default to "prometheus" if hostname is unavailable.

@@ -14,6 +14,7 @@
package histogram
import (
"errors"
"fmt"
"math"
"strings"
@@ -784,16 +785,16 @@ func (h *FloatHistogram) Validate() error {
return fmt.Errorf("custom buckets: %w", err)
}
if h.ZeroCount != 0 {
-return fmt.Errorf("custom buckets: must have zero count of 0")
+return errors.New("custom buckets: must have zero count of 0")
}
if h.ZeroThreshold != 0 {
-return fmt.Errorf("custom buckets: must have zero threshold of 0")
+return errors.New("custom buckets: must have zero threshold of 0")
}
if len(h.NegativeSpans) > 0 {
-return fmt.Errorf("custom buckets: must not have negative spans")
+return errors.New("custom buckets: must not have negative spans")
}
if len(h.NegativeBuckets) > 0 {
-return fmt.Errorf("custom buckets: must not have negative buckets")
+return errors.New("custom buckets: must not have negative buckets")
}
} else {
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
@@ -807,7 +808,7 @@ func (h *FloatHistogram) Validate() error {
return fmt.Errorf("negative side: %w", err)
}
if h.CustomValues != nil {
-return fmt.Errorf("histogram with exponential schema must not have custom bounds")
+return errors.New("histogram with exponential schema must not have custom bounds")
}
}
err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
@@ -948,10 +949,10 @@ func (h *FloatHistogram) floatBucketIterator(
positive bool, absoluteStartValue float64, targetSchema int32,
) floatBucketIterator {
if h.UsesCustomBuckets() && targetSchema != h.Schema {
-panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema"))
+panic(errors.New("cannot merge from custom buckets schema to exponential schema"))
}
if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) {
-panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema"))
+panic(errors.New("cannot merge from exponential buckets schema to custom schema"))
}
if targetSchema > h.Schema {
panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))

@@ -14,6 +14,7 @@
package histogram
import (
"errors"
"fmt"
"math"
"slices"
@@ -432,16 +433,16 @@ func (h *Histogram) Validate() error {
return fmt.Errorf("custom buckets: %w", err)
}
if h.ZeroCount != 0 {
-return fmt.Errorf("custom buckets: must have zero count of 0")
+return errors.New("custom buckets: must have zero count of 0")
}
if h.ZeroThreshold != 0 {
-return fmt.Errorf("custom buckets: must have zero threshold of 0")
+return errors.New("custom buckets: must have zero threshold of 0")
}
if len(h.NegativeSpans) > 0 {
-return fmt.Errorf("custom buckets: must not have negative spans")
+return errors.New("custom buckets: must not have negative spans")
}
if len(h.NegativeBuckets) > 0 {
-return fmt.Errorf("custom buckets: must not have negative buckets")
+return errors.New("custom buckets: must not have negative buckets")
}
} else {
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
@@ -455,7 +456,7 @@ func (h *Histogram) Validate() error {
return fmt.Errorf("negative side: %w", err)
}
if h.CustomValues != nil {
-return fmt.Errorf("histogram with exponential schema must not have custom bounds")
+return errors.New("histogram with exponential schema must not have custom bounds")
}
}
err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)

@@ -16,6 +16,7 @@ package relabel
import (
"crypto/md5"
"encoding/binary"
"errors"
"fmt"
"strconv"
"strings"
@@ -114,10 +115,10 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *Config) Validate() error {
if c.Action == "" {
-return fmt.Errorf("relabel action cannot be empty")
+return errors.New("relabel action cannot be empty")
}
if c.Modulus == 0 && c.Action == HashMod {
-return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus")
+return errors.New("relabel configuration for hashmod requires non-zero modulus")
}
if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" {
return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)

@@ -184,14 +184,14 @@ type RuleNode struct {
func (r *RuleNode) Validate() (nodes []WrappedError) {
if r.Record.Value != "" && r.Alert.Value != "" {
nodes = append(nodes, WrappedError{
-err: fmt.Errorf("only one of 'record' and 'alert' must be set"),
+err: errors.New("only one of 'record' and 'alert' must be set"),
node: &r.Record,
nodeAlt: &r.Alert,
})
}
if r.Record.Value == "" && r.Alert.Value == "" {
nodes = append(nodes, WrappedError{
-err: fmt.Errorf("one of 'record' or 'alert' must be set"),
+err: errors.New("one of 'record' or 'alert' must be set"),
node: &r.Record,
nodeAlt: &r.Alert,
})
@@ -199,7 +199,7 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
if r.Expr.Value == "" {
nodes = append(nodes, WrappedError{
-err: fmt.Errorf("field 'expr' must be set in rule"),
+err: errors.New("field 'expr' must be set in rule"),
node: &r.Expr,
})
} else if _, err := parser.ParseExpr(r.Expr.Value); err != nil {
@@ -211,19 +211,19 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
if r.Record.Value != "" {
if len(r.Annotations) > 0 {
nodes = append(nodes, WrappedError{
-err: fmt.Errorf("invalid field 'annotations' in recording rule"),
+err: errors.New("invalid field 'annotations' in recording rule"),
node: &r.Record,
})
}
if r.For != 0 {
nodes = append(nodes, WrappedError{
-err: fmt.Errorf("invalid field 'for' in recording rule"),
+err: errors.New("invalid field 'for' in recording rule"),
node: &r.Record,
})
}
if r.KeepFiringFor != 0 {
nodes = append(nodes, WrappedError{
-err: fmt.Errorf("invalid field 'keep_firing_for' in recording rule"),
+err: errors.New("invalid field 'keep_firing_for' in recording rule"),
node: &r.Record,
})
}

@@ -509,7 +509,7 @@ func yoloString(b []byte) string {
func parseFloat(s string) (float64, error) {
// Keep to pre-Go 1.13 float formats.
if strings.ContainsAny(s, "pP_") {
-return 0, fmt.Errorf("unsupported character in float")
+return 0, errors.New("unsupported character in float")
}
return strconv.ParseFloat(s, 64)
}

@@ -2047,7 +2047,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
}
for i := range mat {
if len(mat[i].Floats)+len(mat[i].Histograms) != 1 {
-panic(fmt.Errorf("unexpected number of samples"))
+panic(errors.New("unexpected number of samples"))
}
for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts += ev.interval {
if len(mat[i].Floats) > 0 {
@@ -3626,7 +3626,7 @@ func detectHistogramStatsDecoding(expr parser.Expr) {
if n, ok := node.(*parser.BinaryExpr); ok {
detectHistogramStatsDecoding(n.LHS)
detectHistogramStatsDecoding(n.RHS)
-return fmt.Errorf("stop")
+return errors.New("stop")
}
n, ok := (node).(*parser.VectorSelector)
@@ -3648,7 +3648,7 @@ func detectHistogramStatsDecoding(expr parser.Expr) {
break
}
}
-return fmt.Errorf("stop")
+return errors.New("stop")
})
}

@@ -90,7 +90,7 @@ func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints {
nodeTimestamp = n.Timestamp
}
offset = durationMilliseconds(n.OriginalOffset)
-return fmt.Errorf("end traversal")
+return errors.New("end traversal")
default:
return nil
}

@@ -672,7 +672,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
switch val := result.(type) {
case promql.Matrix:
if ev.ordered {
-return fmt.Errorf("expected ordered result, but query returned a matrix")
+return errors.New("expected ordered result, but query returned a matrix")
}
if ev.expectScalar {

@@ -15,6 +15,7 @@ package rules
import (
"context"
"errors"
"fmt"
"log/slog"
"net/url"
@@ -403,7 +404,7 @@ func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts t
resultFPs[h] = struct{}{}
if _, ok := alerts[h]; ok {
-return nil, fmt.Errorf("vector contains metrics with the same labelset after applying alert labels")
+return nil, errors.New("vector contains metrics with the same labelset after applying alert labels")
}
alerts[h] = &Alert{

@@ -15,6 +15,7 @@ package rules
import (
"context"
"errors"
"fmt"
"net/url"
"time"
@@ -103,7 +104,7 @@ func (rule *RecordingRule) Eval(ctx context.Context, queryOffset time.Duration,
// Check that the rule does not produce identical metrics after applying
// labels.
if vector.ContainsSameLabelset() {
-return nil, fmt.Errorf("vector contains metrics with the same labelset after applying rule labels")
+return nil, errors.New("vector contains metrics with the same labelset after applying rule labels")
}
numSeries := len(vector)

@@ -16,6 +16,7 @@ package scrape
import (
"bytes"
"context"
"errors"
"fmt"
"net/http"
"net/http/httptest"
@@ -898,7 +899,7 @@ func doOneScrape(t *testing.T, manager *Manager, appender *collectResultAppender
if len(appender.resultFloats) > 0 {
return nil
}
-return fmt.Errorf("expected some float samples, got none")
+return errors.New("expected some float samples, got none")
}), "after 1 minute")
manager.Stop()
}
@@ -1061,7 +1062,7 @@ func TestManagerCTZeroIngestionHistogram(t *testing.T) {
if len(app.resultHistograms) > 0 {
return nil
}
-return fmt.Errorf("expected some histogram samples, got none")
+return errors.New("expected some histogram samples, got none")
}), "after 1 minute")
scrapeManager.Stop()

@@ -1010,7 +1010,7 @@ func TestScrapeLoopForcedErr(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
sl := newBasicScrapeLoop(t, ctx, scraper, app, time.Second)
-forcedErr := fmt.Errorf("forced err")
+forcedErr := errors.New("forced err")
sl.setForcedError(forcedErr)
scraper.scrapeFunc = func(context.Context, io.Writer) error {
@@ -1464,7 +1464,7 @@ func TestScrapeLoopCache(t *testing.T) {
case 4:
cancel()
}
-return fmt.Errorf("scrape failed")
+return errors.New("scrape failed")
}
go func() {
@@ -3264,7 +3264,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
numScrapes++
if numScrapes%4 == 0 {
-return fmt.Errorf("scrape failed")
+return errors.New("scrape failed")
}
w.Write([]byte("metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n"))
return nil

@@ -41,17 +41,17 @@ var (
ErrOutOfOrderExemplar = errors.New("out of order exemplar")
ErrDuplicateExemplar = errors.New("duplicate exemplar")
ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength)
-ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0")
-ErrNativeHistogramsDisabled = fmt.Errorf("native histograms are disabled")
-ErrOOONativeHistogramsDisabled = fmt.Errorf("out-of-order native histogram ingestion is disabled")
+ErrExemplarsDisabled = errors.New("exemplar storage is disabled or max exemplars is less than or equal to 0")
+ErrNativeHistogramsDisabled = errors.New("native histograms are disabled")
+ErrOOONativeHistogramsDisabled = errors.New("out-of-order native histogram ingestion is disabled")
// ErrOutOfOrderCT indicates failed append of CT to the storage
// due to CT being older the then newer sample.
// NOTE(bwplotka): This can be both an instrumentation failure or commonly expected
// behaviour, and we currently don't have a way to determine this. As a result
// it's recommended to ignore this error for now.
-ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring")
-ErrCTNewerThanSample = fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring")
+ErrOutOfOrderCT = errors.New("created timestamp out of order, ignoring")
+ErrCTNewerThanSample = errors.New("CT is newer or the same as sample's timestamp, ignoring")
)
// SeriesRef is a generic series reference. In prometheus it is either a

@@ -16,7 +16,6 @@ package azuread
import (
"context"
"errors"
"fmt"
"net/http"
"strings"
"sync"
@@ -110,55 +109,55 @@ func (c *AzureADConfig) Validate() error {
}
if c.Cloud != AzureChina && c.Cloud != AzureGovernment && c.Cloud != AzurePublic {
-return fmt.Errorf("must provide a cloud in the Azure AD config")
+return errors.New("must provide a cloud in the Azure AD config")
}
if c.ManagedIdentity == nil && c.OAuth == nil && c.SDK == nil {
-return fmt.Errorf("must provide an Azure Managed Identity, Azure OAuth or Azure SDK in the Azure AD config")
+return errors.New("must provide an Azure Managed Identity, Azure OAuth or Azure SDK in the Azure AD config")
}
if c.ManagedIdentity != nil && c.OAuth != nil {
-return fmt.Errorf("cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config")
+return errors.New("cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config")
}
if c.ManagedIdentity != nil && c.SDK != nil {
-return fmt.Errorf("cannot provide both Azure Managed Identity and Azure SDK in the Azure AD config")
+return errors.New("cannot provide both Azure Managed Identity and Azure SDK in the Azure AD config")
}
if c.OAuth != nil && c.SDK != nil {
-return fmt.Errorf("cannot provide both Azure OAuth and Azure SDK in the Azure AD config")
+return errors.New("cannot provide both Azure OAuth and Azure SDK in the Azure AD config")
}
if c.ManagedIdentity != nil {
if c.ManagedIdentity.ClientID == "" {
-return fmt.Errorf("must provide an Azure Managed Identity client_id in the Azure AD config")
+return errors.New("must provide an Azure Managed Identity client_id in the Azure AD config")
}
_, err := uuid.Parse(c.ManagedIdentity.ClientID)
if err != nil {
-return fmt.Errorf("the provided Azure Managed Identity client_id is invalid")
+return errors.New("the provided Azure Managed Identity client_id is invalid")
}
}
if c.OAuth != nil {
if c.OAuth.ClientID == "" {
-return fmt.Errorf("must provide an Azure OAuth client_id in the Azure AD config")
+return errors.New("must provide an Azure OAuth client_id in the Azure AD config")
}
if c.OAuth.ClientSecret == "" {
-return fmt.Errorf("must provide an Azure OAuth client_secret in the Azure AD config")
+return errors.New("must provide an Azure OAuth client_secret in the Azure AD config")
}
if c.OAuth.TenantID == "" {
-return fmt.Errorf("must provide an Azure OAuth tenant_id in the Azure AD config")
+return errors.New("must provide an Azure OAuth tenant_id in the Azure AD config")
}
var err error
_, err = uuid.Parse(c.OAuth.ClientID)
if err != nil {
-return fmt.Errorf("the provided Azure OAuth client_id is invalid")
+return errors.New("the provided Azure OAuth client_id is invalid")
}
_, err = regexp.MatchString("^[0-9a-zA-Z-.]+$", c.OAuth.TenantID)
if err != nil {
-return fmt.Errorf("the provided Azure OAuth tenant_id is invalid")
+return errors.New("the provided Azure OAuth tenant_id is invalid")
}
}
@@ -168,7 +167,7 @@ func (c *AzureADConfig) Validate() error {
if c.SDK.TenantID != "" {
_, err = regexp.MatchString("^[0-9a-zA-Z-.]+$", c.SDK.TenantID)
if err != nil {
-return fmt.Errorf("the provided Azure OAuth tenant_id is invalid")
+return errors.New("the provided Azure OAuth tenant_id is invalid")
}
}
}

@@ -763,7 +763,7 @@ func TestDisableReshardOnRetry(t *testing.T) {
onStoreCalled()
return WriteResponseStats{}, RecoverableError{
-error: fmt.Errorf("fake error"),
+error: errors.New("fake error"),
retryAfter: model.Duration(retryAfter),
}
},

@@ -672,7 +672,7 @@ func TestCommitErr_V1Message(t *testing.T) {
req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
-appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")}
+appendable := &mockAppendable{commitErr: errors.New("commit error")}
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
recorder := httptest.NewRecorder()
@@ -696,7 +696,7 @@ func TestCommitErr_V2Message(t *testing.T) {
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
-appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")}
+appendable := &mockAppendable{commitErr: errors.New("commit error")}
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
recorder := httptest.NewRecorder()

@@ -132,7 +132,7 @@ func TestPool(t *testing.T) {
{
name: "invalid encoding",
encoding: EncNone,
-expErr: fmt.Errorf(`invalid chunk encoding "none"`),
+expErr: errors.New(`invalid chunk encoding "none"`),
},
} {
t.Run(tc.name, func(t *testing.T) {

@@ -15,6 +15,7 @@ package chunkenc
import (
"encoding/binary"
"errors"
"fmt"
"math"
@@ -761,9 +762,9 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
if !okToAppend || counterReset {
if appendOnly {
if counterReset {
-return nil, false, a, fmt.Errorf("float histogram counter reset")
+return nil, false, a, errors.New("float histogram counter reset")
}
-return nil, false, a, fmt.Errorf("float histogram schema change")
+return nil, false, a, errors.New("float histogram schema change")
}
newChunk := NewFloatHistogramChunk()
app, err := newChunk.Appender()
@@ -812,7 +813,7 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h)
if !okToAppend {
if appendOnly {
-return nil, false, a, fmt.Errorf("float gauge histogram schema change")
+return nil, false, a, errors.New("float gauge histogram schema change")
}
newChunk := NewFloatHistogramChunk()
app, err := newChunk.Appender()

@@ -15,6 +15,7 @@ package chunkenc
import (
"encoding/binary"
"errors"
"fmt"
"math"
@@ -795,9 +796,9 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
if !okToAppend || counterReset {
if appendOnly {
if counterReset {
-return nil, false, a, fmt.Errorf("histogram counter reset")
+return nil, false, a, errors.New("histogram counter reset")
}
-return nil, false, a, fmt.Errorf("histogram schema change")
+return nil, false, a, errors.New("histogram schema change")
}
newChunk := NewHistogramChunk()
app, err := newChunk.Appender()
@@ -846,7 +847,7 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h)
if !okToAppend {
if appendOnly {
-return nil, false, a, fmt.Errorf("gauge histogram schema change")
+return nil, false, a, errors.New("gauge histogram schema change")
}
newChunk := NewHistogramChunk()
app, err := newChunk.Appender()

@@ -16,6 +16,7 @@ package chunks
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"hash"
"hash/crc32"
@@ -172,7 +173,7 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
return emptyChunk, err
}
if newChunk != nil {
-return emptyChunk, fmt.Errorf("did not expect to start a second chunk")
+return emptyChunk, errors.New("did not expect to start a second chunk")
}
case chunkenc.ValFloatHistogram:
newChunk, _, ca, err = ca.AppendFloatHistogram(nil, s.Get(i).T(), s.Get(i).FH(), false)
@@ -180,7 +181,7 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
return emptyChunk, err
}
if newChunk != nil {
-return emptyChunk, fmt.Errorf("did not expect to start a second chunk")
+return emptyChunk, errors.New("did not expect to start a second chunk")
}
default:
panic(fmt.Sprintf("unknown sample type %s", sampleType.String()))
@@ -250,7 +251,7 @@ func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool {
return cm.MinTime <= maxt && mint <= cm.MaxTime
}
-var errInvalidSize = fmt.Errorf("invalid size")
+var errInvalidSize = errors.New("invalid size")
var castagnoliTable *crc32.Table

@@ -184,7 +184,7 @@ func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l *slog.L
func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts LeveledCompactorOptions) (*LeveledCompactor, error) {
if len(ranges) == 0 {
-return nil, fmt.Errorf("at least one range must be provided")
+return nil, errors.New("at least one range must be provided")
}
if pool == nil {
pool = chunkenc.NewPool()

@@ -2004,10 +2004,10 @@ func (db *DB) ForceHeadMMap() {
// will create a new block containing all data that's currently in the memory buffer/WAL.
func (db *DB) Snapshot(dir string, withHead bool) error {
if dir == db.dir {
-return fmt.Errorf("cannot snapshot into base directory")
+return errors.New("cannot snapshot into base directory")
}
if _, err := ulid.ParseStrict(dir); err == nil {
-return fmt.Errorf("dir must not be a valid ULID")
+return errors.New("dir must not be a valid ULID")
}
db.cmtx.Lock()

@@ -18,6 +18,7 @@ import (
"bytes"
"context"
"encoding/binary"
"errors"
"flag"
"fmt"
"hash/crc32"
@@ -1432,7 +1433,7 @@ func (*mockCompactorFailing) Plan(string) ([]string, error) {
func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *BlockMeta) ([]ulid.ULID, error) {
if len(c.blocks) >= c.max {
-return []ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail")
+return []ulid.ULID{}, errors.New("the compactor already did the maximum allowed blocks so it is time to fail")
}
block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 1)), nil)
@@ -1459,7 +1460,7 @@ func (*mockCompactorFailing) Compact(string, []string, []*Block) ([]ulid.ULID, e
}
func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ulid.ULID, err error) {
-return nil, fmt.Errorf("mock compaction failing CompactOOO")
+return nil, errors.New("mock compaction failing CompactOOO")
}
func TestTimeRetention(t *testing.T) {

@@ -509,7 +509,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
Ref: 0x1800000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300,
})
require.Nil(t, iterable)
-require.Equal(t, err, fmt.Errorf("not found"))
+require.EqualError(t, err, "not found")
require.Nil(t, c)
})

@@ -3324,7 +3324,7 @@ func (m mockMatcherIndex) LabelNames(context.Context, ...*labels.Matcher) ([]str
}
func (m mockMatcherIndex) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings {
-return index.ErrPostings(fmt.Errorf("PostingsForLabelMatching called"))
+return index.ErrPostings(errors.New("PostingsForLabelMatching called"))
}
func TestPostingsForMatcher(t *testing.T) {

@@ -15,6 +15,7 @@ package tsdb
import (
"context"
"errors"
"fmt"
"log/slog"
"path/filepath"
@@ -23,7 +24,7 @@ import (
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
-var ErrInvalidTimes = fmt.Errorf("max time is lesser than min time")
+var ErrInvalidTimes = errors.New("max time is lesser than min time")
// CreateBlock creates a chunkrange block from the samples passed to it, and writes it to disk.
func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger *slog.Logger) (string, error) {

@@ -1606,7 +1606,7 @@ func parseListRulesPaginationRequest(r *http.Request) (int64, string, *apiFuncRe
nextToken := r.URL.Query().Get("group_next_token")
if nextToken != "" && maxGroups == "" {
-errResult := invalidParamError(fmt.Errorf("group_limit needs to be present in order to paginate over the groups"), "group_next_token")
+errResult := invalidParamError(errors.New("group_limit needs to be present in order to paginate over the groups"), "group_next_token")
return -1, "", &errResult
}
@@ -1617,7 +1617,7 @@
return -1, "", &errResult
}
if parsedMaxGroups <= 0 {
-errResult := invalidParamError(fmt.Errorf("group_limit needs to be greater than 0"), "group_limit")
+errResult := invalidParamError(errors.New("group_limit needs to be greater than 0"), "group_limit")
return -1, "", &errResult
}
}

@@ -615,7 +615,7 @@ func TestGetSeries(t *testing.T) {
matchers: []string{`{foo="boo"}`, `{foo="baz"}`},
expectedErrorType: errorExec,
api: &API{
-Queryable: errorTestQueryable{err: fmt.Errorf("generic")},
+Queryable: errorTestQueryable{err: errors.New("generic")},
},
},
{
@@ -623,7 +623,7 @@
matchers: []string{`{foo="boo"}`, `{foo="baz"}`},
expectedErrorType: errorInternal,
api: &API{
-Queryable: errorTestQueryable{err: promql.ErrStorage{Err: fmt.Errorf("generic")}},
+Queryable: errorTestQueryable{err: promql.ErrStorage{Err: errors.New("generic")}},
},
},
} {
@@ -717,7 +717,7 @@ func TestQueryExemplars(t *testing.T) {
name: "should return errorExec upon genetic error",
expectedErrorType: errorExec,
api: &API{
-ExemplarQueryable: errorTestQueryable{err: fmt.Errorf("generic")},
+ExemplarQueryable: errorTestQueryable{err: errors.New("generic")},
},
query: url.Values{
"query": []string{`test_metric3{foo="boo"} - test_metric4{foo="bar"}`},
@@ -729,7 +729,7 @@
name: "should return errorInternal err type is ErrStorage",
expectedErrorType: errorInternal,
api: &API{
-ExemplarQueryable: errorTestQueryable{err: promql.ErrStorage{Err: fmt.Errorf("generic")}},
+ExemplarQueryable: errorTestQueryable{err: promql.ErrStorage{Err: errors.New("generic")}},
},
query: url.Values{
"query": []string{`test_metric3{foo="boo"} - test_metric4{foo="bar"}`},
@@ -838,7 +838,7 @@ func TestLabelNames(t *testing.T) {
matchers: []string{`{foo="boo"}`, `{foo="baz"}`},
expectedErrorType: errorExec,
api: &API{
-Queryable: errorTestQueryable{err: fmt.Errorf("generic")},
+Queryable: errorTestQueryable{err: errors.New("generic")},
},
},
{
@@ -846,7 +846,7 @@
matchers: []string{`{foo="boo"}`, `{foo="baz"}`},
expectedErrorType: errorInternal,
api: &API{
-Queryable: errorTestQueryable{err: promql.ErrStorage{Err: fmt.Errorf("generic")}},
+Queryable: errorTestQueryable{err: promql.ErrStorage{Err: errors.New("generic")}},
},
},
} {