Merge branch 'main' into rishabhk-promxy-upgrade

Signed-off-by: Rishabh Kumar <3275971+rishabhkumar92@users.noreply.github.com>
Rishabh Kumar 2024-11-06 10:01:29 -08:00 committed by GitHub
commit 245ad40efc
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
81 changed files with 415 additions and 302 deletions

View file

@ -14,11 +14,11 @@ jobs:
image: quay.io/prometheus/golang-builder:1.23-base
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- uses: ./.github/promci/actions/setup_environment
with:
enable_npm: true
- run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 test-flags=""
- run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
- run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
- run: make -C documentation/examples/remote_storage
- run: make -C documentation/examples
@ -30,7 +30,7 @@ jobs:
image: quay.io/prometheus/golang-builder:1.23-base
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- uses: ./.github/promci/actions/setup_environment
- run: go test --tags=dedupelabels ./...
- run: GOARCH=386 go test ./cmd/prometheus
@ -63,7 +63,7 @@ jobs:
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- uses: ./.github/promci/actions/setup_environment
with:
enable_go: false
@ -122,7 +122,7 @@ jobs:
thread: [ 0, 1, 2 ]
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- uses: ./.github/promci/actions/build
with:
promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
@ -147,7 +147,7 @@ jobs:
# should also be updated.
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- uses: ./.github/promci/actions/build
with:
parallelism: 12
@ -209,7 +209,7 @@ jobs:
if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- uses: ./.github/promci/actions/publish_main
with:
docker_hub_login: ${{ secrets.docker_hub_login }}
@ -226,7 +226,7 @@ jobs:
(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- uses: ./.github/promci/actions/publish_release
with:
docker_hub_login: ${{ secrets.docker_hub_login }}
@ -241,9 +241,9 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- name: Install nodejs
uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4
uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0
with:
node-version-file: "web/ui/.nvmrc"
registry-url: "https://registry.npmjs.org"

View file

@ -109,7 +109,7 @@ linters-settings:
extra-rules: true
perfsprint:
# Optimizes `fmt.Errorf`.
errorf: false
errorf: true
revive:
# By default, revive will enable only the linting rules that are named in the configuration file.
# So, it's needed to explicitly enable all required rules here.
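Setting `errorf: true` tells the perfsprint linter to flag `fmt.Errorf` calls that contain no formatting verbs and to prefer `errors.New`, which is the mechanical rewrite applied across the Go files in this commit. A minimal sketch of the pattern, using hypothetical function names rather than code from this repository:

package main

import (
	"errors"
	"fmt"
)

// validateBefore returns a constant-message error via fmt.Errorf; with
// errorf: true, perfsprint reports this call because nothing is formatted.
func validateBefore(name string) error {
	if name == "" {
		return fmt.Errorf("name is missing")
	}
	return nil
}

// validateAfter uses errors.New, which skips the formatting machinery.
// fmt.Errorf remains appropriate when interpolating or wrapping, e.g.
// fmt.Errorf("validate %s: %w", name, err).
func validateAfter(name string) error {
	if name == "" {
		return errors.New("name is missing")
	}
	return nil
}

func main() {
	fmt.Println(validateBefore(""), validateAfter(""))
}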

View file

@ -63,6 +63,10 @@ As is traditional with a beta release, we do **not** recommend users install 3.0
* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769
* [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029
## 2.55.1 / 2024-11-04
* [BUGFIX] `round()` function did not remove `__name__` label. #15250
## 2.55.0 / 2024-10-22
* [FEATURE] PromQL: Add experimental `info` function. #14495

View file

@ -2,6 +2,7 @@ ARG ARCH="amd64"
ARG OS="linux"
FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest
LABEL maintainer="The Prometheus Authors <prometheus-developers@googlegroups.com>"
LABEL org.opencontainers.image.source="https://github.com/prometheus/prometheus"
ARG ARCH="amd64"
ARG OS="linux"

View file

@ -190,7 +190,6 @@ type flagConfig struct {
queryConcurrency int
queryMaxSamples int
RemoteFlushDeadline model.Duration
nameEscapingScheme string
maxNotificationsSubscribers int
enableAutoReload bool
@ -551,15 +550,6 @@ func main() {
os.Exit(1)
}
if cfg.nameEscapingScheme != "" {
scheme, err := model.ToEscapingScheme(cfg.nameEscapingScheme)
if err != nil {
fmt.Fprintf(os.Stderr, `Invalid name escaping scheme: %q; Needs to be one of "values", "underscores", or "dots"`, cfg.nameEscapingScheme)
os.Exit(1)
}
model.NameEscapingScheme = scheme
}
if agentMode && len(serverOnlyFlags) > 0 {
fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags)
os.Exit(3)

View file

@ -34,8 +34,8 @@ import (
)
var (
errNotNativeHistogram = fmt.Errorf("not a native histogram")
errNotEnoughData = fmt.Errorf("not enough data")
errNotNativeHistogram = errors.New("not a native histogram")
errNotEnoughData = errors.New("not enough data")
outputHeader = `Bucket stats for each histogram series over time
------------------------------------------------
@ -169,7 +169,7 @@ func querySamples(ctx context.Context, api v1.API, query string, end time.Time)
matrix, ok := values.(model.Matrix)
if !ok {
return nil, fmt.Errorf("query of buckets resulted in non-Matrix")
return nil, errors.New("query of buckets resulted in non-Matrix")
}
return matrix, nil
@ -259,7 +259,7 @@ func getBucketCountsAtTime(matrix model.Matrix, numBuckets, timeIdx int) ([]int,
prev := matrix[i].Values[timeIdx]
// Assume the results are nicely aligned.
if curr.Timestamp != prev.Timestamp {
return counts, fmt.Errorf("matrix result is not time aligned")
return counts, errors.New("matrix result is not time aligned")
}
counts[i+1] = int(curr.Value - prev.Value)
}

View file

@ -49,7 +49,7 @@ func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) {
_, ts, _ := p.Series()
if ts == nil {
return 0, 0, fmt.Errorf("expected timestamp for series got none")
return 0, 0, errors.New("expected timestamp for series got none")
}
if *ts > maxt {

View file

@ -444,7 +444,7 @@ func checkExperimental(f bool) {
}
}
var errLint = fmt.Errorf("lint error")
var errLint = errors.New("lint error")
type lintConfig struct {
all bool

View file

@ -662,7 +662,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
fhchk, ok := chk.(*chunkenc.FloatHistogramChunk)
if !ok {
return fmt.Errorf("chunk is not FloatHistogramChunk")
return errors.New("chunk is not FloatHistogramChunk")
}
it := fhchk.Iterator(nil)
bucketCount := 0
@ -677,7 +677,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
hchk, ok := chk.(*chunkenc.HistogramChunk)
if !ok {
return fmt.Errorf("chunk is not HistogramChunk")
return errors.New("chunk is not HistogramChunk")
}
it := hchk.Iterator(nil)
bucketCount := 0

View file

@ -1072,7 +1072,7 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
if httpClientConfigAuthEnabled && c.SigV4Config != nil {
return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
return errors.New("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
}
// Check for users putting URLs in target groups.
@ -1420,7 +1420,7 @@ func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
for i, attr := range c.PromoteResourceAttributes {
attr = strings.TrimSpace(attr)
if attr == "" {
err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute"))
err = errors.Join(err, errors.New("empty promoted OTel resource attribute"))
continue
}
if _, exists := seen[attr]; exists {

View file

@ -161,7 +161,7 @@ type EC2Discovery struct {
func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
m, ok := metrics.(*ec2Metrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -134,7 +134,7 @@ type LightsailDiscovery struct {
func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
m, ok := metrics.(*lightsailMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -186,7 +186,7 @@ type Discovery struct {
func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*azureMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -189,7 +189,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*consulMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -15,6 +15,7 @@ package digitalocean
import (
"context"
"errors"
"fmt"
"log/slog"
"net"
@ -114,7 +115,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*digitaloceanMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

View file

@ -121,7 +121,7 @@ type Discovery struct {
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*dnsMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -15,7 +15,7 @@ package dns
import (
"context"
"fmt"
"errors"
"log/slog"
"net"
"testing"
@ -53,7 +53,7 @@ func TestDNS(t *testing.T) {
Type: "A",
},
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
return nil, fmt.Errorf("some error")
return nil, errors.New("some error")
},
expected: []*targetgroup.Group{},
},

View file

@ -16,7 +16,6 @@ package eureka
import (
"context"
"errors"
"fmt"
"log/slog"
"net"
"net/http"
@ -129,7 +128,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*eurekaMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd")

View file

@ -184,7 +184,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
fm, ok := metrics.(*fileMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -132,7 +132,7 @@ type Discovery struct {
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*gceMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

View file

@ -138,7 +138,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*hetznerMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
r, err := newRefresher(conf, logger)

View file

@ -86,17 +86,17 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
if c.URL == "" {
return fmt.Errorf("URL is missing")
return errors.New("URL is missing")
}
parsedURL, err := url.Parse(c.URL)
if err != nil {
return err
}
if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
return fmt.Errorf("URL scheme must be 'http' or 'https'")
return errors.New("URL scheme must be 'http' or 'https'")
}
if parsedURL.Host == "" {
return fmt.Errorf("host is missing in URL")
return errors.New("host is missing in URL")
}
return c.HTTPClientConfig.Validate()
}
@ -118,7 +118,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*httpMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -15,7 +15,6 @@ package ionos
import (
"errors"
"fmt"
"log/slog"
"time"
@ -46,7 +45,7 @@ type Discovery struct{}
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*ionosMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if conf.ionosEndpoint == "" {

View file

@ -173,7 +173,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
if c.Role == "" {
return fmt.Errorf("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
return errors.New("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
}
err = c.HTTPClientConfig.Validate()
if err != nil {
@ -181,20 +181,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
if c.APIServer.URL != nil && c.KubeConfig != "" {
// Api-server and kubeconfig_file are mutually exclusive
return fmt.Errorf("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
return errors.New("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
}
if c.KubeConfig != "" && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
// Kubeconfig_file and custom http config are mutually exclusive
return fmt.Errorf("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
return errors.New("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
}
if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
return fmt.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
return errors.New("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
}
if c.APIServer.URL != nil && c.NamespaceDiscovery.IncludeOwnNamespace {
return fmt.Errorf("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
return errors.New("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
}
if c.KubeConfig != "" && c.NamespaceDiscovery.IncludeOwnNamespace {
return fmt.Errorf("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
return errors.New("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
}
foundSelectorRoles := make(map[Role]struct{})
@ -288,7 +288,7 @@ func (d *Discovery) getNamespaces() []string {
func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
m, ok := metrics.(*kubernetesMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if l == nil {
@ -672,7 +672,7 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
pod, ok := obj.(*apiv1.Pod)
if !ok {
return nil, fmt.Errorf("object is not a pod")
return nil, errors.New("object is not a pod")
}
return []string{pod.Spec.NodeName}, nil
}
@ -686,7 +686,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
indexers[podIndex] = func(obj interface{}) ([]string, error) {
e, ok := obj.(*apiv1.Endpoints)
if !ok {
return nil, fmt.Errorf("object is not endpoints")
return nil, errors.New("object is not endpoints")
}
var pods []string
for _, target := range e.Subsets {
@ -705,7 +705,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
e, ok := obj.(*apiv1.Endpoints)
if !ok {
return nil, fmt.Errorf("object is not endpoints")
return nil, errors.New("object is not endpoints")
}
var nodes []string
for _, target := range e.Subsets {
@ -751,7 +751,7 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
}
}
default:
return nil, fmt.Errorf("object is not an endpointslice")
return nil, errors.New("object is not an endpointslice")
}
return nodes, nil

View file

@ -141,7 +141,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*linodeMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

View file

@ -15,6 +15,7 @@ package discovery
import (
"context"
"errors"
"fmt"
"sort"
"strconv"
@ -1209,9 +1210,9 @@ func TestGaugeFailedConfigs(t *testing.T) {
c := map[string]Configs{
"prometheus": {
errorConfig{fmt.Errorf("tests error 0")},
errorConfig{fmt.Errorf("tests error 1")},
errorConfig{fmt.Errorf("tests error 2")},
errorConfig{errors.New("tests error 0")},
errorConfig{errors.New("tests error 1")},
errorConfig{errors.New("tests error 2")},
},
}
discoveryManager.ApplyConfig(c)

View file

@ -143,7 +143,7 @@ type Discovery struct {
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*marathonMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd")

View file

@ -15,6 +15,7 @@ package moby
import (
"context"
"errors"
"fmt"
"log/slog"
"net"
@ -110,7 +111,7 @@ func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error
return err
}
if c.Host == "" {
return fmt.Errorf("host missing")
return errors.New("host missing")
}
if _, err = url.Parse(c.Host); err != nil {
return err
@ -131,7 +132,7 @@ type DockerDiscovery struct {
func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
m, ok := metrics.(*dockerMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
d := &DockerDiscovery{

View file

@ -15,6 +15,7 @@ package moby
import (
"context"
"errors"
"fmt"
"log/slog"
"net/http"
@ -99,7 +100,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e
return err
}
if c.Host == "" {
return fmt.Errorf("host missing")
return errors.New("host missing")
}
if _, err = url.Parse(c.Host); err != nil {
return err
@ -107,7 +108,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e
switch c.Role {
case "services", "nodes", "tasks":
case "":
return fmt.Errorf("role missing (one of: tasks, services, nodes)")
return errors.New("role missing (one of: tasks, services, nodes)")
default:
return fmt.Errorf("invalid role %s, expected tasks, services, or nodes", c.Role)
}
@ -128,7 +129,7 @@ type Discovery struct {
func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*dockerswarmMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

View file

@ -124,7 +124,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*nomadMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

View file

@ -145,7 +145,7 @@ type refresher interface {
func NewDiscovery(conf *SDConfig, l *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*openstackMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
r, err := newRefresher(conf, l)

View file

@ -151,7 +151,7 @@ func newRefresher(conf *SDConfig, logger *slog.Logger) (refresher, error) {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*ovhcloudMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
r, err := newRefresher(conf, logger)

View file

@ -17,6 +17,7 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log/slog"
@ -109,20 +110,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
if c.URL == "" {
return fmt.Errorf("URL is missing")
return errors.New("URL is missing")
}
parsedURL, err := url.Parse(c.URL)
if err != nil {
return err
}
if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
return fmt.Errorf("URL scheme must be 'http' or 'https'")
return errors.New("URL scheme must be 'http' or 'https'")
}
if parsedURL.Host == "" {
return fmt.Errorf("host is missing in URL")
return errors.New("host is missing in URL")
}
if c.Query == "" {
return fmt.Errorf("query missing")
return errors.New("query missing")
}
return c.HTTPClientConfig.Validate()
}
@ -142,7 +143,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*puppetdbMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
if logger == nil {

View file

@ -15,7 +15,7 @@ package refresh
import (
"context"
"fmt"
"errors"
"testing"
"time"
@ -64,7 +64,7 @@ func TestRefresh(t *testing.T) {
case 2:
return tg2, nil
}
return nil, fmt.Errorf("some error")
return nil, errors.New("some error")
}
interval := time.Millisecond

View file

@ -267,7 +267,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManager) (map[string]DiscovererMetrics, error) {
err := rmm.Register()
if err != nil {
return nil, fmt.Errorf("failed to create service discovery refresh metrics")
return nil, errors.New("failed to create service discovery refresh metrics")
}
metrics := make(map[string]DiscovererMetrics)
@ -275,7 +275,7 @@ func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManag
currentSdMetrics := conf.NewDiscovererMetrics(registerer, rmm)
err = currentSdMetrics.Register()
if err != nil {
return nil, fmt.Errorf("failed to create service discovery metrics")
return nil, errors.New("failed to create service discovery metrics")
}
metrics[conf.Name()] = currentSdMetrics
}

View file

@ -188,7 +188,7 @@ type Discovery struct{}
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*scalewayMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
r, err := newRefresher(conf)

View file

@ -149,7 +149,7 @@ type Discovery struct {
func New(logger *slog.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*tritonMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
tls, err := config.NewTLSConfig(&conf.TLSConfig)

View file

@ -215,7 +215,7 @@ func getEndpointInfoForSystems(
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*uyuniMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
apiURL, err := url.Parse(conf.Server)

View file

@ -15,6 +15,7 @@ package vultr
import (
"context"
"errors"
"fmt"
"log/slog"
"net"
@ -117,7 +118,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*vultrMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
d := &Discovery{

View file

@ -14,6 +14,7 @@
package xds
import (
"errors"
"fmt"
"log/slog"
"net/url"
@ -161,7 +162,7 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L
func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) {
m, ok := metrics.(*xdsMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
return nil, errors.New("invalid discovery metrics type")
}
// Default to "prometheus" if hostname is unavailable.

View file

@ -6,9 +6,9 @@ require (
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.11.6
github.com/prometheus/client_golang v1.20.4
github.com/prometheus/common v0.60.0
github.com/influxdata/influxdb v1.11.7
github.com/prometheus/client_golang v1.20.5
github.com/prometheus/common v0.60.1
github.com/prometheus/prometheus v0.53.1
github.com/stretchr/testify v1.9.0
)

View file

@ -166,8 +166,8 @@ github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MI
github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/influxdata/influxdb v1.11.6 h1:zS5MRY+RQ5/XFTer5R8xQRnY17JYSbacvO6OaP164wU=
github.com/influxdata/influxdb v1.11.6/go.mod h1:F10NoQb9qa04lME3pTPWQrYt4JZ/ke1Eei+1ttgHHrg=
github.com/influxdata/influxdb v1.11.7 h1:C31A+S9YfjTCOuAv9Qs0ZdQufslOZZBtejjxiV8QNQw=
github.com/influxdata/influxdb v1.11.7/go.mod h1:zRTAuk/Ie/V1LGxJUv8jfDmfv+ypz22lxfhc1MxC3rI=
github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@ -253,8 +253,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -264,8 +264,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=

View file

@ -14,6 +14,7 @@
package histogram
import (
"errors"
"fmt"
"math"
"strings"
@ -784,16 +785,16 @@ func (h *FloatHistogram) Validate() error {
return fmt.Errorf("custom buckets: %w", err)
}
if h.ZeroCount != 0 {
return fmt.Errorf("custom buckets: must have zero count of 0")
return errors.New("custom buckets: must have zero count of 0")
}
if h.ZeroThreshold != 0 {
return fmt.Errorf("custom buckets: must have zero threshold of 0")
return errors.New("custom buckets: must have zero threshold of 0")
}
if len(h.NegativeSpans) > 0 {
return fmt.Errorf("custom buckets: must not have negative spans")
return errors.New("custom buckets: must not have negative spans")
}
if len(h.NegativeBuckets) > 0 {
return fmt.Errorf("custom buckets: must not have negative buckets")
return errors.New("custom buckets: must not have negative buckets")
}
} else {
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
@ -807,7 +808,7 @@ func (h *FloatHistogram) Validate() error {
return fmt.Errorf("negative side: %w", err)
}
if h.CustomValues != nil {
return fmt.Errorf("histogram with exponential schema must not have custom bounds")
return errors.New("histogram with exponential schema must not have custom bounds")
}
}
err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
@ -948,10 +949,10 @@ func (h *FloatHistogram) floatBucketIterator(
positive bool, absoluteStartValue float64, targetSchema int32,
) floatBucketIterator {
if h.UsesCustomBuckets() && targetSchema != h.Schema {
panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema"))
panic(errors.New("cannot merge from custom buckets schema to exponential schema"))
}
if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) {
panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema"))
panic(errors.New("cannot merge from exponential buckets schema to custom schema"))
}
if targetSchema > h.Schema {
panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))

View file

@ -14,6 +14,7 @@
package histogram
import (
"errors"
"fmt"
"math"
"slices"
@ -432,16 +433,16 @@ func (h *Histogram) Validate() error {
return fmt.Errorf("custom buckets: %w", err)
}
if h.ZeroCount != 0 {
return fmt.Errorf("custom buckets: must have zero count of 0")
return errors.New("custom buckets: must have zero count of 0")
}
if h.ZeroThreshold != 0 {
return fmt.Errorf("custom buckets: must have zero threshold of 0")
return errors.New("custom buckets: must have zero threshold of 0")
}
if len(h.NegativeSpans) > 0 {
return fmt.Errorf("custom buckets: must not have negative spans")
return errors.New("custom buckets: must not have negative spans")
}
if len(h.NegativeBuckets) > 0 {
return fmt.Errorf("custom buckets: must not have negative buckets")
return errors.New("custom buckets: must not have negative buckets")
}
} else {
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
@ -455,7 +456,7 @@ func (h *Histogram) Validate() error {
return fmt.Errorf("negative side: %w", err)
}
if h.CustomValues != nil {
return fmt.Errorf("histogram with exponential schema must not have custom bounds")
return errors.New("histogram with exponential schema must not have custom bounds")
}
}
err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)

View file

@ -16,6 +16,7 @@ package relabel
import (
"crypto/md5"
"encoding/binary"
"errors"
"fmt"
"strconv"
"strings"
@ -114,10 +115,10 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *Config) Validate() error {
if c.Action == "" {
return fmt.Errorf("relabel action cannot be empty")
return errors.New("relabel action cannot be empty")
}
if c.Modulus == 0 && c.Action == HashMod {
return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus")
return errors.New("relabel configuration for hashmod requires non-zero modulus")
}
if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" {
return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)

View file

@ -184,14 +184,14 @@ type RuleNode struct {
func (r *RuleNode) Validate() (nodes []WrappedError) {
if r.Record.Value != "" && r.Alert.Value != "" {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("only one of 'record' and 'alert' must be set"),
err: errors.New("only one of 'record' and 'alert' must be set"),
node: &r.Record,
nodeAlt: &r.Alert,
})
}
if r.Record.Value == "" && r.Alert.Value == "" {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("one of 'record' or 'alert' must be set"),
err: errors.New("one of 'record' or 'alert' must be set"),
node: &r.Record,
nodeAlt: &r.Alert,
})
@ -199,7 +199,7 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
if r.Expr.Value == "" {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("field 'expr' must be set in rule"),
err: errors.New("field 'expr' must be set in rule"),
node: &r.Expr,
})
} else if _, err := parser.ParseExpr(r.Expr.Value); err != nil {
@ -211,19 +211,19 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
if r.Record.Value != "" {
if len(r.Annotations) > 0 {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("invalid field 'annotations' in recording rule"),
err: errors.New("invalid field 'annotations' in recording rule"),
node: &r.Record,
})
}
if r.For != 0 {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("invalid field 'for' in recording rule"),
err: errors.New("invalid field 'for' in recording rule"),
node: &r.Record,
})
}
if r.KeepFiringFor != 0 {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("invalid field 'keep_firing_for' in recording rule"),
err: errors.New("invalid field 'keep_firing_for' in recording rule"),
node: &r.Record,
})
}

View file

@ -509,7 +509,7 @@ func yoloString(b []byte) string {
func parseFloat(s string) (float64, error) {
// Keep to pre-Go 1.13 float formats.
if strings.ContainsAny(s, "pP_") {
return 0, fmt.Errorf("unsupported character in float")
return 0, errors.New("unsupported character in float")
}
return strconv.ParseFloat(s, 64)
}

View file

@ -2092,7 +2092,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
}
for i := range mat {
if len(mat[i].Floats)+len(mat[i].Histograms) != 1 {
panic(fmt.Errorf("unexpected number of samples"))
panic(errors.New("unexpected number of samples"))
}
for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts += ev.interval {
if len(mat[i].Floats) > 0 {
@ -3671,7 +3671,7 @@ func detectHistogramStatsDecoding(expr parser.Expr) {
if n, ok := node.(*parser.BinaryExpr); ok {
detectHistogramStatsDecoding(n.LHS)
detectHistogramStatsDecoding(n.RHS)
return fmt.Errorf("stop")
return errors.New("stop")
}
n, ok := (node).(*parser.VectorSelector)
@ -3693,8 +3693,8 @@ func detectHistogramStatsDecoding(expr parser.Expr) {
break
}
}
return fmt.Errorf("stop")
}, nil)
return errors.New("stop")
})
}
func makeInt64Pointer(val int64) *int64 {

View file

@ -3956,3 +3956,65 @@ func (s mockSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
}
return storage.ChainSampleIteratorFromIterators(it, iterables)
}
func TestEvaluationWithDelayedNameRemovalDisabled(t *testing.T) {
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
EnableAtModifier: true,
MaxSamples: 10000,
Timeout: 10 * time.Second,
EnableDelayedNameRemoval: false,
}
engine := promqltest.NewTestEngineWithOpts(t, opts)
promqltest.RunTest(t, `
load 5m
metric{env="1"} 0 60 120
another_metric{env="1"} 60 120 180
# Does not drop __name__ for vector selector
eval instant at 10m metric{env="1"}
metric{env="1"} 120
# Drops __name__ for unary operators
eval instant at 10m -metric
{env="1"} -120
# Drops __name__ for binary operators
eval instant at 10m metric + another_metric
{env="1"} 300
# Does not drop __name__ for binary comparison operators
eval instant at 10m metric <= another_metric
metric{env="1"} 120
# Drops __name__ for binary comparison operators with "bool" modifier
eval instant at 10m metric <= bool another_metric
{env="1"} 1
# Drops __name__ for vector-scalar operations
eval instant at 10m metric * 2
{env="1"} 240
# Drops __name__ for instant-vector functions
eval instant at 10m clamp(metric, 0, 100)
{env="1"} 100
# Drops __name__ for round function
eval instant at 10m round(metric)
{env="1"} 120
# Drops __name__ for range-vector functions
eval instant at 10m rate(metric{env="1"}[10m])
{env="1"} 0.2
# Does not drop __name__ for last_over_time function
eval instant at 10m last_over_time(metric{env="1"}[10m])
metric{env="1"} 120
# Drops name for other _over_time functions
eval instant at 10m max_over_time(metric{env="1"}[10m])
{env="1"} 120
`, engine)
}

View file

@ -538,6 +538,9 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
continue
}
f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse
if !enh.enableDelayedNameRemoval {
el.Metric = el.Metric.DropMetricName()
}
enh.Out = append(enh.Out, Sample{
Metric: el.Metric,
F: f,

View file

@ -90,7 +90,7 @@ func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints {
nodeTimestamp = n.Timestamp
}
offset = durationMilliseconds(n.OriginalOffset)
return fmt.Errorf("end traversal")
return errors.New("end traversal")
default:
return nil
}

View file

@ -675,141 +675,141 @@ func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.Sequenc
// compareResult compares the result value with the defined expectation.
func (ev *evalCmd) compareResult(result parser.Value) error {
switch val := result.(type) {
case promql.Matrix:
if ev.ordered {
return fmt.Errorf("expected ordered result, but query returned a matrix")
}
switch val := result.(type) {
case promql.Matrix:
if ev.ordered {
return errors.New("expected ordered result, but query returned a matrix")
}
if ev.expectScalar {
return fmt.Errorf("expected scalar result, but got matrix %s", val.String())
}
if ev.expectScalar {
return fmt.Errorf("expected scalar result, but got matrix %s", val.String())
}
if err := assertMatrixSorted(val); err != nil {
return err
}
if err := assertMatrixSorted(val); err != nil {
return err
}
seen := map[uint64]bool{}
for _, s := range val {
hash := s.Metric.Hash()
if _, ok := ev.metrics[hash]; !ok {
return fmt.Errorf("unexpected metric %s in result, has %s", s.Metric, formatSeriesResult(s))
}
seen[hash] = true
exp := ev.expected[hash]
seen := map[uint64]bool{}
for _, s := range val {
hash := s.Metric.Hash()
if _, ok := ev.metrics[hash]; !ok {
return fmt.Errorf("unexpected metric %s in result, has %s", s.Metric, formatSeriesResult(s))
}
seen[hash] = true
exp := ev.expected[hash]
var expectedFloats []promql.FPoint
var expectedHistograms []promql.HPoint
var expectedFloats []promql.FPoint
var expectedHistograms []promql.HPoint
for i, e := range exp.vals {
ts := ev.start.Add(time.Duration(i) * ev.step)
for i, e := range exp.vals {
ts := ev.start.Add(time.Duration(i) * ev.step)
if ts.After(ev.end) {
return fmt.Errorf("expected %v points for %s, but query time range cannot return this many points", len(exp.vals), ev.metrics[hash])
}
if ts.After(ev.end) {
return fmt.Errorf("expected %v points for %s, but query time range cannot return this many points", len(exp.vals), ev.metrics[hash])
}
t := ts.UnixNano() / int64(time.Millisecond/time.Nanosecond)
t := ts.UnixNano() / int64(time.Millisecond/time.Nanosecond)
if e.Histogram != nil {
expectedHistograms = append(expectedHistograms, promql.HPoint{T: t, H: e.Histogram})
} else if !e.Omitted {
expectedFloats = append(expectedFloats, promql.FPoint{T: t, F: e.Value})
}
}
if e.Histogram != nil {
expectedHistograms = append(expectedHistograms, promql.HPoint{T: t, H: e.Histogram})
} else if !e.Omitted {
expectedFloats = append(expectedFloats, promql.FPoint{T: t, F: e.Value})
}
}
if len(expectedFloats) != len(s.Floats) || len(expectedHistograms) != len(s.Histograms) {
return fmt.Errorf("expected %v float points and %v histogram points for %s, but got %s", len(expectedFloats), len(expectedHistograms), ev.metrics[hash], formatSeriesResult(s))
}
if len(expectedFloats) != len(s.Floats) || len(expectedHistograms) != len(s.Histograms) {
return fmt.Errorf("expected %v float points and %v histogram points for %s, but got %s", len(expectedFloats), len(expectedHistograms), ev.metrics[hash], formatSeriesResult(s))
}
for i, expected := range expectedFloats {
actual := s.Floats[i]
for i, expected := range expectedFloats {
actual := s.Floats[i]
if expected.T != actual.T {
return fmt.Errorf("expected float value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
}
if expected.T != actual.T {
return fmt.Errorf("expected float value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
}
if !almost.Equal(actual.F, expected.F, defaultEpsilon) {
return fmt.Errorf("expected float value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.F, actual.F, formatSeriesResult(s))
}
}
if !almost.Equal(actual.F, expected.F, defaultEpsilon) {
return fmt.Errorf("expected float value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.F, actual.F, formatSeriesResult(s))
}
}
for i, expected := range expectedHistograms {
actual := s.Histograms[i]
for i, expected := range expectedHistograms {
actual := s.Histograms[i]
if expected.T != actual.T {
return fmt.Errorf("expected histogram value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
}
if expected.T != actual.T {
return fmt.Errorf("expected histogram value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
}
if !compareNativeHistogram(expected.H.Compact(0), actual.H.Compact(0)) {
return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H.TestExpression(), actual.H.TestExpression(), formatSeriesResult(s))
}
}
}
if !compareNativeHistogram(expected.H.Compact(0), actual.H.Compact(0)) {
return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H.TestExpression(), actual.H.TestExpression(), formatSeriesResult(s))
}
}
}
for hash := range ev.expected {
if !seen[hash] {
return fmt.Errorf("expected metric %s not found", ev.metrics[hash])
}
}
for hash := range ev.expected {
if !seen[hash] {
return fmt.Errorf("expected metric %s not found", ev.metrics[hash])
}
}
case promql.Vector:
if ev.expectScalar {
return fmt.Errorf("expected scalar result, but got vector %s", val.String())
}
case promql.Vector:
if ev.expectScalar {
return fmt.Errorf("expected scalar result, but got vector %s", val.String())
}
seen := map[uint64]bool{}
for pos, v := range val {
fp := v.Metric.Hash()
if _, ok := ev.metrics[fp]; !ok {
if v.H != nil {
return fmt.Errorf("unexpected metric %s in result, has value %v", v.Metric, v.H)
}
seen := map[uint64]bool{}
for pos, v := range val {
fp := v.Metric.Hash()
if _, ok := ev.metrics[fp]; !ok {
if v.H != nil {
return fmt.Errorf("unexpected metric %s in result, has value %v", v.Metric, v.H)
}
return fmt.Errorf("unexpected metric %s in result, has value %v", v.Metric, v.F)
}
exp := ev.expected[fp]
if ev.ordered && exp.pos != pos+1 {
return fmt.Errorf("expected metric %s with %v at position %d but was at %d", v.Metric, exp.vals, exp.pos, pos+1)
}
exp0 := exp.vals[0]
expH := exp0.Histogram
if expH == nil && v.H != nil {
return fmt.Errorf("expected float value %v for %s but got histogram %s", exp0, v.Metric, HistogramTestExpression(v.H))
}
if expH != nil && v.H == nil {
return fmt.Errorf("expected histogram %s for %s but got float value %v", HistogramTestExpression(expH), v.Metric, v.F)
}
if expH != nil && !compareNativeHistogram(expH.Compact(0), v.H.Compact(0)) {
return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H))
}
if !almost.Equal(exp0.Value, v.F, defaultEpsilon) {
return fmt.Errorf("expected %v for %s but got %v", exp0.Value, v.Metric, v.F)
}
return fmt.Errorf("unexpected metric %s in result, has value %v", v.Metric, v.F)
}
exp := ev.expected[fp]
if ev.ordered && exp.pos != pos+1 {
return fmt.Errorf("expected metric %s with %v at position %d but was at %d", v.Metric, exp.vals, exp.pos, pos+1)
}
exp0 := exp.vals[0]
expH := exp0.Histogram
if expH == nil && v.H != nil {
return fmt.Errorf("expected float value %v for %s but got histogram %s", exp0, v.Metric, HistogramTestExpression(v.H))
}
if expH != nil && v.H == nil {
return fmt.Errorf("expected histogram %s for %s but got float value %v", HistogramTestExpression(expH), v.Metric, v.F)
}
if expH != nil && !compareNativeHistogram(expH.Compact(0), v.H.Compact(0)) {
return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H))
}
if !almost.Equal(exp0.Value, v.F, defaultEpsilon) {
return fmt.Errorf("expected %v for %s but got %v", exp0.Value, v.Metric, v.F)
}
seen[fp] = true
}
for fp, expVals := range ev.expected {
if !seen[fp] {
return fmt.Errorf("expected metric %s with %v not found", ev.metrics[fp], expVals)
}
}
seen[fp] = true
}
for fp, expVals := range ev.expected {
if !seen[fp] {
return fmt.Errorf("expected metric %s with %v not found", ev.metrics[fp], expVals)
}
}
case promql.Scalar:
if !ev.expectScalar {
return fmt.Errorf("expected vector or matrix result, but got %s", val.String())
}
exp0 := ev.expected[0].vals[0]
if exp0.Histogram != nil {
return fmt.Errorf("expected histogram %v but got %s", exp0.Histogram.TestExpression(), val.String())
}
if !almost.Equal(exp0.Value, val.V, defaultEpsilon) {
return fmt.Errorf("expected scalar %v but got %v", exp0.Value, val.V)
}
case promql.Scalar:
if !ev.expectScalar {
return fmt.Errorf("expected vector or matrix result, but got %s", val.String())
}
exp0 := ev.expected[0].vals[0]
if exp0.Histogram != nil {
return fmt.Errorf("expected histogram %v but got %s", exp0.Histogram.TestExpression(), val.String())
}
if !almost.Equal(exp0.Value, val.V, defaultEpsilon) {
return fmt.Errorf("expected scalar %v but got %v", exp0.Value, val.V)
}
default:
panic(fmt.Errorf("promql.Test.compareResult: unexpected result type %T", result))
}
return nil
default:
panic(fmt.Errorf("promql.Test.compareResult: unexpected result type %T", result))
}
return nil
}
// compareNativeHistogram is helper function to compare two native histograms

View file

@ -31,6 +31,10 @@ eval instant at 10m metric * 2
eval instant at 10m clamp(metric, 0, 100)
{env="1"} 100
# Drops __name__ for round function
eval instant at 10m round(metric)
{env="1"} 120
# Drops __name__ for range-vector functions
eval instant at 10m rate(metric{env="1"}[10m])
{env="1"} 0.2

View file

@ -15,6 +15,7 @@ package rules
import (
"context"
"errors"
"fmt"
"log/slog"
"net/url"
@ -403,7 +404,7 @@ func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts t
resultFPs[h] = struct{}{}
if _, ok := alerts[h]; ok {
return nil, fmt.Errorf("vector contains metrics with the same labelset after applying alert labels")
return nil, errors.New("vector contains metrics with the same labelset after applying alert labels")
}
alerts[h] = &Alert{

View file

@ -15,6 +15,7 @@ package rules
import (
"context"
"errors"
"fmt"
"net/url"
"time"
@ -103,7 +104,7 @@ func (rule *RecordingRule) Eval(ctx context.Context, queryOffset time.Duration,
// Check that the rule does not produce identical metrics after applying
// labels.
if vector.ContainsSameLabelset() {
return nil, fmt.Errorf("vector contains metrics with the same labelset after applying rule labels")
return nil, errors.New("vector contains metrics with the same labelset after applying rule labels")
}
numSeries := len(vector)

View file

@ -94,8 +94,6 @@ type Options struct {
skipOffsetting bool
}
const DefaultNameEscapingScheme = model.ValueEncodingEscaping
// Manager maintains a set of scrape pools and manages start/stop cycles
// when receiving new target groups from the discovery manager.
type Manager struct {

View file

@ -16,6 +16,7 @@ package scrape
import (
"bytes"
"context"
"errors"
"fmt"
"net/http"
"net/http/httptest"
@ -898,7 +899,7 @@ func doOneScrape(t *testing.T, manager *Manager, appender *collectResultAppender
if len(appender.resultFloats) > 0 {
return nil
}
return fmt.Errorf("expected some float samples, got none")
return errors.New("expected some float samples, got none")
}), "after 1 minute")
manager.Stop()
}
@ -1061,7 +1062,7 @@ func TestManagerCTZeroIngestionHistogram(t *testing.T) {
if len(app.resultHistograms) > 0 {
return nil
}
return fmt.Errorf("expected some histogram samples, got none")
return errors.New("expected some histogram samples, got none")
}), "after 1 minute")
scrapeManager.Stop()

View file

@ -1421,7 +1421,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
sl.l.Debug("Scrape failed", "err", scrapeErr)
sl.scrapeFailureLoggerMtx.RLock()
if sl.scrapeFailureLogger != nil {
sl.scrapeFailureLogger.Error("err", scrapeErr)
sl.scrapeFailureLogger.Error(scrapeErr.Error())
}
sl.scrapeFailureLoggerMtx.RUnlock()
if errc != nil {
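The logging change above follows `log/slog` semantics: `Logger.Error(msg string, args ...any)` treats its first argument as the message and the rest as key/value pairs, so `Error("err", scrapeErr)` produced the literal message "err" with a keyless attribute, while passing `scrapeErr.Error()` logs the error text itself. A short standalone sketch of the difference (not Prometheus code):

package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	scrapeErr := errors.New("server returned HTTP status 503 Service Unavailable")

	// Old call shape: "err" is the message and scrapeErr is a dangling
	// argument, which the built-in handlers render under the key !BADKEY.
	logger.Error("err", scrapeErr)

	// New call shape: the error text is the message itself.
	logger.Error(scrapeErr.Error())
}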

View file

@ -1010,7 +1010,7 @@ func TestScrapeLoopForcedErr(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
sl := newBasicScrapeLoop(t, ctx, scraper, app, time.Second)
forcedErr := fmt.Errorf("forced err")
forcedErr := errors.New("forced err")
sl.setForcedError(forcedErr)
scraper.scrapeFunc = func(context.Context, io.Writer) error {
@ -1464,7 +1464,7 @@ func TestScrapeLoopCache(t *testing.T) {
case 4:
cancel()
}
return fmt.Errorf("scrape failed")
return errors.New("scrape failed")
}
go func() {
@ -3264,7 +3264,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
numScrapes++
if numScrapes%4 == 0 {
return fmt.Errorf("scrape failed")
return errors.New("scrape failed")
}
w.Write([]byte("metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n"))
return nil

View file

@ -26,14 +26,14 @@ jobs:
- name: Checkout repository
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- name: Install Go
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
with:
go-version: 1.23.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
with:
args: --verbose
version: v1.60.2

View file

@ -41,17 +41,17 @@ var (
ErrOutOfOrderExemplar = errors.New("out of order exemplar")
ErrDuplicateExemplar = errors.New("duplicate exemplar")
ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength)
ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0")
ErrNativeHistogramsDisabled = fmt.Errorf("native histograms are disabled")
ErrOOONativeHistogramsDisabled = fmt.Errorf("out-of-order native histogram ingestion is disabled")
ErrExemplarsDisabled = errors.New("exemplar storage is disabled or max exemplars is less than or equal to 0")
ErrNativeHistogramsDisabled = errors.New("native histograms are disabled")
ErrOOONativeHistogramsDisabled = errors.New("out-of-order native histogram ingestion is disabled")
// ErrOutOfOrderCT indicates failed append of CT to the storage
// due to CT being older than the newer sample.
// NOTE(bwplotka): This can be either an instrumentation failure or commonly expected
// behaviour, and we currently don't have a way to determine this. As a result
// it's recommended to ignore this error for now.
ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring")
ErrCTNewerThanSample = fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring")
ErrOutOfOrderCT = errors.New("created timestamp out of order, ignoring")
ErrCTNewerThanSample = errors.New("CT is newer or the same as sample's timestamp, ignoring")
)
// SeriesRef is a generic series reference. In prometheus it is either a
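Because these sentinels are plain error values, callers compare them with `errors.Is`; the comment above notes that `ErrOutOfOrderCT` in particular is often safe to ignore. A hedged sketch of such a caller, using a local stand-in variable and a hypothetical `appendSample` helper rather than the real storage API:

package main

import (
	"errors"
	"fmt"
)

// Local stand-in for the sentinel shown in the hunk above.
var errOutOfOrderCT = errors.New("created timestamp out of order, ignoring")

// appendSample is a hypothetical helper that fails with the wrapped sentinel.
func appendSample() error {
	return fmt.Errorf("append failed: %w", errOutOfOrderCT)
}

func main() {
	if err := appendSample(); err != nil {
		if errors.Is(err, errOutOfOrderCT) {
			// Commonly expected per the storage comment; record and continue.
			fmt.Println("ignoring:", err)
			return
		}
		panic(err)
	}
}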

View file

@ -16,7 +16,6 @@ package azuread
import (
"context"
"errors"
"fmt"
"net/http"
"strings"
"sync"
@ -110,55 +109,55 @@ func (c *AzureADConfig) Validate() error {
}
if c.Cloud != AzureChina && c.Cloud != AzureGovernment && c.Cloud != AzurePublic {
return fmt.Errorf("must provide a cloud in the Azure AD config")
return errors.New("must provide a cloud in the Azure AD config")
}
if c.ManagedIdentity == nil && c.OAuth == nil && c.SDK == nil {
return fmt.Errorf("must provide an Azure Managed Identity, Azure OAuth or Azure SDK in the Azure AD config")
return errors.New("must provide an Azure Managed Identity, Azure OAuth or Azure SDK in the Azure AD config")
}
if c.ManagedIdentity != nil && c.OAuth != nil {
return fmt.Errorf("cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config")
return errors.New("cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config")
}
if c.ManagedIdentity != nil && c.SDK != nil {
return fmt.Errorf("cannot provide both Azure Managed Identity and Azure SDK in the Azure AD config")
return errors.New("cannot provide both Azure Managed Identity and Azure SDK in the Azure AD config")
}
if c.OAuth != nil && c.SDK != nil {
return fmt.Errorf("cannot provide both Azure OAuth and Azure SDK in the Azure AD config")
return errors.New("cannot provide both Azure OAuth and Azure SDK in the Azure AD config")
}
if c.ManagedIdentity != nil {
if c.ManagedIdentity.ClientID == "" {
return fmt.Errorf("must provide an Azure Managed Identity client_id in the Azure AD config")
return errors.New("must provide an Azure Managed Identity client_id in the Azure AD config")
}
_, err := uuid.Parse(c.ManagedIdentity.ClientID)
if err != nil {
return fmt.Errorf("the provided Azure Managed Identity client_id is invalid")
return errors.New("the provided Azure Managed Identity client_id is invalid")
}
}
if c.OAuth != nil {
if c.OAuth.ClientID == "" {
return fmt.Errorf("must provide an Azure OAuth client_id in the Azure AD config")
return errors.New("must provide an Azure OAuth client_id in the Azure AD config")
}
if c.OAuth.ClientSecret == "" {
return fmt.Errorf("must provide an Azure OAuth client_secret in the Azure AD config")
return errors.New("must provide an Azure OAuth client_secret in the Azure AD config")
}
if c.OAuth.TenantID == "" {
return fmt.Errorf("must provide an Azure OAuth tenant_id in the Azure AD config")
return errors.New("must provide an Azure OAuth tenant_id in the Azure AD config")
}
var err error
_, err = uuid.Parse(c.OAuth.ClientID)
if err != nil {
return fmt.Errorf("the provided Azure OAuth client_id is invalid")
return errors.New("the provided Azure OAuth client_id is invalid")
}
_, err = regexp.MatchString("^[0-9a-zA-Z-.]+$", c.OAuth.TenantID)
if err != nil {
return fmt.Errorf("the provided Azure OAuth tenant_id is invalid")
return errors.New("the provided Azure OAuth tenant_id is invalid")
}
}
@ -168,7 +167,7 @@ func (c *AzureADConfig) Validate() error {
if c.SDK.TenantID != "" {
_, err = regexp.MatchString("^[0-9a-zA-Z-.]+$", c.SDK.TenantID)
if err != nil {
return fmt.Errorf("the provided Azure OAuth tenant_id is invalid")
return errors.New("the provided Azure OAuth tenant_id is invalid")
}
}
}

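A side note on the validation code above (unchanged in substance by this commit): regexp.MatchString returns both a match result and an error, and the error is non-nil only when the pattern fails to compile, so checking err alone never rejects a malformed tenant_id. A hedged sketch of a stricter variant, with validateTenantID and its inputs invented for illustration:

package main

import (
	"errors"
	"fmt"
	"regexp"
)

// validateTenantID is a hypothetical helper, not part of the diff: it rejects
// the tenant_id both when the pattern cannot be evaluated and when it simply
// does not match, whereas checking only the error accepts any string because
// the pattern here is a valid constant.
func validateTenantID(tenantID string) error {
	matched, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", tenantID)
	if err != nil || !matched {
		return errors.New("the provided Azure OAuth tenant_id is invalid")
	}
	return nil
}

func main() {
	fmt.Println(validateTenantID("contoso.onmicrosoft.com")) // <nil>
	fmt.Println(validateTenantID("bad tenant!"))             // rejected
}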
View file

@ -763,7 +763,7 @@ func TestDisableReshardOnRetry(t *testing.T) {
onStoreCalled()
return WriteResponseStats{}, RecoverableError{
error: fmt.Errorf("fake error"),
error: errors.New("fake error"),
retryAfter: model.Duration(retryAfter),
}
},

View file

@ -672,7 +672,7 @@ func TestCommitErr_V1Message(t *testing.T) {
req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")}
appendable := &mockAppendable{commitErr: errors.New("commit error")}
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
recorder := httptest.NewRecorder()
@ -696,7 +696,7 @@ func TestCommitErr_V2Message(t *testing.T) {
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")}
appendable := &mockAppendable{commitErr: errors.New("commit error")}
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
recorder := httptest.NewRecorder()

View file

@ -132,7 +132,7 @@ func TestPool(t *testing.T) {
{
name: "invalid encoding",
encoding: EncNone,
expErr: fmt.Errorf(`invalid chunk encoding "none"`),
expErr: errors.New(`invalid chunk encoding "none"`),
},
} {
t.Run(tc.name, func(t *testing.T) {

View file

@ -15,6 +15,7 @@ package chunkenc
import (
"encoding/binary"
"errors"
"fmt"
"math"
@ -761,9 +762,9 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
if !okToAppend || counterReset {
if appendOnly {
if counterReset {
return nil, false, a, fmt.Errorf("float histogram counter reset")
return nil, false, a, errors.New("float histogram counter reset")
}
return nil, false, a, fmt.Errorf("float histogram schema change")
return nil, false, a, errors.New("float histogram schema change")
}
newChunk := NewFloatHistogramChunk()
app, err := newChunk.Appender()
@ -812,7 +813,7 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h)
if !okToAppend {
if appendOnly {
return nil, false, a, fmt.Errorf("float gauge histogram schema change")
return nil, false, a, errors.New("float gauge histogram schema change")
}
newChunk := NewFloatHistogramChunk()
app, err := newChunk.Appender()

View file

@ -15,6 +15,7 @@ package chunkenc
import (
"encoding/binary"
"errors"
"fmt"
"math"
@ -795,9 +796,9 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
if !okToAppend || counterReset {
if appendOnly {
if counterReset {
return nil, false, a, fmt.Errorf("histogram counter reset")
return nil, false, a, errors.New("histogram counter reset")
}
return nil, false, a, fmt.Errorf("histogram schema change")
return nil, false, a, errors.New("histogram schema change")
}
newChunk := NewHistogramChunk()
app, err := newChunk.Appender()
@ -846,7 +847,7 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h)
if !okToAppend {
if appendOnly {
return nil, false, a, fmt.Errorf("gauge histogram schema change")
return nil, false, a, errors.New("gauge histogram schema change")
}
newChunk := NewHistogramChunk()
app, err := newChunk.Appender()

View file

@ -16,6 +16,7 @@ package chunks
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"hash"
"hash/crc32"
@ -172,7 +173,7 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
return emptyChunk, err
}
if newChunk != nil {
return emptyChunk, fmt.Errorf("did not expect to start a second chunk")
return emptyChunk, errors.New("did not expect to start a second chunk")
}
case chunkenc.ValFloatHistogram:
newChunk, _, ca, err = ca.AppendFloatHistogram(nil, s.Get(i).T(), s.Get(i).FH(), false)
@ -180,7 +181,7 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
return emptyChunk, err
}
if newChunk != nil {
return emptyChunk, fmt.Errorf("did not expect to start a second chunk")
return emptyChunk, errors.New("did not expect to start a second chunk")
}
default:
panic(fmt.Sprintf("unknown sample type %s", sampleType.String()))
@ -250,7 +251,7 @@ func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool {
return cm.MinTime <= maxt && mint <= cm.MaxTime
}
var errInvalidSize = fmt.Errorf("invalid size")
var errInvalidSize = errors.New("invalid size")
var castagnoliTable *crc32.Table

View file

@ -184,7 +184,7 @@ func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l *slog.L
func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts LeveledCompactorOptions) (*LeveledCompactor, error) {
if len(ranges) == 0 {
return nil, fmt.Errorf("at least one range must be provided")
return nil, errors.New("at least one range must be provided")
}
if pool == nil {
pool = chunkenc.NewPool()

View file

@ -2004,10 +2004,10 @@ func (db *DB) ForceHeadMMap() {
// will create a new block containing all data that's currently in the memory buffer/WAL.
func (db *DB) Snapshot(dir string, withHead bool) error {
if dir == db.dir {
return fmt.Errorf("cannot snapshot into base directory")
return errors.New("cannot snapshot into base directory")
}
if _, err := ulid.ParseStrict(dir); err == nil {
return fmt.Errorf("dir must not be a valid ULID")
return errors.New("dir must not be a valid ULID")
}
db.cmtx.Lock()

View file

@ -18,6 +18,7 @@ import (
"bytes"
"context"
"encoding/binary"
"errors"
"flag"
"fmt"
"hash/crc32"
@ -1432,7 +1433,7 @@ func (*mockCompactorFailing) Plan(string) ([]string, error) {
func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *BlockMeta) ([]ulid.ULID, error) {
if len(c.blocks) >= c.max {
return []ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail")
return []ulid.ULID{}, errors.New("the compactor already did the maximum allowed blocks so it is time to fail")
}
block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 1)), nil)
@ -1459,7 +1460,7 @@ func (*mockCompactorFailing) Compact(string, []string, []*Block) ([]ulid.ULID, e
}
func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ulid.ULID, err error) {
return nil, fmt.Errorf("mock compaction failing CompactOOO")
return nil, errors.New("mock compaction failing CompactOOO")
}
func TestTimeRetention(t *testing.T) {

View file

@ -356,21 +356,21 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
}
}
s.Lock()
if value.IsStaleNaN(v) {
// This is not thread safe as we should be holding the lock for "s".
// TODO(krajorama): reorganize Commit() to handle samples in append order
// not floats first and then histograms. Then we could do this conversion
// in commit. This code should move into Commit().
switch {
case s.lastHistogramValue != nil:
s.Unlock()
return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil)
case s.lastFloatHistogramValue != nil:
s.Unlock()
return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v})
}
}
s.Lock()
defer s.Unlock()
// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
// to skip that sample from the WAL and write only in the WBL.
@ -1517,7 +1517,7 @@ type chunkOpts struct {
// append adds the sample (t, v) to the series. The caller also has to provide
// the appendID for isolation. (The appendID can be zero, which results in no
// isolation for this append.)
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
// Series lock must be held when calling.
func (s *memSeries) append(t int64, v float64, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) {
c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, o)
if !sampleInOrder {

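The hunk above moves the staleness check under the series lock: when a stale float marker arrives on a series whose last sample was a histogram or float histogram, the append is re-routed through AppendHistogram so the staleness marker carries the series' sample type. A minimal sketch of that routing decision, with routeStaleSample and lastWasHistogram standing in for the memSeries state the real appender inspects:

package main

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/value"
)

// routeStaleSample is a hypothetical helper, not the real appender: it shows
// that a stale NaN appended to a series whose last sample was a histogram
// should itself be appended as a (stale) histogram rather than as a float.
func routeStaleSample(v float64, lastWasHistogram bool) (asHistogram *histogram.Histogram, asFloat float64) {
	if value.IsStaleNaN(v) && lastWasHistogram {
		return &histogram.Histogram{Sum: v}, 0 // staleness marker carried in Sum
	}
	return nil, v
}

func main() {
	staleNaN := math.Float64frombits(value.StaleNaN)
	h, _ := routeStaleSample(staleNaN, true)
	fmt.Println(h != nil) // true: append via AppendHistogram, not as a float
}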
View file

@ -24,6 +24,7 @@ import (
"sort"
"strings"
"sync"
"time"
"github.com/bboreham/go-loser"
@ -312,8 +313,30 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma
}
}
i := 0
for l := range affected {
i++
process(l)
// From time to time we want some readers to go through and read their postings.
// It takes around 50ms to process a 1K series batch, and 120ms to process a 10K series batch (local benchmarks on an M3).
// Note that a read query will most likely want to read multiple postings lists, say 5, 10 or 20 (depending on the number of matchers)
// And that read query will most likely evaluate only one of those matchers before we unpause here, so we want to pause often.
if i%512 == 0 {
p.mtx.Unlock()
// While it's tempting to just do a `time.Sleep(time.Millisecond)` here,
// it wouldn't ensure that readers actually were able to get the read lock,
// because if there are writes waiting on the same mutex, readers won't be able to get it.
// So we just grab one RLock ourselves.
p.mtx.RLock()
// We shouldn't wait here, because we would be blocking a potential write for no reason.
// Note that if there's a writer waiting for us to unlock, no reader will be able to get the read lock.
p.mtx.RUnlock() //nolint:staticcheck // SA2001: this is an intentionally empty critical section.
// Now we can wait a little bit just to increase the chance of a reader getting the lock.
// If we were deleting 100M series here, pausing every 512 series with 1ms sleeps would add an extra 200s, which is negligible.
time.Sleep(time.Millisecond)
p.mtx.Lock()
}
}
process(allPostingsKey)
}

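The new pause logic above periodically hands the postings mutex back to readers during a large delete. A self-contained sketch of the same yield pattern; the 512-item batch and 1ms pause come from the diff, everything else (deleteWithYield and its arguments) is a stand-in:

package main

import (
	"fmt"
	"sync"
	"time"
)

// deleteWithYield processes keys under a write lock, but every 512 items it
// drops the lock, takes and releases a read lock so queued readers are not
// starved, sleeps briefly, and then re-acquires the write lock.
func deleteWithYield(mtx *sync.RWMutex, keys []string, process func(string)) {
	mtx.Lock()
	defer mtx.Unlock()
	for i, k := range keys {
		process(k)
		if (i+1)%512 == 0 {
			mtx.Unlock()
			// Grabbing and immediately releasing a read lock, rather than only
			// sleeping, gives waiting readers a real chance to acquire it.
			mtx.RLock()
			mtx.RUnlock()
			time.Sleep(time.Millisecond)
			mtx.Lock()
		}
	}
}

func main() {
	var mtx sync.RWMutex
	keys := make([]string, 2048)
	deleteWithYield(&mtx, keys, func(string) {})
	fmt.Println("done")
}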
View file

@ -509,7 +509,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
Ref: 0x1800000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300,
})
require.Nil(t, iterable)
require.Equal(t, err, fmt.Errorf("not found"))
require.EqualError(t, err, "not found")
require.Nil(t, c)
})

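The assertion change above is worth a closer look: require.Equal on two error values compares them structurally (effectively reflect.DeepEqual), so it passes only while both sides happen to be the same concrete error type, whereas require.EqualError compares just the message, which is all this test asserts. A hypothetical test showing the difference:

package example_test

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestErrorAssertions(t *testing.T) {
	err := errors.New("not found")

	// Structural comparison: passes here only because both sides are the
	// concrete type returned by errors.New; wrapping the error would break it.
	require.Equal(t, errors.New("not found"), err)

	// Message-only comparison: robust to how the error was constructed,
	// including wrapping with fmt.Errorf and %w.
	require.EqualError(t, fmt.Errorf("outer: %w", err), "outer: not found")
	require.EqualError(t, err, "not found")
}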
View file

@ -27,7 +27,6 @@ import (
const testMaxSize int = 32
// Formulas chosen to make testing easy.
// Formulas chosen to make testing easy.
func valEven(pos int) int64 { return int64(pos*2 + 2) } // s[0]=2, s[1]=4, s[2]=6, ..., s[31]=64 - Predictable pre-existing values
func valOdd(pos int) int64 { return int64(pos*2 + 1) } // s[0]=1, s[1]=3, s[2]=5, ..., s[31]=63 - New values will interject at chosen position because they sort before the pre-existing vals.

View file

@ -3324,7 +3324,7 @@ func (m mockMatcherIndex) LabelNames(context.Context, ...*labels.Matcher) ([]str
}
func (m mockMatcherIndex) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings {
return index.ErrPostings(fmt.Errorf("PostingsForLabelMatching called"))
return index.ErrPostings(errors.New("PostingsForLabelMatching called"))
}
func TestPostingsForMatcher(t *testing.T) {

View file

@ -15,6 +15,7 @@ package tsdb
import (
"context"
"errors"
"fmt"
"log/slog"
"path/filepath"
@ -23,7 +24,7 @@ import (
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
var ErrInvalidTimes = fmt.Errorf("max time is lesser than min time")
var ErrInvalidTimes = errors.New("max time is lesser than min time")
// CreateBlock creates a chunkrange block from the samples passed to it, and writes it to disk.
func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger *slog.Logger) (string, error) {

View file

@ -1606,7 +1606,7 @@ func parseListRulesPaginationRequest(r *http.Request) (int64, string, *apiFuncRe
nextToken := r.URL.Query().Get("group_next_token")
if nextToken != "" && maxGroups == "" {
errResult := invalidParamError(fmt.Errorf("group_limit needs to be present in order to paginate over the groups"), "group_next_token")
errResult := invalidParamError(errors.New("group_limit needs to be present in order to paginate over the groups"), "group_next_token")
return -1, "", &errResult
}
@ -1617,7 +1617,7 @@ func parseListRulesPaginationRequest(r *http.Request) (int64, string, *apiFuncRe
return -1, "", &errResult
}
if parsedMaxGroups <= 0 {
errResult := invalidParamError(fmt.Errorf("group_limit needs to be greater than 0"), "group_limit")
errResult := invalidParamError(errors.New("group_limit needs to be greater than 0"), "group_limit")
return -1, "", &errResult
}
}

View file

@ -615,7 +615,7 @@ func TestGetSeries(t *testing.T) {
matchers: []string{`{foo="boo"}`, `{foo="baz"}`},
expectedErrorType: errorExec,
api: &API{
Queryable: errorTestQueryable{err: fmt.Errorf("generic")},
Queryable: errorTestQueryable{err: errors.New("generic")},
},
},
{
@ -623,7 +623,7 @@ func TestGetSeries(t *testing.T) {
matchers: []string{`{foo="boo"}`, `{foo="baz"}`},
expectedErrorType: errorInternal,
api: &API{
Queryable: errorTestQueryable{err: promql.ErrStorage{Err: fmt.Errorf("generic")}},
Queryable: errorTestQueryable{err: promql.ErrStorage{Err: errors.New("generic")}},
},
},
} {
@ -717,7 +717,7 @@ func TestQueryExemplars(t *testing.T) {
name: "should return errorExec upon genetic error",
expectedErrorType: errorExec,
api: &API{
ExemplarQueryable: errorTestQueryable{err: fmt.Errorf("generic")},
ExemplarQueryable: errorTestQueryable{err: errors.New("generic")},
},
query: url.Values{
"query": []string{`test_metric3{foo="boo"} - test_metric4{foo="bar"}`},
@ -729,7 +729,7 @@ func TestQueryExemplars(t *testing.T) {
name: "should return errorInternal err type is ErrStorage",
expectedErrorType: errorInternal,
api: &API{
ExemplarQueryable: errorTestQueryable{err: promql.ErrStorage{Err: fmt.Errorf("generic")}},
ExemplarQueryable: errorTestQueryable{err: promql.ErrStorage{Err: errors.New("generic")}},
},
query: url.Values{
"query": []string{`test_metric3{foo="boo"} - test_metric4{foo="bar"}`},
@ -838,7 +838,7 @@ func TestLabelNames(t *testing.T) {
matchers: []string{`{foo="boo"}`, `{foo="baz"}`},
expectedErrorType: errorExec,
api: &API{
Queryable: errorTestQueryable{err: fmt.Errorf("generic")},
Queryable: errorTestQueryable{err: errors.New("generic")},
},
},
{
@ -846,7 +846,7 @@ func TestLabelNames(t *testing.T) {
matchers: []string{`{foo="boo"}`, `{foo="baz"}`},
expectedErrorType: errorInternal,
api: &API{
Queryable: errorTestQueryable{err: promql.ErrStorage{Err: fmt.Errorf("generic")}},
Queryable: errorTestQueryable{err: promql.ErrStorage{Err: errors.New("generic")}},
},
},
} {

View file

@ -157,6 +157,20 @@ describe("serializeNode and formatNode", () => {
},
output: "metric_name[5m] @ start() offset -10m",
},
{
node: {
type: nodeType.vectorSelector,
name: "", // Test formatting a selector with an empty metric name.
matchers: [
{ type: matchType.equal, name: "label1", value: "value1" },
],
offset: 0,
timestamp: null,
startOrEnd: null,
},
output:
'{label1="value1"}',
},
// Aggregations.
{

View file

@ -271,7 +271,7 @@ const metricNameRe = /^[a-zA-Z_:][a-zA-Z0-9_:]*$/;
const labelNameCharsetRe = /^[a-zA-Z_][a-zA-Z0-9_]*$/;
export const metricContainsExtendedCharset = (str: string) => {
return !metricNameRe.test(str);
return str !== "" && !metricNameRe.test(str);
};
export const labelNameContainsExtendedCharset = (str: string) => {