Merge remote-tracking branch 'prometheus/main' into arve/upgrade-exp

Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Arve Knudsen 2023-10-06 15:44:17 +02:00
commit 35ab75918a
62 changed files with 1388 additions and 793 deletions


@@ -13,6 +13,7 @@ output:
linters:
enable:
- depguard
- errorlint
- gocritic
- gofumpt
- goimports
@@ -31,6 +32,24 @@ issues:
- path: _test.go
linters:
- errcheck
- path: discovery/
linters:
- errorlint
- path: scrape/
linters:
- errorlint
- path: storage/
linters:
- errorlint
- path: tsdb/
linters:
- errorlint
- path: util/
linters:
- errorlint
- path: web/
linters:
- errorlint
linters-settings:
depguard:
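The newly enabled `errorlint` linter flags error comparisons and type assertions that silently break once errors are wrapped; the per-directory exclusions above keep it quiet in packages that have not been migrated yet. A standalone sketch of the class of bug it catches (not Prometheus code):

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	err := fmt.Errorf("read failed: %w", io.EOF)

	// errorlint flags this: a direct comparison misses wrapped errors.
	fmt.Println(err == io.EOF) // false

	// errors.Is unwraps the chain, which is the form errorlint enforces.
	fmt.Println(errors.Is(err, io.EOF)) // true
}
```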


@@ -1,5 +1,9 @@
# Changelog
## 2.47.1 / 2023-10-04
* [BUGFIX] Fix duplicate sample detection at chunk size limit #12874
## 2.47.0 / 2023-09-06
This release adds an experimental OpenTelemetry (OTLP) Ingestion feature,


@@ -14,6 +14,7 @@ examples and guides.</p>
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/486/badge)](https://bestpractices.coreinfrastructure.org/projects/486)
[![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/prometheus/prometheus)
[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/prometheus.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus)
</div>


@@ -1 +1 @@
2.47.0
2.47.1


@@ -498,10 +498,9 @@ func TestDocumentation(t *testing.T) {
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
if exitError, ok := err.(*exec.ExitError); ok {
if exitError.ExitCode() != 0 {
fmt.Println("Command failed with non-zero exit code")
}
var exitError *exec.ExitError
if errors.As(err, &exitError) && exitError.ExitCode() != 0 {
fmt.Println("Command failed with non-zero exit code")
}
}
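The refactor above swaps a one-shot type assertion for `errors.As`, which also matches when the `*exec.ExitError` is buried inside a wrapped error. A standalone sketch of the difference (assumes a Unix-like system with `false` on the PATH):

```go
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	err := exec.Command("false").Run() // exits with status 1
	wrapped := fmt.Errorf("running command: %w", err)

	// The type assertion only inspects the top-level value, so it fails here.
	if _, ok := wrapped.(*exec.ExitError); !ok {
		fmt.Println("type assertion: no match")
	}

	// errors.As walks the wrap chain and still finds the ExitError.
	var exitErr *exec.ExitError
	if errors.As(wrapped, &exitErr) {
		fmt.Println("errors.As: exit code", exitErr.ExitCode())
	}
}
```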


@@ -733,30 +733,7 @@ func CheckRules(ls lintConfig, files ...string) int {
failed := false
hasErrors := false
if len(files) == 0 {
fmt.Println("Checking standard input")
data, err := io.ReadAll(os.Stdin)
if err != nil {
fmt.Fprintln(os.Stderr, " FAILED:", err)
return failureExitCode
}
rgs, errs := rulefmt.Parse(data)
for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error())
return failureExitCode
}
if n, errs := checkRuleGroups(rgs, ls); errs != nil {
fmt.Fprintln(os.Stderr, " FAILED:")
for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error())
}
failed = true
for _, err := range errs {
hasErrors = hasErrors || !errors.Is(err, lintError)
}
} else {
fmt.Printf(" SUCCESS: %d rules found\n", n)
}
fmt.Println()
failed, hasErrors = checkRulesFromStdin(ls)
} else {
failed, hasErrors = checkRules(files, ls)
}
@@ -771,6 +748,44 @@
return successExitCode
}
// checkRulesFromStdin validates rules from stdin.
func checkRulesFromStdin(ls lintConfig) (bool, bool) {
failed := false
hasErrors := false
fmt.Println("Checking standard input")
data, err := io.ReadAll(os.Stdin)
if err != nil {
fmt.Fprintln(os.Stderr, " FAILED:", err)
return true, true
}
rgs, errs := rulefmt.Parse(data)
if errs != nil {
failed = true
fmt.Fprintln(os.Stderr, " FAILED:")
for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error())
hasErrors = hasErrors || !errors.Is(e, lintError)
}
if hasErrors {
return failed, hasErrors
}
}
if n, errs := checkRuleGroups(rgs, ls); errs != nil {
fmt.Fprintln(os.Stderr, " FAILED:")
for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error())
}
failed = true
for _, err := range errs {
hasErrors = hasErrors || !errors.Is(err, lintError)
}
} else {
fmt.Printf(" SUCCESS: %d rules found\n", n)
}
fmt.Println()
return failed, hasErrors
}
// checkRules validates rule files.
func checkRules(files []string, ls lintConfig) (bool, bool) {
failed := false
@@ -780,7 +795,14 @@ func checkRules(files []string, ls lintConfig) (bool, bool) {
rgs, errs := rulefmt.ParseFile(f)
if errs != nil {
failed = true
continue
fmt.Fprintln(os.Stderr, " FAILED:")
for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error())
hasErrors = hasErrors || !errors.Is(e, lintError)
}
if hasErrors {
continue
}
}
if n, errs := checkRuleGroups(rgs, ls); errs != nil {
fmt.Fprintln(os.Stderr, " FAILED:")
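With this split, `promtool check rules` reads rule definitions from standard input whenever no file arguments are given. Assuming a promtool build that includes this change, usage would look something like:

```sh
# Check rule files passed as arguments (existing behaviour).
promtool check rules rules.yml

# New: pipe rules in via stdin by passing no file arguments.
cat rules.yml | promtool check rules
```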


@@ -450,10 +450,9 @@ func TestDocumentation(t *testing.T) {
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
if exitError, ok := err.(*exec.ExitError); ok {
if exitError.ExitCode() != 0 {
fmt.Println("Command failed with non-zero exit code")
}
var exitError *exec.ExitError
if errors.As(err, &exitError) && exitError.ExitCode() != 0 {
fmt.Println("Command failed with non-zero exit code")
}
}
@@ -464,3 +463,88 @@ func TestDocumentation(t *testing.T) {
require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. Hint: run `make cli-documentation`.")
}
func TestCheckRules(t *testing.T) {
t.Run("rules-good", func(t *testing.T) {
data, err := os.ReadFile("./testdata/rules.yml")
require.NoError(t, err)
r, w, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
_, err = w.Write(data)
if err != nil {
t.Error(err)
}
w.Close()
// Restore stdin right after the test.
defer func(v *os.File) { os.Stdin = v }(os.Stdin)
os.Stdin = r
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false))
require.Equal(t, successExitCode, exitCode, "")
})
t.Run("rules-bad", func(t *testing.T) {
data, err := os.ReadFile("./testdata/rules-bad.yml")
require.NoError(t, err)
r, w, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
_, err = w.Write(data)
if err != nil {
t.Error(err)
}
w.Close()
// Restore stdin right after the test.
defer func(v *os.File) { os.Stdin = v }(os.Stdin)
os.Stdin = r
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false))
require.Equal(t, failureExitCode, exitCode, "")
})
t.Run("rules-lint-fatal", func(t *testing.T) {
data, err := os.ReadFile("./testdata/prometheus-rules.lint.yml")
require.NoError(t, err)
r, w, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
_, err = w.Write(data)
if err != nil {
t.Error(err)
}
w.Close()
// Restore stdin right after the test.
defer func(v *os.File) { os.Stdin = v }(os.Stdin)
os.Stdin = r
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true))
require.Equal(t, lintErrExitCode, exitCode, "")
})
}
func TestCheckRulesWithRuleFiles(t *testing.T) {
t.Run("rules-good", func(t *testing.T) {
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules.yml")
require.Equal(t, successExitCode, exitCode, "")
})
t.Run("rules-bad", func(t *testing.T) {
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules-bad.yml")
require.Equal(t, failureExitCode, exitCode, "")
})
t.Run("rules-lint-fatal", func(t *testing.T) {
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true), "./testdata/prometheus-rules.lint.yml")
require.Equal(t, lintErrExitCode, exitCode, "")
})
}

cmd/promtool/testdata/rules-bad.yml

@@ -0,0 +1,28 @@
# This is the rules file.
groups:
- name: alerts
rules:
- alert: InstanceDown
expr: up == 0
for: 5m
labels:
severity: page
annotations:
summary: "Instance {{ $label.foo }} down"
description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes."
- alert: AlwaysFiring
expr: 1
- name: rules
rules:
- record: job:test:count_over_time1m
expr: sum without(instance) (count_over_time(test[1m]))
# A recording rule that doesn't depend on input series.
- record: fixed_data
expr: 1
# Subquery with default resolution test.
- record: suquery_interval_test
expr: count_over_time(up[5m:])


@@ -459,7 +459,7 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten
postingInfos := []postingInfo{}
printInfo := func(postingInfos []postingInfo) {
slices.SortFunc(postingInfos, func(a, b postingInfo) bool { return a.metric > b.metric })
slices.SortFunc(postingInfos, func(a, b postingInfo) int { return int(b.metric) - int(a.metric) })
for i, pc := range postingInfos {
if i >= limit {


@@ -241,7 +241,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
g.Eval(suite.Context(), ts)
for _, r := range g.Rules() {
if r.LastError() != nil {
evalErrs = append(evalErrs, fmt.Errorf(" rule: %s, time: %s, err: %v",
evalErrs = append(evalErrs, fmt.Errorf(" rule: %s, time: %s, err: %w",
r.Name(), ts.Sub(time.Unix(0, 0).UTC()), r.LastError()))
}
}


@@ -140,11 +140,9 @@ type Manager struct {
// Run starts the background processing
func (m *Manager) Run() error {
go m.sender()
for range m.ctx.Done() {
m.cancelDiscoverers()
return m.ctx.Err()
}
return nil
<-m.ctx.Done()
m.cancelDiscoverers()
return m.ctx.Err()
}
// SyncCh returns a read only channel used by all the clients to receive target updates.
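Beyond being shorter, the bare receive fixes a real bug: `range` over a channel only executes its body for received values, and `ctx.Done()` is closed without ever sending one, so the old loop body, and with it `cancelDiscoverers`, never ran. A standalone illustration:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// Buggy pattern: Done() is closed, never sent to, so this body never runs.
	go func() {
		for range ctx.Done() {
			fmt.Println("never printed")
		}
	}()

	// Fixed pattern: a bare receive unblocks when the channel is closed.
	<-ctx.Done()
	fmt.Println("stopped:", ctx.Err())
}
```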


@@ -180,11 +180,9 @@ func (m *Manager) Providers() []*Provider {
// Run starts the background processing.
func (m *Manager) Run() error {
go m.sender()
for range m.ctx.Done() {
m.cancelDiscoverers()
return m.ctx.Err()
}
return nil
<-m.ctx.Done()
m.cancelDiscoverers()
return m.ctx.Err()
}
// SyncCh returns a read only channel used by all the clients to receive target updates.


@@ -3537,7 +3537,13 @@ azuread:
# Azure User-assigned Managed identity.
[ managed_identity:
[ client_id: <string> ]
[ client_id: <string> ] ]
# Azure OAuth.
[ oauth:
[ client_id: <string> ]
[ client_secret: <string> ]
[ tenant_id: <string> ] ]
# Configures the remote write request's TLS settings.
tls_config:
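Put together, a remote-write section using the new OAuth block would look roughly like the following sketch (the URL is a placeholder and the IDs are the dummy values from the test data):

```yaml
remote_write:
  - url: https://example.com/api/v1/write
    azuread:
      cloud: AzurePublic
      oauth:
        client_id: 00000000-0000-0000-0000-000000000000
        client_secret: <your client secret>
        tenant_id: 00000000-a12b-3cd4-e56f-000000000000
```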


@@ -147,3 +147,7 @@ by the rule are discarded, and if it's an alerting rule, _all_ alerts for
the rule, active, pending, or inactive, are cleared as well. The event will be
recorded as an error in the evaluation, and as such no stale markers are
written.
## Failed rule evaluations due to slow evaluation
If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group.
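One way to surface this is to alert on the missed-iterations counter itself; a hedged sketch (the counter is exposed with the `prometheus_` prefix, and the group name is carried in the `rule_group` label):

```yaml
groups:
  - name: rule-group-health
    rules:
      - alert: RuleGroupIterationsMissed
        expr: increase(prometheus_rule_group_iterations_missed_total[1h]) > 0
        labels:
          severity: warning
        annotations:
          summary: "Rule group {{ $labels.rule_group }} is missing evaluations."
```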


@@ -86,6 +86,7 @@ versions.
| args | []interface{} | map[string]interface{} | This converts a list of objects to a map with keys arg0, arg1 etc. This is intended to allow multiple arguments to be passed to templates. |
| tmpl | string, []interface{} | nothing | Like the built-in `template`, but allows non-literals as the template name. Note that the result is assumed to be safe, and will not be auto-escaped. Only available in consoles. |
| safeHtml | string | string | Marks string as HTML not requiring auto-escaping. |
| externalURL | _none_ | string | The external URL under which Prometheus is externally reachable. |
| pathPrefix | _none_ | string | The external URL [path](https://pkg.go.dev/net/url#URL) for use in console templates. |
## Template type differences


@@ -14,7 +14,7 @@ vector, which if not provided it will default to the value of the expression
_Notes about the experimental native histograms:_
* Ingesting native histograms has to be enabled via a [feature
flag](../../feature_flags.md#native-histograms). As long as no native histograms
flag](../feature_flags.md#native-histograms). As long as no native histograms
have been ingested into the TSDB, all functions will behave as usual.
* Functions that do not explicitly mention native histograms in their
documentation (see below) will ignore histogram samples.


@@ -8,7 +8,7 @@ require (
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.11.2
github.com/prometheus/client_golang v1.16.0
github.com/prometheus/client_golang v1.17.0
github.com/prometheus/common v0.44.0
github.com/prometheus/prometheus v0.45.0
github.com/stretchr/testify v1.8.4
@@ -42,9 +42,9 @@ require (
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect
go.opentelemetry.io/otel v1.16.0 // indirect
@@ -56,11 +56,11 @@ require (
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/oauth2 v0.8.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.30.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)


@@ -194,13 +194,13 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
@@ -213,8 +213,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/prometheus/prometheus v0.45.0 h1:O/uG+Nw4kNxx/jDPxmjsSDd+9Ohql6E7ZSY1x5x/0KI=
github.com/prometheus/prometheus v0.45.0/go.mod h1:jC5hyO8ItJBnDWGecbEucMyXjzxGv9cxsxsjS9u5s1w=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -292,7 +292,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -312,8 +312,8 @@ golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -352,8 +352,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

go.mod

@@ -31,7 +31,7 @@ require (
github.com/gophercloud/gophercloud v1.5.0
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.22.0
github.com/hashicorp/consul/api v1.25.1
github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e
github.com/hetznercloud/hcloud-go/v2 v2.0.0
github.com/ionos-cloud/sdk-go/v6 v6.1.8
@@ -188,7 +188,7 @@ require (
go.opentelemetry.io/otel/metric v1.16.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b
golang.org/x/mod v0.12.0 // indirect
golang.org/x/term v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect

go.sum

@@ -411,10 +411,10 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.22.0 h1:ydEvDooB/A0c/xpsBd8GSt7P2/zYPBui4KrNip0xGjE=
github.com/hashicorp/consul/api v1.22.0/go.mod h1:zHpYgZ7TeYqS6zaszjwSt128OwESRpnhU9aGa6ue3Eg=
github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE=
github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.14.0 h1:Hly+BMNMssVzoWddbBnBFi3W+Fzytvm0haSkihhj3GU=
github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -861,8 +861,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b h1:r+vk0EmXNmekl0S0BascoeeoHk/L7wmaW2QF90K+kYI=
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=


@@ -19,6 +19,7 @@ import (
"bytes"
"encoding/json"
"strconv"
"strings"
"github.com/cespare/xxhash/v2"
"github.com/prometheus/common/model"
@@ -362,7 +363,7 @@ func EmptyLabels() Labels {
func New(ls ...Label) Labels {
set := make(Labels, 0, len(ls))
set = append(set, ls...)
slices.SortFunc(set, func(a, b Label) bool { return a.Name < b.Name })
slices.SortFunc(set, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
return set
}
@@ -386,7 +387,7 @@ func FromStrings(ss ...string) Labels {
res = append(res, Label{Name: ss[i], Value: ss[i+1]})
}
slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name })
slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
return res
}
@@ -591,7 +592,7 @@ func (b *Builder) Labels() Labels {
}
if len(b.add) > 0 { // Base is already in order, so we only need to sort if we add to it.
res = append(res, b.add...)
slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name })
slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
}
return res
}
@@ -618,7 +619,7 @@ func (b *ScratchBuilder) Add(name, value string) {
// Sort the labels added so far by name.
func (b *ScratchBuilder) Sort() {
slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name })
slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
}
// Assign is for when you already have a Labels which you want this ScratchBuilder to return.
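These changes track the updated `golang.org/x/exp/slices` API (the same shape Go 1.21's standard `slices` package adopted): `SortFunc` now takes a three-way comparator returning a negative, zero, or positive int instead of a `less(a, b) bool`. A standalone sketch of the new contract:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

type Label struct{ Name, Value string }

func main() {
	ls := []Label{{"job", "api"}, {"__name__", "up"}, {"instance", "a:9090"}}

	// cmp must return <0 if a sorts before b, 0 if equal, >0 otherwise;
	// strings.Compare already has exactly that shape.
	slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })

	fmt.Println(ls) // [{__name__ up} {instance a:9090} {job api}]
}
```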


@@ -20,6 +20,7 @@ import (
"encoding/json"
"reflect"
"strconv"
"strings"
"unsafe"
"github.com/cespare/xxhash/v2"
@@ -412,7 +413,7 @@ func yoloBytes(s string) (b []byte) {
// New returns a sorted Labels from the given labels.
// The caller has to guarantee that all label names are unique.
func New(ls ...Label) Labels {
slices.SortFunc(ls, func(a, b Label) bool { return a.Name < b.Name })
slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
size := labelsSize(ls)
buf := make([]byte, size)
marshalLabelsToSizedBuffer(ls, buf)
@@ -671,7 +672,7 @@ func (b *Builder) Labels() Labels {
return b.base
}
slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name })
slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
slices.Sort(b.del)
a, d := 0, 0
@@ -830,7 +831,7 @@ func (b *ScratchBuilder) Add(name, value string) {
// Sort the labels added so far by name.
func (b *ScratchBuilder) Sort() {
slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name })
slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
}
// Assign is for when you already have a Labels which you want this ScratchBuilder to return.


@@ -27,7 +27,6 @@ import (
"github.com/DmitriyVTitov/size"
"github.com/grafana/regexp"
"github.com/grafana/regexp/syntax"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -511,17 +510,17 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) {
require.Equal(t, c.expectedLiteralPrefixMatchers, numPrefixMatchers)
for _, value := range c.expectedMatches {
assert.Truef(t, matcher.Matches(value), "Value: %s", value)
require.Truef(t, matcher.Matches(value), "Value: %s", value)
// Ensure the golang regexp engine would return the same.
assert.Truef(t, re.MatchString(value), "Value: %s", value)
require.Truef(t, re.MatchString(value), "Value: %s", value)
}
for _, value := range c.expectedNotMatches {
assert.Falsef(t, matcher.Matches(value), "Value: %s", value)
require.Falsef(t, matcher.Matches(value), "Value: %s", value)
// Ensure the golang regexp engine would return the same.
assert.Falsef(t, re.MatchString(value), "Value: %s", value)
require.Falsef(t, re.MatchString(value), "Value: %s", value)
}
})
}
@@ -586,17 +585,17 @@ func TestStringMatcherFromRegexp_LiteralSuffix(t *testing.T) {
require.Equal(t, c.expectedLiteralSuffixMatchers, numSuffixMatchers)
for _, value := range c.expectedMatches {
assert.Truef(t, matcher.Matches(value), "Value: %s", value)
require.Truef(t, matcher.Matches(value), "Value: %s", value)
// Ensure the golang regexp engine would return the same.
assert.Truef(t, re.MatchString(value), "Value: %s", value)
require.Truef(t, re.MatchString(value), "Value: %s", value)
}
for _, value := range c.expectedNotMatches {
assert.Falsef(t, matcher.Matches(value), "Value: %s", value)
require.Falsef(t, matcher.Matches(value), "Value: %s", value)
// Ensure the golang regexp engine would return the same.
assert.Falsef(t, re.MatchString(value), "Value: %s", value)
require.Falsef(t, re.MatchString(value), "Value: %s", value)
}
})
}
@@ -671,17 +670,17 @@ func TestStringMatcherFromRegexp_Quest(t *testing.T) {
require.Equal(t, c.expectedZeroOrOneMatchers, numZeroOrOneMatchers)
for _, value := range c.expectedMatches {
assert.Truef(t, matcher.Matches(value), "Value: %s", value)
require.Truef(t, matcher.Matches(value), "Value: %s", value)
// Ensure the golang regexp engine would return the same.
assert.Truef(t, re.MatchString(value), "Value: %s", value)
require.Truef(t, re.MatchString(value), "Value: %s", value)
}
for _, value := range c.expectedNotMatches {
assert.Falsef(t, matcher.Matches(value), "Value: %s", value)
require.Falsef(t, matcher.Matches(value), "Value: %s", value)
// Ensure the golang regexp engine would return the same.
assert.Falsef(t, re.MatchString(value), "Value: %s", value)
require.Falsef(t, re.MatchString(value), "Value: %s", value)
}
})
}
@@ -1116,8 +1115,8 @@ func TestFindEqualStringMatchers(t *testing.T) {
t.Run("empty matcher", func(t *testing.T) {
actualMatches, actualOk := findEqualStringMatchersAndCollectMatches(emptyStringMatcher{})
assert.False(t, actualOk)
assert.Empty(t, actualMatches)
require.False(t, actualOk)
require.Empty(t, actualMatches)
})
t.Run("concat of literal matchers (case sensitive)", func(t *testing.T) {
@@ -1128,8 +1127,8 @@ func TestFindEqualStringMatchers(t *testing.T) {
},
)
assert.True(t, actualOk)
assert.Equal(t, []match{{"test-1", true}, {"test-2", true}}, actualMatches)
require.True(t, actualOk)
require.Equal(t, []match{{"test-1", true}, {"test-2", true}}, actualMatches)
})
t.Run("concat of literal matchers (case insensitive)", func(t *testing.T) {
@@ -1140,8 +1139,8 @@ func TestFindEqualStringMatchers(t *testing.T) {
},
)
assert.True(t, actualOk)
assert.Equal(t, []match{{"test-1", false}, {"test-2", false}}, actualMatches)
require.True(t, actualOk)
require.Equal(t, []match{{"test-1", false}, {"test-2", false}}, actualMatches)
})
t.Run("concat of literal matchers (mixed case)", func(t *testing.T) {
@@ -1152,8 +1151,8 @@ func TestFindEqualStringMatchers(t *testing.T) {
},
)
assert.True(t, actualOk)
assert.Equal(t, []match{{"test-1", false}, {"test-2", true}}, actualMatches)
require.True(t, actualOk)
require.Equal(t, []match{{"test-1", false}, {"test-2", true}}, actualMatches)
})
}
@@ -1211,86 +1210,86 @@ func BenchmarkOptimizeEqualStringMatchers(b *testing.B) {
func TestZeroOrOneCharacterStringMatcher(t *testing.T) {
matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
assert.True(t, matcher.Matches(""))
assert.True(t, matcher.Matches("x"))
assert.True(t, matcher.Matches("\n"))
assert.False(t, matcher.Matches("xx"))
assert.False(t, matcher.Matches("\n\n"))
require.True(t, matcher.Matches(""))
require.True(t, matcher.Matches("x"))
require.True(t, matcher.Matches("\n"))
require.False(t, matcher.Matches("xx"))
require.False(t, matcher.Matches("\n\n"))
matcher = &zeroOrOneCharacterStringMatcher{matchNL: false}
assert.True(t, matcher.Matches(""))
assert.True(t, matcher.Matches("x"))
assert.False(t, matcher.Matches("\n"))
assert.False(t, matcher.Matches("xx"))
assert.False(t, matcher.Matches("\n\n"))
require.True(t, matcher.Matches(""))
require.True(t, matcher.Matches("x"))
require.False(t, matcher.Matches("\n"))
require.False(t, matcher.Matches("xx"))
require.False(t, matcher.Matches("\n\n"))
}
func TestLiteralPrefixStringMatcher(t *testing.T) {
m := &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: true, right: &emptyStringMatcher{}}
assert.True(t, m.Matches("mar"))
assert.False(t, m.Matches("marco"))
assert.False(t, m.Matches("ma"))
assert.False(t, m.Matches("mAr"))
require.True(t, m.Matches("mar"))
require.False(t, m.Matches("marco"))
require.False(t, m.Matches("ma"))
require.False(t, m.Matches("mAr"))
m = &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: false, right: &emptyStringMatcher{}}
assert.True(t, m.Matches("mar"))
assert.False(t, m.Matches("marco"))
assert.False(t, m.Matches("ma"))
assert.True(t, m.Matches("mAr"))
require.True(t, m.Matches("mar"))
require.False(t, m.Matches("marco"))
require.False(t, m.Matches("ma"))
require.True(t, m.Matches("mAr"))
m = &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: true, right: &equalStringMatcher{s: "co", caseSensitive: false}}
assert.True(t, m.Matches("marco"))
assert.True(t, m.Matches("marCO"))
assert.False(t, m.Matches("MARco"))
assert.False(t, m.Matches("mar"))
assert.False(t, m.Matches("marcopracucci"))
require.True(t, m.Matches("marco"))
require.True(t, m.Matches("marCO"))
require.False(t, m.Matches("MARco"))
require.False(t, m.Matches("mar"))
require.False(t, m.Matches("marcopracucci"))
}
func TestLiteralSuffixStringMatcher(t *testing.T) {
m := &literalSuffixStringMatcher{left: &emptyStringMatcher{}, suffix: "co", suffixCaseSensitive: true}
assert.True(t, m.Matches("co"))
assert.False(t, m.Matches("marco"))
assert.False(t, m.Matches("coo"))
assert.False(t, m.Matches("Co"))
require.True(t, m.Matches("co"))
require.False(t, m.Matches("marco"))
require.False(t, m.Matches("coo"))
require.False(t, m.Matches("Co"))
m = &literalSuffixStringMatcher{left: &emptyStringMatcher{}, suffix: "co", suffixCaseSensitive: false}
assert.True(t, m.Matches("co"))
assert.False(t, m.Matches("marco"))
assert.False(t, m.Matches("coo"))
assert.True(t, m.Matches("Co"))
require.True(t, m.Matches("co"))
require.False(t, m.Matches("marco"))
require.False(t, m.Matches("coo"))
require.True(t, m.Matches("Co"))
m = &literalSuffixStringMatcher{left: &equalStringMatcher{s: "mar", caseSensitive: false}, suffix: "co", suffixCaseSensitive: true}
assert.True(t, m.Matches("marco"))
assert.True(t, m.Matches("MARco"))
assert.False(t, m.Matches("marCO"))
assert.False(t, m.Matches("mar"))
assert.False(t, m.Matches("marcopracucci"))
require.True(t, m.Matches("marco"))
require.True(t, m.Matches("MARco"))
require.False(t, m.Matches("marCO"))
require.False(t, m.Matches("mar"))
require.False(t, m.Matches("marcopracucci"))
m = &literalSuffixStringMatcher{left: &equalStringMatcher{s: "mar", caseSensitive: false}, suffix: "co", suffixCaseSensitive: false}
assert.True(t, m.Matches("marco"))
assert.True(t, m.Matches("MARco"))
assert.True(t, m.Matches("marCO"))
assert.False(t, m.Matches("mar"))
assert.False(t, m.Matches("marcopracucci"))
require.True(t, m.Matches("marco"))
require.True(t, m.Matches("MARco"))
require.True(t, m.Matches("marCO"))
require.False(t, m.Matches("mar"))
require.False(t, m.Matches("marcopracucci"))
}
func TestHasPrefixCaseInsensitive(t *testing.T) {
assert.True(t, hasPrefixCaseInsensitive("marco", "mar"))
assert.True(t, hasPrefixCaseInsensitive("mArco", "mar"))
assert.True(t, hasPrefixCaseInsensitive("marco", "MaR"))
assert.True(t, hasPrefixCaseInsensitive("marco", "marco"))
assert.True(t, hasPrefixCaseInsensitive("mArco", "marco"))
require.True(t, hasPrefixCaseInsensitive("marco", "mar"))
require.True(t, hasPrefixCaseInsensitive("mArco", "mar"))
require.True(t, hasPrefixCaseInsensitive("marco", "MaR"))
require.True(t, hasPrefixCaseInsensitive("marco", "marco"))
require.True(t, hasPrefixCaseInsensitive("mArco", "marco"))
assert.False(t, hasPrefixCaseInsensitive("marco", "a"))
assert.False(t, hasPrefixCaseInsensitive("marco", "abcdefghi"))
require.False(t, hasPrefixCaseInsensitive("marco", "a"))
require.False(t, hasPrefixCaseInsensitive("marco", "abcdefghi"))
}
func TestHasSuffixCaseInsensitive(t *testing.T) {
assert.True(t, hasSuffixCaseInsensitive("marco", "rco"))
assert.True(t, hasSuffixCaseInsensitive("marco", "RcO"))
assert.True(t, hasSuffixCaseInsensitive("marco", "marco"))
assert.False(t, hasSuffixCaseInsensitive("marco", "a"))
assert.False(t, hasSuffixCaseInsensitive("marco", "abcdefghi"))
require.True(t, hasSuffixCaseInsensitive("marco", "rco"))
require.True(t, hasSuffixCaseInsensitive("marco", "RcO"))
require.True(t, hasSuffixCaseInsensitive("marco", "marco"))
require.False(t, hasSuffixCaseInsensitive("marco", "a"))
require.False(t, hasSuffixCaseInsensitive("marco", "abcdefghi"))
}
func getTestNameFromRegexp(re string) string {


@@ -176,17 +176,11 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
})
}
if r.Record.Value == "" && r.Alert.Value == "" {
if r.Record.Value == "0" {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("one of 'record' or 'alert' must be set"),
node: &r.Alert,
})
} else {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("one of 'record' or 'alert' must be set"),
node: &r.Record,
})
}
nodes = append(nodes, WrappedError{
err: fmt.Errorf("one of 'record' or 'alert' must be set"),
node: &r.Record,
nodeAlt: &r.Alert,
})
}
if r.Expr.Value == "" {


@@ -338,7 +338,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
var ts float64
// A float is enough to hold what we need for millisecond resolution.
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
}
if math.IsNaN(ts) || math.IsInf(ts, 0) {
return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts)
@@ -391,7 +391,7 @@ func (p *OpenMetricsParser) parseComment() error {
var ts float64
// A float is enough to hold what we need for millisecond resolution.
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
return fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
}
if math.IsNaN(ts) || math.IsInf(ts, 0) {
return fmt.Errorf("invalid exemplar timestamp %f", ts)
@@ -461,7 +461,7 @@ func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error
}
val, err := parseFloat(yoloString(p.l.buf()[1:]))
if err != nil {
return 0, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
return 0, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
}
// Ensure canonical NaN value.
if math.IsNaN(p.exemplarVal) {


@@ -348,7 +348,7 @@ func (p *PromParser) Next() (Entry, error) {
return EntryInvalid, p.parseError("expected value after metric", t2)
}
if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil {
return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
}
// Ensure canonical NaN value.
if math.IsNaN(p.val) {
@@ -361,7 +361,7 @@ func (p *PromParser) Next() (Entry, error) {
case tTimestamp:
p.hasTS = true
if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil {
return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
}
if t2 := p.nextToken(); t2 != tLinebreak {
return EntryInvalid, p.parseError("expected next entry after timestamp", t2)


@@ -60,6 +60,11 @@ const (
maxInt64 = 9223372036854774784
// The smallest SampleValue that can be converted to an int64 without underflow.
minInt64 = -9223372036854775808
// Max initial size for the pooled points slices.
// The getHPointSlice and getFPointSlice functions are called with an estimated size which often can be
// over-estimated.
maxPointsSliceSize = 5000
)
type engineMetrics struct {
@@ -1911,19 +1916,33 @@ func getFPointSlice(sz int) []FPoint {
if p := fPointPool.Get(); p != nil {
return p
}
if sz > maxPointsSliceSize {
sz = maxPointsSliceSize
}
return make([]FPoint, 0, sz)
}
// putFPointSlice returns an FPoint slice to the pool for reuse.
func putFPointSlice(p []FPoint) {
if p != nil {
fPointPool.Put(p[:0])
}
}
// getHPointSlice will return a HPoint slice with capacity capped at min(sz, maxPointsSliceSize).
// This function is called with an estimated size which often can be over-estimated.
func getHPointSlice(sz int) []HPoint {
if p := hPointPool.Get(); p != nil {
return p
}
if sz > maxPointsSliceSize {
sz = maxPointsSliceSize
}
return make([]HPoint, 0, sz)
}
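The cap guards against callers' size estimates: the get functions receive an estimate that can be far too large, and without the clamp a single big query could park multi-megabyte slices in the pool indefinitely. A generic standalone sketch of the pattern (not Prometheus's actual pool types):

```go
package main

import (
	"fmt"
	"sync"
)

// Mirrors maxPointsSliceSize: estimates above this are clamped so the pool
// never holds on to enormous backing arrays.
const maxPooledCap = 5000

var pool sync.Pool

func getSlice(sz int) []float64 {
	if p, ok := pool.Get().([]float64); ok {
		return p
	}
	if sz > maxPooledCap {
		sz = maxPooledCap
	}
	return make([]float64, 0, sz)
}

func putSlice(p []float64) {
	if p != nil {
		pool.Put(p[:0]) // keep the backing array, reset the length
	}
}

func main() {
	s := getSlice(1_000_000) // caller over-estimates
	fmt.Println(cap(s))      // 5000
	putSlice(s)
}
```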


@@ -81,8 +81,15 @@ func bucketQuantile(q float64, buckets buckets) float64 {
if q > 1 {
return math.Inf(+1)
}
slices.SortFunc(buckets, func(a, b bucket) bool {
return a.upperBound < b.upperBound
slices.SortFunc(buckets, func(a, b bucket) int {
// We don't expect the bucket boundary to be a NaN.
if a.upperBound < b.upperBound {
return -1
}
if a.upperBound > b.upperBound {
return +1
}
return 0
})
if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) {
return math.NaN()
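The comparator spells out both branches rather than subtracting because a float difference truncated to int loses small differences, and NaN makes both `<` and `>` false, which is why the code notes that bucket boundaries are not expected to be NaN. A standalone illustration:

```go
package main

import (
	"fmt"
	"math"
)

// threeWay is the comparator shape the new slices.SortFunc expects.
func threeWay(a, b float64) int {
	if a < b {
		return -1
	}
	if a > b {
		return +1
	}
	return 0
}

func main() {
	a, b := 0.1, 0.2
	fmt.Println(threeWay(a, b))          // -1
	fmt.Println(int(a - b))              // 0: why a subtraction-based cmp fails
	fmt.Println(threeWay(math.NaN(), 1)) // 0: NaN compares "equal" to everything
}
```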


@@ -20,6 +20,7 @@ import (
"math"
"net/url"
"sort"
"strings"
"sync"
"time"
@@ -506,9 +507,11 @@ func (g *Group) AlertingRules() []*AlertingRule {
alerts = append(alerts, alertingRule)
}
}
slices.SortFunc(alerts, func(a, b *AlertingRule) bool {
return a.State() > b.State() ||
(a.State() == b.State() && a.Name() < b.Name())
slices.SortFunc(alerts, func(a, b *AlertingRule) int {
if a.State() == b.State() {
return strings.Compare(a.Name(), b.Name())
}
return int(b.State() - a.State())
})
return alerts
}
@@ -1277,11 +1280,15 @@ func (m *Manager) RuleGroups() []*Group {
rgs = append(rgs, g)
}
slices.SortFunc(rgs, func(a, b *Group) bool {
if a.file != b.file {
return a.file < b.file
slices.SortFunc(rgs, func(a, b *Group) int {
fileCompare := strings.Compare(a.file, b.file)
// If it's 0, the file names are the same.
// In that case, fall back to comparing the group names.
if fileCompare != 0 {
return fileCompare
}
return a.name < b.name
return strings.Compare(a.name, b.name)
})
return rgs


@@ -732,8 +732,8 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re
}
func resolveConflictingExposedLabels(lb *labels.Builder, conflictingExposedLabels []labels.Label) {
slices.SortStableFunc(conflictingExposedLabels, func(a, b labels.Label) bool {
return len(a.Name) < len(b.Name)
slices.SortStableFunc(conflictingExposedLabels, func(a, b labels.Label) int {
return len(a.Name) - len(b.Name)
})
for _, l := range conflictingExposedLabels {
@@ -1525,6 +1525,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
appErrs = appendErrors{}
sampleLimitErr error
bucketLimitErr error
lset labels.Labels // escapes to heap so hoisted out of loop
e exemplar.Exemplar // escapes to heap so hoisted out of loop
meta metadata.Metadata
metadataChanged bool
@@ -1622,7 +1623,6 @@ loop:
ce, ok := sl.cache.get(met)
var (
ref storage.SeriesRef
lset labels.Labels
hash uint64
)


@@ -18,7 +18,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
- name: install Go
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:


@@ -91,12 +91,8 @@ func (b *BufferedSeriesIterator) Seek(t int64) chunkenc.ValueType {
switch b.valueType {
case chunkenc.ValNone:
return chunkenc.ValNone
case chunkenc.ValFloat:
b.lastTime, _ = b.At()
case chunkenc.ValHistogram:
b.lastTime, _ = b.AtHistogram()
case chunkenc.ValFloatHistogram:
b.lastTime, _ = b.AtFloatHistogram()
case chunkenc.ValFloat, chunkenc.ValHistogram, chunkenc.ValFloatHistogram:
b.lastTime = b.AtT()
default:
panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType))
}


@@ -347,7 +347,7 @@ func (c *genericMergeSeriesSet) Next() bool {
}
// Now, pop items of the heap that have equal label sets.
c.currentSets = nil
c.currentSets = c.currentSets[:0]
c.currentLabels = c.heap[0].At().Labels()
for len(c.heap) > 0 && labels.Equal(c.currentLabels, c.heap[0].At().Labels()) {
set := heap.Pop(&c.heap).(genericSeriesSet)
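Resetting with `c.currentSets[:0]` instead of `nil` keeps the slice's backing array, so the appends on the next call usually avoid a fresh allocation. The pattern in isolation:

```go
package main

import "fmt"

func main() {
	buf := make([]int, 0, 8)
	for round := 1; round <= 3; round++ {
		buf = buf[:0] // length 0, capacity (and backing array) preserved
		for i := 0; i < round; i++ {
			buf = append(buf, i)
		}
		fmt.Println(buf, cap(buf)) // capacity stays 8: no reallocation
	}
}
```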


@@ -22,7 +22,10 @@ import (
"sync"
"time"
"github.com/grafana/regexp"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/google/uuid"
@@ -46,11 +49,26 @@ type ManagedIdentityConfig struct {
ClientID string `yaml:"client_id,omitempty"`
}
// OAuthConfig is used to store azure oauth config values.
type OAuthConfig struct {
// ClientID is the client ID of the Azure Active Directory application that is being used to authenticate.
ClientID string `yaml:"client_id,omitempty"`
// ClientSecret is the client secret of the Azure Active Directory application that is being used to authenticate.
ClientSecret string `yaml:"client_secret,omitempty"`
// TenantID is the tenant ID of the Azure Active Directory application that is being used to authenticate.
TenantID string `yaml:"tenant_id,omitempty"`
}
// AzureADConfig is used to store the config values.
type AzureADConfig struct { // nolint:revive
// ManagedIdentity is the managed identity that is being used to authenticate.
ManagedIdentity *ManagedIdentityConfig `yaml:"managed_identity,omitempty"`
// OAuth is the oauth config that is being used to authenticate.
OAuth *OAuthConfig `yaml:"oauth,omitempty"`
// Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina.
Cloud string `yaml:"cloud,omitempty"`
}
@@ -84,18 +102,47 @@ func (c *AzureADConfig) Validate() error {
return fmt.Errorf("must provide a cloud in the Azure AD config")
}
if c.ManagedIdentity == nil {
return fmt.Errorf("must provide an Azure Managed Identity in the Azure AD config")
if c.ManagedIdentity == nil && c.OAuth == nil {
return fmt.Errorf("must provide an Azure Managed Identity or Azure OAuth in the Azure AD config")
}
if c.ManagedIdentity.ClientID == "" {
return fmt.Errorf("must provide an Azure Managed Identity client_id in the Azure AD config")
if c.ManagedIdentity != nil && c.OAuth != nil {
return fmt.Errorf("cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config")
}
_, err := uuid.Parse(c.ManagedIdentity.ClientID)
if err != nil {
return fmt.Errorf("the provided Azure Managed Identity client_id provided is invalid")
if c.ManagedIdentity != nil {
if c.ManagedIdentity.ClientID == "" {
return fmt.Errorf("must provide an Azure Managed Identity client_id in the Azure AD config")
}
_, err := uuid.Parse(c.ManagedIdentity.ClientID)
if err != nil {
return fmt.Errorf("the provided Azure Managed Identity client_id is invalid")
}
}
if c.OAuth != nil {
if c.OAuth.ClientID == "" {
return fmt.Errorf("must provide an Azure OAuth client_id in the Azure AD config")
}
if c.OAuth.ClientSecret == "" {
return fmt.Errorf("must provide an Azure OAuth client_secret in the Azure AD config")
}
if c.OAuth.TenantID == "" {
return fmt.Errorf("must provide an Azure OAuth tenant_id in the Azure AD config")
}
var err error
_, err = uuid.Parse(c.OAuth.ClientID)
if err != nil {
return fmt.Errorf("the provided Azure OAuth client_id is invalid")
}
// Check the match result as well as the error: MatchString only errors on a bad pattern.
matched, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", c.OAuth.TenantID)
if err != nil || !matched {
return fmt.Errorf("the provided Azure OAuth tenant_id is invalid")
}
}
return nil
}
@@ -146,21 +193,54 @@ func (rt *azureADRoundTripper) RoundTrip(req *http.Request) (*http.Response, err
// newTokenCredential returns a TokenCredential of different kinds like Azure Managed Identity and Azure AD application.
func newTokenCredential(cfg *AzureADConfig) (azcore.TokenCredential, error) {
cred, err := newManagedIdentityTokenCredential(cfg.ManagedIdentity.ClientID)
var cred azcore.TokenCredential
var err error
cloudConfiguration, err := getCloudConfiguration(cfg.Cloud)
if err != nil {
return nil, err
}
clientOpts := &azcore.ClientOptions{
Cloud: cloudConfiguration,
}
if cfg.ManagedIdentity != nil {
managedIdentityConfig := &ManagedIdentityConfig{
ClientID: cfg.ManagedIdentity.ClientID,
}
cred, err = newManagedIdentityTokenCredential(clientOpts, managedIdentityConfig)
if err != nil {
return nil, err
}
}
if cfg.OAuth != nil {
oAuthConfig := &OAuthConfig{
ClientID: cfg.OAuth.ClientID,
ClientSecret: cfg.OAuth.ClientSecret,
TenantID: cfg.OAuth.TenantID,
}
cred, err = newOAuthTokenCredential(clientOpts, oAuthConfig)
if err != nil {
return nil, err
}
}
return cred, nil
}
// newManagedIdentityTokenCredential returns new Managed Identity token credential.
func newManagedIdentityTokenCredential(managedIdentityClientID string) (azcore.TokenCredential, error) {
clientID := azidentity.ClientID(managedIdentityClientID)
opts := &azidentity.ManagedIdentityCredentialOptions{ID: clientID}
func newManagedIdentityTokenCredential(clientOpts *azcore.ClientOptions, managedIdentityConfig *ManagedIdentityConfig) (azcore.TokenCredential, error) {
clientID := azidentity.ClientID(managedIdentityConfig.ClientID)
opts := &azidentity.ManagedIdentityCredentialOptions{ClientOptions: *clientOpts, ID: clientID}
return azidentity.NewManagedIdentityCredential(opts)
}
// newOAuthTokenCredential returns new OAuth token credential
func newOAuthTokenCredential(clientOpts *azcore.ClientOptions, oAuthConfig *OAuthConfig) (azcore.TokenCredential, error) {
opts := &azidentity.ClientSecretCredentialOptions{ClientOptions: *clientOpts}
return azidentity.NewClientSecretCredential(oAuthConfig.TenantID, oAuthConfig.ClientID, oAuthConfig.ClientSecret, opts)
}
// newTokenProvider helps to fetch accessToken for different types of credential. This also takes care of
// refreshing the accessToken before expiry. This accessToken is attached to the Authorization header while making requests.
func newTokenProvider(cfg *AzureADConfig, cred azcore.TokenCredential) (*tokenProvider, error) {
@@ -245,3 +325,17 @@ func getAudience(cloud string) (string, error) {
return "", errors.New("Cloud is not specified or is incorrect: " + cloud)
}
}
// getCloudConfiguration returns the cloud Configuration which contains AAD endpoint for different clouds
func getCloudConfiguration(c string) (cloud.Configuration, error) {
switch strings.ToLower(c) {
case strings.ToLower(AzureChina):
return cloud.AzureChina, nil
case strings.ToLower(AzureGovernment):
return cloud.AzureGovernment, nil
case strings.ToLower(AzurePublic):
return cloud.AzurePublic, nil
default:
return cloud.Configuration{}, errors.New("Cloud is not specified or is incorrect: " + c)
}
}


@@ -26,17 +26,20 @@ import (
"github.com/google/uuid"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"gopkg.in/yaml.v2"
)
const (
dummyAudience = "dummyAudience"
dummyClientID = "00000000-0000-0000-0000-000000000000"
testTokenString = "testTokenString"
dummyAudience = "dummyAudience"
dummyClientID = "00000000-0000-0000-0000-000000000000"
dummyClientSecret = "Cl1ent$ecret!"
dummyTenantID = "00000000-a12b-3cd4-e56f-000000000000"
testTokenString = "testTokenString"
)
var testTokenExpiry = time.Now().Add(10 * time.Second)
var testTokenExpiry = time.Now().Add(5 * time.Second)
type AzureAdTestSuite struct {
suite.Suite
@@ -62,47 +65,64 @@ func TestAzureAd(t *testing.T) {
}
func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() {
var gotReq *http.Request
testToken := &azcore.AccessToken{
Token: testTokenString,
ExpiresOn: testTokenExpiry,
cases := []struct {
cfg *AzureADConfig
}{
// AzureAD roundtripper with managed identity.
{
cfg: &AzureADConfig{
Cloud: "AzurePublic",
ManagedIdentity: &ManagedIdentityConfig{
ClientID: dummyClientID,
},
},
},
// AzureAD roundtripper with OAuth.
{
cfg: &AzureADConfig{
Cloud: "AzurePublic",
OAuth: &OAuthConfig{
ClientID: dummyClientID,
ClientSecret: dummyClientSecret,
TenantID: dummyTenantID,
},
},
},
}
for _, c := range cases {
var gotReq *http.Request
managedIdentityConfig := &ManagedIdentityConfig{
ClientID: dummyClientID,
testToken := &azcore.AccessToken{
Token: testTokenString,
ExpiresOn: testTokenExpiry,
}
ad.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil)
tokenProvider, err := newTokenProvider(c.cfg, ad.mockCredential)
ad.Assert().NoError(err)
rt := &azureADRoundTripper{
next: promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
gotReq = req
return &http.Response{StatusCode: http.StatusOK}, nil
}),
tokenProvider: tokenProvider,
}
cli := &http.Client{Transport: rt}
req, err := http.NewRequest(http.MethodPost, "https://example.com", strings.NewReader("Hello, world!"))
ad.Assert().NoError(err)
_, err = cli.Do(req)
ad.Assert().NoError(err)
ad.Assert().NotNil(gotReq)
origReq := gotReq
ad.Assert().NotEmpty(origReq.Header.Get("Authorization"))
ad.Assert().Equal("Bearer "+testTokenString, origReq.Header.Get("Authorization"))
}
azureAdConfig := &AzureADConfig{
Cloud: "AzurePublic",
ManagedIdentity: managedIdentityConfig,
}
ad.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil)
tokenProvider, err := newTokenProvider(azureAdConfig, ad.mockCredential)
ad.Assert().NoError(err)
rt := &azureADRoundTripper{
next: promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
gotReq = req
return &http.Response{StatusCode: http.StatusOK}, nil
}),
tokenProvider: tokenProvider,
}
cli := &http.Client{Transport: rt}
req, err := http.NewRequest(http.MethodPost, "https://example.com", strings.NewReader("Hello, world!"))
ad.Assert().NoError(err)
_, err = cli.Do(req)
ad.Assert().NoError(err)
ad.Assert().NotNil(gotReq)
origReq := gotReq
ad.Assert().NotEmpty(origReq.Header.Get("Authorization"))
ad.Assert().Equal("Bearer "+testTokenString, origReq.Header.Get("Authorization"))
}
func loadAzureAdConfig(filename string) (*AzureADConfig, error) {
@@ -117,42 +137,54 @@ func loadAzureAdConfig(filename string) (*AzureADConfig, error) {
return &cfg, nil
}
func testGoodConfig(t *testing.T, filename string) {
_, err := loadAzureAdConfig(filename)
if err != nil {
t.Fatalf("Unexpected error parsing %s: %s", filename, err)
func TestAzureAdConfig(t *testing.T) {
cases := []struct {
filename string
err string
}{
// Missing managed identity or oauth field.
{
filename: "testdata/azuread_bad_configmissing.yaml",
err: "must provide an Azure Managed Identity or Azure OAuth in the Azure AD config",
},
// Invalid managedidentity client id.
{
filename: "testdata/azuread_bad_invalidclientid.yaml",
err: "the provided Azure Managed Identity client_id is invalid",
},
// Missing tenant id in oauth config.
{
filename: "testdata/azuread_bad_invalidoauthconfig.yaml",
err: "must provide an Azure OAuth tenant_id in the Azure AD config",
},
// Invalid config when both managedidentity and oauth is provided.
{
filename: "testdata/azuread_bad_twoconfig.yaml",
err: "cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config",
},
// Valid config with the optional cloud field missing.
{
filename: "testdata/azuread_good_cloudmissing.yaml",
},
// Valid managed identity config.
{
filename: "testdata/azuread_good_managedidentity.yaml",
},
// Valid OAuth config.
{
filename: "testdata/azuread_good_oauth.yaml",
},
}
}
func TestGoodAzureAdConfig(t *testing.T) {
filename := "testdata/azuread_good.yaml"
testGoodConfig(t, filename)
}
func TestGoodCloudMissingAzureAdConfig(t *testing.T) {
filename := "testdata/azuread_good_cloudmissing.yaml"
testGoodConfig(t, filename)
}
func TestBadClientIdMissingAzureAdConfig(t *testing.T) {
filename := "testdata/azuread_bad_clientidmissing.yaml"
_, err := loadAzureAdConfig(filename)
if err == nil {
t.Fatalf("Did not receive expected error unmarshaling bad azuread config")
}
if !strings.Contains(err.Error(), "must provide an Azure Managed Identity in the Azure AD config") {
t.Errorf("Received unexpected error from unmarshal of %s: %s", filename, err.Error())
}
}
func TestBadInvalidClientIdAzureAdConfig(t *testing.T) {
filename := "testdata/azuread_bad_invalidclientid.yaml"
_, err := loadAzureAdConfig(filename)
if err == nil {
t.Fatalf("Did not receive expected error unmarshaling bad azuread config")
}
if !strings.Contains(err.Error(), "the provided Azure Managed Identity client_id provided is invalid") {
t.Errorf("Received unexpected error from unmarshal of %s: %s", filename, err.Error())
for _, c := range cases {
_, err := loadAzureAdConfig(c.filename)
if c.err != "" {
if err == nil {
t.Fatalf("Did not receive expected error unmarshaling bad azuread config")
}
require.EqualError(t, err, c.err)
} else {
require.NoError(t, err)
}
}
}
@@ -173,75 +205,90 @@ func TestTokenProvider(t *testing.T) {
suite.Run(t, new(TokenProviderTestSuite))
}
func (s *TokenProviderTestSuite) TestNewTokenProvider_NilAudience_Fail() {
managedIdentityConfig := &ManagedIdentityConfig{
ClientID: dummyClientID,
func (s *TokenProviderTestSuite) TestNewTokenProvider() {
cases := []struct {
cfg *AzureADConfig
err string
}{
// Invalid tokenProvider for managedidentity.
{
cfg: &AzureADConfig{
Cloud: "PublicAzure",
ManagedIdentity: &ManagedIdentityConfig{
ClientID: dummyClientID,
},
},
err: "Cloud is not specified or is incorrect: ",
},
// Invalid tokenProvider for oauth.
{
cfg: &AzureADConfig{
Cloud: "PublicAzure",
OAuth: &OAuthConfig{
ClientID: dummyClientID,
ClientSecret: dummyClientSecret,
TenantID: dummyTenantID,
},
},
err: "Cloud is not specified or is incorrect: ",
},
// Valid tokenProvider for managedidentity.
{
cfg: &AzureADConfig{
Cloud: "AzurePublic",
ManagedIdentity: &ManagedIdentityConfig{
ClientID: dummyClientID,
},
},
},
// Valid tokenProvider for oauth.
{
cfg: &AzureADConfig{
Cloud: "AzurePublic",
OAuth: &OAuthConfig{
ClientID: dummyClientID,
ClientSecret: dummyClientSecret,
TenantID: dummyTenantID,
},
},
},
}
mockGetTokenCallCounter := 1
for _, c := range cases {
if c.err != "" {
actualTokenProvider, actualErr := newTokenProvider(c.cfg, s.mockCredential)
azureAdConfig := &AzureADConfig{
Cloud: "PublicAzure",
ManagedIdentity: managedIdentityConfig,
s.Assert().Nil(actualTokenProvider)
s.Assert().NotNil(actualErr)
s.Assert().ErrorContains(actualErr, c.err)
} else {
testToken := &azcore.AccessToken{
Token: testTokenString,
ExpiresOn: testTokenExpiry,
}
s.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil).Once().
On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil)
actualTokenProvider, actualErr := newTokenProvider(c.cfg, s.mockCredential)
s.Assert().NotNil(actualTokenProvider)
s.Assert().Nil(actualErr)
s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background()))
// The token is set to refresh at half of its expiry time. The test tokens expire in 5s,
// hence the 4-second wait to check that the token is refreshed.
time.Sleep(4 * time.Second)
s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background()))
s.mockCredential.AssertNumberOfCalls(s.T(), "GetToken", 2*mockGetTokenCallCounter)
mockGetTokenCallCounter += 1
accessToken, err := actualTokenProvider.getAccessToken(context.Background())
s.Assert().Nil(err)
s.Assert().NotEqual(accessToken, testTokenString)
}
}
actualTokenProvider, actualErr := newTokenProvider(azureAdConfig, s.mockCredential)
s.Assert().Nil(actualTokenProvider)
s.Assert().NotNil(actualErr)
s.Assert().Equal("Cloud is not specified or is incorrect: "+azureAdConfig.Cloud, actualErr.Error())
}
func (s *TokenProviderTestSuite) TestNewTokenProvider_Success() {
managedIdentityConfig := &ManagedIdentityConfig{
ClientID: dummyClientID,
}
azureAdConfig := &AzureADConfig{
Cloud: "AzurePublic",
ManagedIdentity: managedIdentityConfig,
}
s.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil)
actualTokenProvider, actualErr := newTokenProvider(azureAdConfig, s.mockCredential)
s.Assert().NotNil(actualTokenProvider)
s.Assert().Nil(actualErr)
s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background()))
}
func (s *TokenProviderTestSuite) TestPeriodicTokenRefresh_Success() {
// setup
managedIdentityConfig := &ManagedIdentityConfig{
ClientID: dummyClientID,
}
azureAdConfig := &AzureADConfig{
Cloud: "AzurePublic",
ManagedIdentity: managedIdentityConfig,
}
testToken := &azcore.AccessToken{
Token: testTokenString,
ExpiresOn: testTokenExpiry,
}
s.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil).Once().
On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil)
actualTokenProvider, actualErr := newTokenProvider(azureAdConfig, s.mockCredential)
s.Assert().NotNil(actualTokenProvider)
s.Assert().Nil(actualErr)
s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background()))
// The token is set to refresh at half of its expiry time. The test tokens expire in 10s,
// hence the 6-second wait to check that the token is refreshed.
time.Sleep(6 * time.Second)
s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background()))
s.mockCredential.AssertNumberOfCalls(s.T(), "GetToken", 2)
accessToken, err := actualTokenProvider.getAccessToken(context.Background())
s.Assert().Nil(err)
s.Assert().NotEqual(accessToken, testTokenString)
}
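The sleeps in these tests follow the refresh policy noted in the comments: a token is refreshed once half of its lifetime has elapsed. An illustrative helper capturing that schedule (refreshAt is hypothetical, not part of the azuread package):

// refreshAt is a hypothetical sketch of the schedule the tests wait on:
// the refresh is due at the halfway point of the token's lifetime.
func refreshAt(issued, expiresOn time.Time) time.Time {
	return issued.Add(expiresOn.Sub(issued) / 2)
}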
func getToken() azcore.AccessToken {

View file

@ -0,0 +1,4 @@
cloud: AzurePublic
oauth:
client_id: 00000000-0000-0000-0000-000000000000
client_secret: Cl1ent$ecret!

View file

@ -0,0 +1,7 @@
cloud: AzurePublic
managed_identity:
client_id: 00000000-0000-0000-0000-000000000000
oauth:
client_id: 00000000-0000-0000-0000-000000000000
client_secret: Cl1ent$ecret!
tenant_id: 00000000-a12b-3cd4-e56f-000000000000

View file

@ -0,0 +1,5 @@
cloud: AzurePublic
oauth:
client_id: 00000000-0000-0000-0000-000000000000
client_secret: Cl1ent$ecret!
tenant_id: 00000000-a12b-3cd4-e56f-000000000000
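These testdata files mirror the azuread block of a remote_write entry; a hedged sketch of how the OAuth variant would sit in a full prometheus.yml (the endpoint URL is illustrative):

remote_write:
  - url: https://example.eastus.prometheus.monitor.azure.com/api/v1/write
    azuread:
      cloud: AzurePublic
      oauth:
        client_id: 00000000-0000-0000-0000-000000000000
        client_secret: Cl1ent$ecret!
        tenant_id: 00000000-a12b-3cd4-e56f-000000000000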

View file

@ -187,8 +187,8 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet
}
if sortSeries {
slices.SortFunc(series, func(a, b storage.Series) bool {
return labels.Compare(a.Labels(), b.Labels()) < 0
slices.SortFunc(series, func(a, b storage.Series) int {
return labels.Compare(a.Labels(), b.Labels())
})
}
return &concreteSeriesSet{
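This hunk, like several below, migrates to Go 1.21's standard-library sort API: slices.SortFunc now takes a three-way comparator func(a, b T) int (negative, zero, positive) instead of the x/exp less-style func(a, b T) bool. A minimal, self-contained sketch of the new contract (illustrative, not part of this diff):

package main

import (
	"fmt"
	"slices" // standard library as of Go 1.21
	"strings"
)

func main() {
	names := []string{"b", "c", "a"}
	// Return <0 if a sorts before b, 0 on a tie, >0 otherwise.
	slices.SortFunc(names, func(a, b string) int {
		return strings.Compare(a, b)
	})
	fmt.Println(names) // [a b c]
}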

View file

@ -16,6 +16,7 @@ package remote
import (
"context"
"net/http"
"strings"
"sync"
"github.com/go-kit/log"
@ -93,8 +94,8 @@ func (h *readHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
Value: value,
})
}
slices.SortFunc(sortedExternalLabels, func(a, b prompb.Label) bool {
return a.Name < b.Name
slices.SortFunc(sortedExternalLabels, func(a, b prompb.Label) int {
return strings.Compare(a.Name, b.Name)
})
responseType, err := NegotiateResponseType(req.AcceptedResponseTypes)

View file

@ -232,8 +232,8 @@ func (c *LeveledCompactor) Plan(dir string) ([]string, error) {
}
func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) {
slices.SortFunc(dms, func(a, b dirMeta) bool {
return a.meta.MinTime < b.meta.MinTime
slices.SortFunc(dms, func(a, b dirMeta) int {
return int(a.meta.MinTime - b.meta.MinTime)
})
res := c.selectOverlappingDirs(dms)
@ -415,8 +415,8 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
for s := range sources {
res.Compaction.Sources = append(res.Compaction.Sources, s)
}
slices.SortFunc(res.Compaction.Sources, func(a, b ulid.ULID) bool {
return a.Compare(b) < 0
slices.SortFunc(res.Compaction.Sources, func(a, b ulid.ULID) int {
return a.Compare(b)
})
res.MinTime = mint
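The comparators here order blocks by subtracting int64 timestamps and truncating to int, which is safe for realistic block time ranges but can overflow in principle. An overflow-proof alternative would be an explicit three-way compare; a sketch (cmpInt64 is a hypothetical helper, not part of this change):

// cmpInt64 is a hypothetical overflow-safe three-way comparator for int64,
// avoiding the truncation in int(a - b) when the difference exceeds int range.
func cmpInt64(a, b int64) int {
	switch {
	case a < b:
		return -1
	case a > b:
		return 1
	default:
		return 0
	}
}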

View file

@ -630,8 +630,8 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
return nil, nil
}
slices.SortFunc(loadable, func(a, b *Block) bool {
return a.Meta().MinTime < b.Meta().MinTime
slices.SortFunc(loadable, func(a, b *Block) int {
return int(a.Meta().MinTime - b.Meta().MinTime)
})
blockMetas := make([]BlockMeta, 0, len(loadable))
@ -1506,8 +1506,8 @@ func (db *DB) reloadBlocks() (err error) {
}
db.metrics.blocksBytes.Set(float64(blocksSize))
slices.SortFunc(toLoad, func(a, b *Block) bool {
return a.Meta().MinTime < b.Meta().MinTime
slices.SortFunc(toLoad, func(a, b *Block) int {
return int(a.Meta().MinTime - b.Meta().MinTime)
})
// Swap new blocks first for subsequently created readers to be seen.
@ -1581,8 +1581,8 @@ func deletableBlocks(db *DB, blocks []*Block) map[ulid.ULID]struct{} {
// Sort the blocks by time - newest to oldest (largest to smallest timestamp).
// This ensures that the retentions will remove the oldest blocks.
slices.SortFunc(blocks, func(a, b *Block) bool {
return a.Meta().MaxTime > b.Meta().MaxTime
slices.SortFunc(blocks, func(a, b *Block) int {
return int(b.Meta().MaxTime - a.Meta().MaxTime)
})
for _, block := range blocks {

View file

@ -185,8 +185,8 @@ func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*label
}
}
slices.SortFunc(ret, func(a, b exemplar.QueryResult) bool {
return labels.Compare(a.SeriesLabels, b.SeriesLabels) < 0
slices.SortFunc(ret, func(a, b exemplar.QueryResult) int {
return labels.Compare(a.SeriesLabels, b.SeriesLabels)
})
return ret, nil

View file

@ -286,7 +286,6 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wlog.WL, opts *Hea
if err := h.resetInMemoryState(); err != nil {
return nil, err
}
h.metrics = newHeadMetrics(h, r)
if opts.ChunkPool == nil {
opts.ChunkPool = chunkenc.NewPool()
@ -306,6 +305,7 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wlog.WL, opts *Hea
if err != nil {
return nil, err
}
h.metrics = newHeadMetrics(h, r)
return h, nil
}
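Since a registered collector can be scraped immediately, constructing the head metrics only after the remaining fields (such as the chunk disk mapper) are set plausibly prevents a collector from observing a partially initialized Head; that is a hedged reading of this reorder, not something the diff states.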

View file

@ -140,8 +140,8 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
return index.ErrPostings(errors.Wrap(err, "expand postings"))
}
slices.SortFunc(series, func(a, b *memSeries) bool {
return labels.Compare(a.lset, b.lset) < 0
slices.SortFunc(series, func(a, b *memSeries) int {
return labels.Compare(a.lset, b.lset)
})
// Convert back to list.

View file

@ -4905,6 +4905,16 @@ func TestHistogramValidation(t *testing.T) {
"valid histogram": {
h: tsdbutil.GenerateTestHistograms(1)[0],
},
"valid histogram that has its Count (4) higher than the actual total of buckets (2 + 1)": {
// This case is possible if NaN values (which do not fall into any bucket) are observed.
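// Arithmetic check: ZeroCount (2) plus the single positive bucket (1) accounts
// for 3 observations; Count = 4 therefore implies one NaN observation, which
// increments Count (and poisons Sum) without landing in any bucket.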
h: &histogram.Histogram{
ZeroCount: 2,
Count: 4,
Sum: math.NaN(),
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{1},
},
},
"rejects histogram that has too few negative buckets": {
h: &histogram.Histogram{
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},

View file

@ -19,6 +19,7 @@ import (
"encoding/binary"
"runtime"
"sort"
"strings"
"sync"
"github.com/pkg/errors"
@ -108,11 +109,14 @@ func (p *MemPostings) SortedKeys() []labels.Label {
}
p.mtx.RUnlock()
slices.SortFunc(keys, func(a, b labels.Label) bool {
if a.Name != b.Name {
return a.Name < b.Name
slices.SortFunc(keys, func(a, b labels.Label) int {
nameCompare := strings.Compare(a.Name, b.Name)
// If names are the same, compare values.
if nameCompare != 0 {
return nameCompare
}
return a.Value < b.Value
return strings.Compare(a.Value, b.Value)
})
return keys
}
@ -743,7 +747,7 @@ func (rp *removedPostings) Err() error {
// ListPostings implements the Postings interface over a plain list.
type ListPostings struct {
list []storage.SeriesRef
cur storage.SeriesRef
pos int
}
func NewListPostings(list []storage.SeriesRef) Postings {
@ -755,39 +759,34 @@ func newListPostings(list ...storage.SeriesRef) *ListPostings {
}
func (it *ListPostings) At() storage.SeriesRef {
return it.cur
return it.list[it.pos-1]
}
func (it *ListPostings) Next() bool {
if len(it.list) > 0 {
it.cur = it.list[0]
it.list = it.list[1:]
if it.pos < len(it.list) {
it.pos++
return true
}
it.cur = 0
return false
}
func (it *ListPostings) Seek(x storage.SeriesRef) bool {
// If the current value satisfies, then return.
if it.cur >= x {
return true
if it.pos == 0 {
it.pos++
}
if len(it.list) == 0 {
if it.pos > len(it.list) {
return false
}
// If the current value satisfies, then return.
if it.list[it.pos-1] >= x {
return true
}
// Do binary search between current position and end.
i := sort.Search(len(it.list), func(i int) bool {
return it.list[i] >= x
})
if i < len(it.list) {
it.cur = it.list[i]
it.list = it.list[i+1:]
return true
}
it.list = nil
return false
it.pos = sort.Search(len(it.list[it.pos-1:]), func(i int) bool {
return it.list[i+it.pos-1] >= x
}) + it.pos
return it.pos-1 < len(it.list)
}
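The rewrite keeps the backing slice intact and tracks a 1-based position instead of reslicing, so Seek can binary-search from the current offset without mutating the list. A standalone sketch of the same logic over plain uint64 refs (a simplified stand-in for storage.SeriesRef, not the package's code):

package main

import (
	"fmt"
	"sort"
)

// listPostings mirrors the pos-based iterator above: pos == 0 means
// "before the first element"; otherwise At() returns list[pos-1].
type listPostings struct {
	list []uint64
	pos  int
}

func (it *listPostings) At() uint64 { return it.list[it.pos-1] }

func (it *listPostings) Next() bool {
	if it.pos < len(it.list) {
		it.pos++
		return true
	}
	return false
}

func (it *listPostings) Seek(x uint64) bool {
	if it.pos == 0 {
		it.pos++
	}
	if it.pos > len(it.list) {
		return false
	}
	if it.list[it.pos-1] >= x {
		return true
	}
	// Binary search between the current position and the end.
	it.pos = sort.Search(len(it.list[it.pos-1:]), func(i int) bool {
		return it.list[i+it.pos-1] >= x
	}) + it.pos
	return it.pos-1 < len(it.list)
}

func main() {
	p := &listPostings{list: []uint64{10, 20, 30, 40, 50}}
	fmt.Println(p.Seek(25), p.At()) // true 30
	fmt.Println(p.Next(), p.At())   // true 40
	fmt.Println(p.Seek(60))         // false
}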
func (it *ListPostings) Err() error {

View file

@ -1241,3 +1241,147 @@ func TestPostingsWithIndexHeap(t *testing.T) {
require.Equal(t, storage.SeriesRef(25), node.p.At())
})
}
func TestListPostings(t *testing.T) {
t.Run("empty list", func(t *testing.T) {
p := NewListPostings(nil)
require.False(t, p.Next())
require.False(t, p.Seek(10))
require.False(t, p.Next())
require.NoError(t, p.Err())
})
t.Run("one posting", func(t *testing.T) {
t.Run("next", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
require.True(t, p.Next())
require.Equal(t, storage.SeriesRef(10), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
})
t.Run("seek less", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
require.True(t, p.Seek(5))
require.Equal(t, storage.SeriesRef(10), p.At())
require.True(t, p.Seek(5))
require.Equal(t, storage.SeriesRef(10), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
})
t.Run("seek equal", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
require.True(t, p.Seek(10))
require.Equal(t, storage.SeriesRef(10), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
})
t.Run("seek more", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
require.False(t, p.Seek(15))
require.False(t, p.Next())
require.NoError(t, p.Err())
})
t.Run("seek after next", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
require.True(t, p.Next())
require.False(t, p.Seek(15))
require.False(t, p.Next())
require.NoError(t, p.Err())
})
})
t.Run("multiple postings", func(t *testing.T) {
t.Run("next", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20})
require.True(t, p.Next())
require.Equal(t, storage.SeriesRef(10), p.At())
require.True(t, p.Next())
require.Equal(t, storage.SeriesRef(20), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
})
t.Run("seek", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20})
require.True(t, p.Seek(5))
require.Equal(t, storage.SeriesRef(10), p.At())
require.True(t, p.Seek(5))
require.Equal(t, storage.SeriesRef(10), p.At())
require.True(t, p.Seek(10))
require.Equal(t, storage.SeriesRef(10), p.At())
require.True(t, p.Next())
require.Equal(t, storage.SeriesRef(20), p.At())
require.True(t, p.Seek(10))
require.Equal(t, storage.SeriesRef(20), p.At())
require.True(t, p.Seek(20))
require.Equal(t, storage.SeriesRef(20), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
})
t.Run("seek lest than last", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50})
require.True(t, p.Seek(45))
require.Equal(t, storage.SeriesRef(50), p.At())
require.False(t, p.Next())
})
t.Run("seek exactly last", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50})
require.True(t, p.Seek(50))
require.Equal(t, storage.SeriesRef(50), p.At())
require.False(t, p.Next())
})
t.Run("seek more than last", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50})
require.False(t, p.Seek(60))
require.False(t, p.Next())
})
})
t.Run("seek", func(t *testing.T) {
for _, c := range []int{2, 8, 9, 10} {
t.Run(fmt.Sprintf("count=%d", c), func(t *testing.T) {
list := make([]storage.SeriesRef, c)
for i := 0; i < c; i++ {
list[i] = storage.SeriesRef(i * 10)
}
t.Run("all one by one", func(t *testing.T) {
p := NewListPostings(list)
for i := 0; i < c; i++ {
require.True(t, p.Seek(storage.SeriesRef(i*10)))
require.Equal(t, storage.SeriesRef(i*10), p.At())
}
require.False(t, p.Seek(storage.SeriesRef(c*10)))
})
t.Run("each one", func(t *testing.T) {
for _, ref := range list {
p := NewListPostings(list)
require.True(t, p.Seek(ref))
require.Equal(t, ref, p.At())
}
})
})
}
})
}
func BenchmarkListPostings(b *testing.B) {
const maxCount = 1e6
input := make([]storage.SeriesRef, maxCount)
for i := 0; i < maxCount; i++ {
input[i] = storage.SeriesRef(i << 2)
}
for _, count := range []int{100, 1e3, 10e3, 100e3, maxCount} {
b.Run(fmt.Sprintf("count=%d", count), func(b *testing.B) {
for i := 0; i < b.N; i++ {
p := NewListPostings(input[:count])
var sum storage.SeriesRef
for p.Next() {
sum += p.At()
}
require.NotZero(b, sum)
}
})
}
}

View file

@ -63,8 +63,6 @@ func (m *maxHeap) push(item Stat) {
}
func (m *maxHeap) get() []Stat {
slices.SortFunc(m.Items, func(a, b Stat) bool {
return a.Count > b.Count
})
slices.SortFunc(m.Items, func(a, b Stat) int { return int(b.Count - a.Count) })
return m.Items
}

View file

@ -183,18 +183,18 @@ type chunkMetaAndChunkDiskMapperRef struct {
origMaxT int64
}
func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) bool {
func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) int {
if a.meta.MinTime == b.meta.MinTime {
return a.meta.Ref < b.meta.Ref
return int(a.meta.Ref - b.meta.Ref)
}
return a.meta.MinTime < b.meta.MinTime
return int(a.meta.MinTime - b.meta.MinTime)
}
func lessByMinTimeAndMinRef(a, b chunks.Meta) bool {
func lessByMinTimeAndMinRef(a, b chunks.Meta) int {
if a.MinTime == b.MinTime {
return a.Ref < b.Ref
return int(a.Ref - b.Ref)
}
return a.MinTime < b.MinTime
return int(a.MinTime - b.MinTime)
}
func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {

View file

@ -8,7 +8,6 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
@ -307,13 +306,13 @@ func TestPostingsForMatchersCache(t *testing.T) {
actual, err := index.ExpandPostings(p)
require.NoError(t, err)
assert.Equal(t, refsLists[matchersKey(matchers)], actual)
require.Equal(t, refsLists[matchersKey(matchers)], actual)
}
}
// At this point we expect that the postings have been computed only once for the 3 matchers.
for i := 0; i < 3; i++ {
assert.Equalf(t, 1, callsPerMatchers[matchersKey(matchersLists[i])], "matcher %d", i)
require.Equalf(t, 1, callsPerMatchers[matchersKey(matchersLists[i])], "matcher %d", i)
}
// Call PostingsForMatchers() for a 4th matcher. We expect this will evict the oldest cached entry.
@ -325,17 +324,17 @@ func TestPostingsForMatchersCache(t *testing.T) {
actual, err := index.ExpandPostings(p)
require.NoError(t, err)
assert.Equal(t, refsLists[matchersKey(matchers)], actual)
require.Equal(t, refsLists[matchersKey(matchers)], actual)
}
// To ensure the 1st (oldest) entry was removed, we call PostingsForMatchers() again on those matchers.
_, err := c.PostingsForMatchers(ctx, indexForPostingsMock{}, true, matchersLists[0]...)
require.NoError(t, err)
assert.Equal(t, 2, callsPerMatchers[matchersKey(matchersLists[0])])
assert.Equal(t, 1, callsPerMatchers[matchersKey(matchersLists[1])])
assert.Equal(t, 1, callsPerMatchers[matchersKey(matchersLists[2])])
assert.Equal(t, 1, callsPerMatchers[matchersKey(matchersLists[3])])
require.Equal(t, 2, callsPerMatchers[matchersKey(matchersLists[0])])
require.Equal(t, 1, callsPerMatchers[matchersKey(matchersLists[1])])
require.Equal(t, 1, callsPerMatchers[matchersKey(matchersLists[2])])
require.Equal(t, 1, callsPerMatchers[matchersKey(matchersLists[3])])
})
}

View file

@ -221,8 +221,12 @@ func PostingsForMatchers(ctx context.Context, ix IndexPostingsReader, ms ...*lab
// there is no chance that the set we subtract from
// contains postings of series that didn't exist when
// we constructed the set we subtract by.
slices.SortStableFunc(ms, func(i, j *labels.Matcher) bool {
return !isSubtractingMatcher(i) && isSubtractingMatcher(j)
slices.SortStableFunc(ms, func(i, j *labels.Matcher) int {
if !isSubtractingMatcher(i) && isSubtractingMatcher(j) {
return -1
}
return +1
})
for _, m := range ms {
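The new comparator encodes the old boolean predicate in the int contract: a non-subtracting matcher sorts before a subtracting one, and SortStableFunc's stability preserves the original order within each group. A standalone sketch of the same stable-partition idiom (evens before odds; illustrative only):

package main

import (
	"fmt"
	"slices"
)

func main() {
	xs := []int{5, 2, 7, 4, 1, 8}
	// Stable partition: evens before odds, relative order preserved.
	// Returning 0 for ties keeps the comparator contract consistent.
	slices.SortStableFunc(xs, func(i, j int) int {
		ei, ej := i%2 == 0, j%2 == 0
		switch {
		case ei && !ej:
			return -1
		case !ei && ej:
			return 1
		default:
			return 0
		}
	})
	fmt.Println(xs) // [2 4 8 5 7 1]
}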
@ -706,7 +710,7 @@ func (p *populateWithDelGenericSeriesIterator) reset(blockID ulid.ULID, cr Chunk
p.chks = chks
p.i = -1
p.err = nil
p.bufIter.Iter = nil
// Note we don't touch p.bufIter.Iter; it is holding on to an iterator we might reuse in next().
p.bufIter.Intervals = p.bufIter.Intervals[:0]
p.intervals = intervals
p.currDelIter = nil

View file

@ -33,10 +33,9 @@ const (
)
func BenchmarkQuerier(b *testing.B) {
chunkDir := b.TempDir()
opts := DefaultHeadOptions()
opts.ChunkRange = 1000
opts.ChunkDirRoot = chunkDir
opts.ChunkDirRoot = b.TempDir()
h, err := NewHead(nil, nil, nil, nil, opts, nil)
require.NoError(b, err)
defer func() {
@ -60,9 +59,13 @@ func BenchmarkQuerier(b *testing.B) {
}
require.NoError(b, app.Commit())
ir, err := h.Index()
require.NoError(b, err)
b.Run("Head", func(b *testing.B) {
ir, err := h.Index()
require.NoError(b, err)
defer func() {
require.NoError(b, ir.Close())
}()
b.Run("PostingsForMatchers", func(b *testing.B) {
benchmarkPostingsForMatchers(b, ir)
})
@ -71,18 +74,20 @@ func BenchmarkQuerier(b *testing.B) {
})
})
tmpdir := b.TempDir()
blockdir := createBlockFromHead(b, tmpdir, h)
block, err := OpenBlock(nil, blockdir, nil)
require.NoError(b, err)
defer func() {
require.NoError(b, block.Close())
}()
ir, err = block.Index()
require.NoError(b, err)
defer ir.Close()
b.Run("Block", func(b *testing.B) {
blockdir := createBlockFromHead(b, b.TempDir(), h)
block, err := OpenBlock(nil, blockdir, nil)
require.NoError(b, err)
defer func() {
require.NoError(b, block.Close())
}()
ir, err := block.Index()
require.NoError(b, err)
defer func() {
require.NoError(b, ir.Close())
}()
b.Run("PostingsForMatchers", func(b *testing.B) {
benchmarkPostingsForMatchers(b, ir)
})
@ -247,10 +252,9 @@ func BenchmarkMergedStringIter(b *testing.B) {
}
func BenchmarkQuerierSelect(b *testing.B) {
chunkDir := b.TempDir()
opts := DefaultHeadOptions()
opts.ChunkRange = 1000
opts.ChunkDirRoot = chunkDir
opts.ChunkDirRoot = b.TempDir()
h, err := NewHead(nil, nil, nil, nil, opts, nil)
require.NoError(b, err)
defer h.Close()

View file

@ -374,8 +374,8 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) {
refs = append(refs, checkpointRef{name: fi.Name(), index: idx})
}
slices.SortFunc(refs, func(a, b checkpointRef) bool {
return a.index < b.index
slices.SortFunc(refs, func(a, b checkpointRef) int {
return a.index - b.index
})
return refs, nil

View file

@ -909,8 +909,8 @@ func listSegments(dir string) (refs []segmentRef, err error) {
}
refs = append(refs, segmentRef{name: fn, index: k})
}
slices.SortFunc(refs, func(a, b segmentRef) bool {
return a.index < b.index
slices.SortFunc(refs, func(a, b segmentRef) int {
return a.index - b.index
})
for i := 0; i < len(refs)-1; i++ {
if refs[i].index+1 != refs[i+1].index {

View file

@ -85,9 +85,7 @@ func (t *TimerGroup) String() string {
for _, timer := range t.timers {
timers = append(timers, timer)
}
slices.SortFunc(timers, func(a, b *Timer) bool {
return a.created < b.created
})
slices.SortFunc(timers, func(a, b *Timer) int { return a.created - b.created })
result := &bytes.Buffer{}
for _, timer := range timers {
fmt.Fprintf(result, "%s\n", timer)

View file

@ -436,7 +436,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) {
return invalidParamError(err, "timeout")
}
ctx, cancel = context.WithTimeout(ctx, timeout)
ctx, cancel = context.WithDeadline(ctx, api.now().Add(timeout))
defer cancel()
}
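Anchoring the deadline on the injectable api.now() rather than the wall clock (as context.WithTimeout would) makes it deterministic and assertable, which TestQueryTimeout below relies on. A minimal sketch of the pattern (the fixed clock is illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// An injectable clock makes the deadline deterministic in tests.
	now := func() time.Time { return time.Date(2023, 10, 6, 0, 0, 0, 0, time.UTC) }
	timeout := time.Second

	ctx, cancel := context.WithDeadline(context.Background(), now().Add(timeout))
	defer cancel()

	deadline, ok := ctx.Deadline()
	fmt.Println(ok, deadline.Equal(now().Add(timeout))) // true true
}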

View file

@ -404,7 +404,7 @@ func TestEndpoints(t *testing.T) {
testEndpoints(t, api, testTargetRetriever, storage, true)
})
// Run all the API tests against a API that is wired to forward queries via
// Run all the API tests against an API that is wired to forward queries via
// the remote read client to a test server, which in turn sends them to the
// data from the test storage.
t.Run("remote", func(t *testing.T) {
@ -3660,3 +3660,107 @@ func TestExtractQueryOpts(t *testing.T) {
})
}
}
// Test query timeout parameter.
func TestQueryTimeout(t *testing.T) {
storage := promql.LoadedStorage(t, `
load 1m
test_metric1{foo="bar"} 0+100x100
`)
t.Cleanup(func() {
_ = storage.Close()
})
now := time.Now()
for _, tc := range []struct {
name string
method string
}{
{
name: "GET method",
method: http.MethodGet,
},
{
name: "POST method",
method: http.MethodPost,
},
} {
t.Run(tc.name, func(t *testing.T) {
engine := &fakeEngine{}
api := &API{
Queryable: storage,
QueryEngine: engine,
ExemplarQueryable: storage.ExemplarQueryable(),
alertmanagerRetriever: testAlertmanagerRetriever{}.toFactory(),
flagsMap: sampleFlagMap,
now: func() time.Time { return now },
config: func() config.Config { return samplePrometheusCfg },
ready: func(f http.HandlerFunc) http.HandlerFunc { return f },
}
query := url.Values{
"query": []string{"2"},
"timeout": []string{"1s"},
}
ctx := context.Background()
req, err := http.NewRequest(tc.method, fmt.Sprintf("http://example.com?%s", query.Encode()), nil)
require.NoError(t, err)
req.RemoteAddr = "127.0.0.1:20201"
res := api.query(req.WithContext(ctx))
assertAPIError(t, res.err, errorNone)
require.Len(t, engine.query.execCalls, 1)
deadline, ok := engine.query.execCalls[0].Deadline()
require.True(t, ok)
require.Equal(t, now.Add(time.Second), deadline)
})
}
}
// fakeEngine is a fake QueryEngine implementation.
type fakeEngine struct {
query fakeQuery
}
func (e *fakeEngine) SetQueryLogger(promql.QueryLogger) {}
func (e *fakeEngine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) {
return &e.query, nil
}
func (e *fakeEngine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) {
return &e.query, nil
}
// fakeQuery is a fake Query implementation.
type fakeQuery struct {
query string
execCalls []context.Context
}
func (q *fakeQuery) Exec(ctx context.Context) *promql.Result {
q.execCalls = append(q.execCalls, ctx)
return &promql.Result{
Value: &parser.StringLiteral{
Val: "test",
},
}
}
func (q *fakeQuery) Close() {}
func (q *fakeQuery) Statement() parser.Statement {
return nil
}
func (q *fakeQuery) Stats() *stats.Statistics {
return nil
}
func (q *fakeQuery) Cancel() {}
func (q *fakeQuery) String() string {
return q.query
}

View file

@ -17,6 +17,7 @@ import (
"fmt"
"net/http"
"sort"
"strings"
"github.com/go-kit/log/level"
"github.com/gogo/protobuf/proto"
@ -169,10 +170,10 @@ Loop:
return
}
slices.SortFunc(vec, func(a, b promql.Sample) bool {
slices.SortFunc(vec, func(a, b promql.Sample) int {
ni := a.Metric.Get(labels.MetricName)
nj := b.Metric.Get(labels.MetricName)
return ni < nj
return strings.Compare(ni, nj)
})
externalLabels := h.config.GlobalConfig.ExternalLabels.Map()

web/ui/package-lock.json (generated, 708 lines changed): diff suppressed because it is too large.