Merge pull request #539 from grafana/arve/sync-prometheus

Sync with latest Prometheus upstream
Arve Knudsen 2023-10-10 14:08:00 +02:00 committed by GitHub
commit 56e19def27
69 changed files with 1830 additions and 949 deletions


@ -13,6 +13,7 @@ output:
linters:
enable:
- depguard
- errorlint
- gocritic
- gofumpt
- goimports
@ -31,6 +32,24 @@ issues:
- path: _test.go
linters:
- errcheck
- path: discovery/
linters:
- errorlint
- path: scrape/
linters:
- errorlint
- path: storage/
linters:
- errorlint
- path: tsdb/
linters:
- errorlint
- path: util/
linters:
- errorlint
- path: web/
linters:
- errorlint
linters-settings:
depguard:
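
For context: errorlint flags error-handling patterns that break once errors are wrapped, such as comparing with == instead of errors.Is, or formatting with %v where %w would preserve the chain. A minimal sketch of the kind of rewrite it prompts (file name and messages here are illustrative):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("missing.txt")

	// errorlint flags this: %v flattens err, discarding the error chain.
	// wrapped := fmt.Errorf("loading config: %v", err)
	wrapped := fmt.Errorf("loading config: %w", err)

	// errorlint flags this: wrapped == fs.ErrNotExist is always false here.
	// errors.Is walks the chain that %w preserved.
	if errors.Is(wrapped, fs.ErrNotExist) {
		fmt.Println("file does not exist:", wrapped)
	}
}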


@ -1,5 +1,9 @@
# Changelog
## 2.47.1 / 2023-10-04
* [BUGFIX] Fix duplicate sample detection at chunk size limit #12874
## 2.47.0 / 2023-09-06
This release adds an experimental OpenTelemetry (OTLP) Ingestion feature,


@ -14,6 +14,7 @@ examples and guides.</p>
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/486/badge)](https://bestpractices.coreinfrastructure.org/projects/486)
[![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/prometheus/prometheus)
[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/prometheus.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus)
</div>


@ -52,7 +52,8 @@ Release cadence of first pre-releases being cut is 6 weeks.
| v2.45 LTS | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) |
| v2.46 | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) |
| v2.47 | 2023-08-23 | Bryan Boreham (GitHub: @bboreham) |
| v2.48 | 2023-10-04 | **searching for volunteer** |
| v2.48 | 2023-10-04 | Levi Harrison (GitHub: @LeviHarrison) |
| v2.49 | 2023-11-15 | **searching for volunteer** |
If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.


@ -1 +1 @@
2.47.0
2.47.1


@ -498,10 +498,9 @@ func TestDocumentation(t *testing.T) {
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
if exitError, ok := err.(*exec.ExitError); ok {
if exitError.ExitCode() != 0 {
fmt.Println("Command failed with non-zero exit code")
}
var exitError *exec.ExitError
if errors.As(err, &exitError) && exitError.ExitCode() != 0 {
fmt.Println("Command failed with non-zero exit code")
}
}
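
This rewrite matters beyond style: a direct type assertion like err.(*exec.ExitError) matches only the concrete type, so it silently stops matching the moment any caller wraps the error with %w, while errors.As unwraps the chain. A small demonstration (assumes a Unix-like system with a false binary on PATH):

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	err := exec.Command("false").Run() // exits with status 1
	wrapped := fmt.Errorf("running check: %w", err)

	if _, ok := wrapped.(*exec.ExitError); ok {
		fmt.Println("unreachable: wrapped is a *fmt.wrapError, not *exec.ExitError")
	}

	var exitError *exec.ExitError
	if errors.As(wrapped, &exitError) && exitError.ExitCode() != 0 {
		fmt.Println("exit code:", exitError.ExitCode()) // prints 1
	}
}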


@ -733,30 +733,7 @@ func CheckRules(ls lintConfig, files ...string) int {
failed := false
hasErrors := false
if len(files) == 0 {
fmt.Println("Checking standard input")
data, err := io.ReadAll(os.Stdin)
if err != nil {
fmt.Fprintln(os.Stderr, " FAILED:", err)
return failureExitCode
}
rgs, errs := rulefmt.Parse(data)
for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error())
return failureExitCode
}
if n, errs := checkRuleGroups(rgs, ls); errs != nil {
fmt.Fprintln(os.Stderr, " FAILED:")
for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error())
}
failed = true
for _, err := range errs {
hasErrors = hasErrors || !errors.Is(err, lintError)
}
} else {
fmt.Printf(" SUCCESS: %d rules found\n", n)
}
fmt.Println()
failed, hasErrors = checkRulesFromStdin(ls)
} else {
failed, hasErrors = checkRules(files, ls)
}
@ -771,6 +748,44 @@ func CheckRules(ls lintConfig, files ...string) int {
return successExitCode
}
// checkRulesFromStdin validates rules from stdin.
func checkRulesFromStdin(ls lintConfig) (bool, bool) {
failed := false
hasErrors := false
fmt.Println("Checking standard input")
data, err := io.ReadAll(os.Stdin)
if err != nil {
fmt.Fprintln(os.Stderr, " FAILED:", err)
return true, true
}
rgs, errs := rulefmt.Parse(data)
if errs != nil {
failed = true
fmt.Fprintln(os.Stderr, " FAILED:")
for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error())
hasErrors = hasErrors || !errors.Is(e, lintError)
}
if hasErrors {
return failed, hasErrors
}
}
if n, errs := checkRuleGroups(rgs, ls); errs != nil {
fmt.Fprintln(os.Stderr, " FAILED:")
for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error())
}
failed = true
for _, err := range errs {
hasErrors = hasErrors || !errors.Is(err, lintError)
}
} else {
fmt.Printf(" SUCCESS: %d rules found\n", n)
}
fmt.Println()
return failed, hasErrors
}
// checkRules validates rule files.
func checkRules(files []string, ls lintConfig) (bool, bool) {
failed := false
@ -780,7 +795,14 @@ func checkRules(files []string, ls lintConfig) (bool, bool) {
rgs, errs := rulefmt.ParseFile(f)
if errs != nil {
failed = true
continue
fmt.Fprintln(os.Stderr, " FAILED:")
for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error())
hasErrors = hasErrors || !errors.Is(e, lintError)
}
if hasErrors {
continue
}
}
if n, errs := checkRuleGroups(rgs, ls); errs != nil {
fmt.Fprintln(os.Stderr, " FAILED:")
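
Throughout this refactor the returned pair separates "the check failed" from "a hard error occurred": findings that wrap what appears to be a lintError sentinel set only failed, so lint-only problems can map to a dedicated exit code. The classification pattern in isolation (sentinel definition assumed):

package main

import (
	"errors"
	"fmt"
)

var lintError = errors.New("lint error")

// classify mirrors the failed/hasErrors bookkeeping in checkRules:
// every finding fails the run, but only non-lint errors are "hard" errors.
func classify(errs []error) (failed, hasErrors bool) {
	for _, err := range errs {
		failed = true
		hasErrors = hasErrors || !errors.Is(err, lintError)
	}
	return failed, hasErrors
}

func main() {
	findings := []error{fmt.Errorf("duplicate rule: %w", lintError)}
	failed, hasErrors := classify(findings)
	fmt.Println(failed, hasErrors) // true false: failed, but lint-only
}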


@ -450,10 +450,9 @@ func TestDocumentation(t *testing.T) {
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
if exitError, ok := err.(*exec.ExitError); ok {
if exitError.ExitCode() != 0 {
fmt.Println("Command failed with non-zero exit code")
}
var exitError *exec.ExitError
if errors.As(err, &exitError) && exitError.ExitCode() != 0 {
fmt.Println("Command failed with non-zero exit code")
}
}
@ -464,3 +463,88 @@ func TestDocumentation(t *testing.T) {
require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. Hint: run `make cli-documentation`.")
}
func TestCheckRules(t *testing.T) {
t.Run("rules-good", func(t *testing.T) {
data, err := os.ReadFile("./testdata/rules.yml")
require.NoError(t, err)
r, w, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
_, err = w.Write(data)
if err != nil {
t.Error(err)
}
w.Close()
// Restore stdin right after the test.
defer func(v *os.File) { os.Stdin = v }(os.Stdin)
os.Stdin = r
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false))
require.Equal(t, successExitCode, exitCode, "")
})
t.Run("rules-bad", func(t *testing.T) {
data, err := os.ReadFile("./testdata/rules-bad.yml")
require.NoError(t, err)
r, w, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
_, err = w.Write(data)
if err != nil {
t.Error(err)
}
w.Close()
// Restore stdin right after the test.
defer func(v *os.File) { os.Stdin = v }(os.Stdin)
os.Stdin = r
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false))
require.Equal(t, failureExitCode, exitCode, "")
})
t.Run("rules-lint-fatal", func(t *testing.T) {
data, err := os.ReadFile("./testdata/prometheus-rules.lint.yml")
require.NoError(t, err)
r, w, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
_, err = w.Write(data)
if err != nil {
t.Error(err)
}
w.Close()
// Restore stdin right after the test.
defer func(v *os.File) { os.Stdin = v }(os.Stdin)
os.Stdin = r
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true))
require.Equal(t, lintErrExitCode, exitCode, "")
})
}
func TestCheckRulesWithRuleFiles(t *testing.T) {
t.Run("rules-good", func(t *testing.T) {
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules.yml")
require.Equal(t, successExitCode, exitCode, "")
})
t.Run("rules-bad", func(t *testing.T) {
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules-bad.yml")
require.Equal(t, failureExitCode, exitCode, "")
})
t.Run("rules-lint-fatal", func(t *testing.T) {
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true), "./testdata/prometheus-rules.lint.yml")
require.Equal(t, lintErrExitCode, exitCode, "")
})
}
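
The new stdin-based tests all rely on the same trick: build an os.Pipe, write the fixture into the write end, close it so readers see EOF, and swap os.Stdin for the read end with a deferred restore. The pattern on its own:

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("groups: []\n")); err != nil {
		panic(err)
	}
	w.Close() // close the write end so ReadAll sees EOF

	defer func(v *os.File) { os.Stdin = v }(os.Stdin) // restore real stdin
	os.Stdin = r

	data, err := io.ReadAll(os.Stdin) // the code under test reads the fixture
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes from fake stdin\n", len(data))
}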

cmd/promtool/testdata/rules-bad.yml (new file)

@ -0,0 +1,28 @@
# This is the rules file.
groups:
- name: alerts
rules:
- alert: InstanceDown
expr: up == 0
for: 5m
labels:
severity: page
annotations:
summary: "Instance {{ $label.foo }} down"
description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes."
- alert: AlwaysFiring
expr: 1
- name: rules
rules:
- record: job:test:count_over_time1m
expr: sum without(instance) (count_over_time(test[1m]))
# A recording rule that doesn't depend on input series.
- record: fixed_data
expr: 1
# Subquery with default resolution test.
- record: suquery_interval_test
expr: count_over_time(up[5m:])


@ -459,7 +459,16 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten
postingInfos := []postingInfo{}
printInfo := func(postingInfos []postingInfo) {
slices.SortFunc(postingInfos, func(a, b postingInfo) bool { return a.metric > b.metric })
slices.SortFunc(postingInfos, func(a, b postingInfo) int {
switch {
case b.metric < a.metric:
return -1
case b.metric > a.metric:
return 1
default:
return 0
}
})
for i, pc := range postingInfos {
if i >= limit {
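
This comparator rewrite, like the labels changes further down, tracks the breaking change in golang.org/x/exp/slices (mirrored in Go 1.21's standard slices package): SortFunc now takes func(a, b T) int returning negative, zero, or positive, instead of a less func(a, b T) bool. For ordered types the switch above usually collapses to cmp.Compare (or strings.Compare for strings):

package main

import (
	"cmp"
	"fmt"
	"slices"
)

func main() {
	counts := []uint64{3, 1, 2}

	// Ascending order: cmp.Compare returns <0, 0, or >0.
	slices.SortFunc(counts, func(a, b uint64) int { return cmp.Compare(a, b) })
	fmt.Println(counts) // [1 2 3]

	// Descending order, as in the postingInfos sort: swap the operands.
	slices.SortFunc(counts, func(a, b uint64) int { return cmp.Compare(b, a) })
	fmt.Println(counts) // [3 2 1]
}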


@ -241,7 +241,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
g.Eval(suite.Context(), ts)
for _, r := range g.Rules() {
if r.LastError() != nil {
evalErrs = append(evalErrs, fmt.Errorf(" rule: %s, time: %s, err: %v",
evalErrs = append(evalErrs, fmt.Errorf(" rule: %s, time: %s, err: %w",
r.Name(), ts.Sub(time.Unix(0, 0).UTC()), r.LastError()))
}
}


@ -140,11 +140,9 @@ type Manager struct {
// Run starts the background processing
func (m *Manager) Run() error {
go m.sender()
for range m.ctx.Done() {
m.cancelDiscoverers()
return m.ctx.Err()
}
return nil
<-m.ctx.Done()
m.cancelDiscoverers()
return m.ctx.Err()
}
// SyncCh returns a read only channel used by all the clients to receive target updates.


@ -180,11 +180,9 @@ func (m *Manager) Providers() []*Provider {
// Run starts the background processing.
func (m *Manager) Run() error {
go m.sender()
for range m.ctx.Done() {
m.cancelDiscoverers()
return m.ctx.Err()
}
return nil
<-m.ctx.Done()
m.cancelDiscoverers()
return m.ctx.Err()
}
// SyncCh returns a read only channel used by all the clients to receive target updates.
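
Both discovery managers had the same subtle bug: for range over ctx.Done() never executes its body, because the Done channel is closed without ever being sent a value, so cancelDiscoverers was skipped and Run returned nil. A plain receive blocks until cancellation and then falls through:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(10 * time.Millisecond)
		cancel()
	}()

	for range ctx.Done() {
		fmt.Println("never runs: Done() is closed, never sent to")
	}

	<-ctx.Done() // returns once the channel is closed
	fmt.Println("shutting down:", ctx.Err()) // context canceled
}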


@ -3537,7 +3537,13 @@ azuread:
# Azure User-assigned Managed identity.
[ managed_identity:
[ client_id: <string> ]
[ client_id: <string> ] ]
# Azure OAuth.
[ oauth:
[ client_id: <string> ]
[ client_secret: <string> ]
[ tenant_id: <string> ] ]
# Configures the remote write request's TLS settings.
tls_config:


@ -147,3 +147,7 @@ by the rule are discarded, and if it's an alerting rule, _all_ alerts for
the rule, active, pending, or inactive, are cleared as well. The event will be
recorded as an error in the evaluation, and as such no stale markers are
written.
# Failed rule evaluations due to slow evaluation
If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group.


@ -86,6 +86,7 @@ versions.
| args | []interface{} | map[string]interface{} | This converts a list of objects to a map with keys arg0, arg1 etc. This is intended to allow multiple arguments to be passed to templates. |
| tmpl | string, []interface{} | nothing | Like the built-in `template`, but allows non-literals as the template name. Note that the result is assumed to be safe, and will not be auto-escaped. Only available in consoles. |
| safeHtml | string | string | Marks string as HTML not requiring auto-escaping. |
| externalURL | _none_ | string | The external URL under which Prometheus is externally reachable. |
| pathPrefix | _none_ | string | The external URL [path](https://pkg.go.dev/net/url#URL) for use in console templates. |
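
Template functions such as the newly documented externalURL and pathPrefix reach console templates through a function map. A minimal text/template sketch of the same mechanism (the wiring and values here are illustrative, not Prometheus's actual code; the real values derive from --web.external-url):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Hypothetical values standing in for Prometheus's configuration.
	funcs := template.FuncMap{
		"externalURL": func() string { return "https://prom.example.com/prometheus" },
		"pathPrefix":  func() string { return "/prometheus" },
	}
	t := template.Must(template.New("console").Funcs(funcs).Parse(
		"<a href=\"{{ pathPrefix }}/graph\">Graph</a> on {{ externalURL }}\n"))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
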
## Template type differences


@ -14,7 +14,7 @@ vector, which if not provided it will default to the value of the expression
_Notes about the experimental native histograms:_
* Ingesting native histograms has to be enabled via a [feature
flag](../../feature_flags.md#native-histograms). As long as no native histograms
flag](../feature_flags.md#native-histograms). As long as no native histograms
have been ingested into the TSDB, all functions will behave as usual.
* Functions that do not explicitly mention native histograms in their
documentation (see below) will ignore histogram samples.


@ -8,7 +8,7 @@ require (
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.11.2
github.com/prometheus/client_golang v1.16.0
github.com/prometheus/client_golang v1.17.0
github.com/prometheus/common v0.44.0
github.com/prometheus/prometheus v0.45.0
github.com/stretchr/testify v1.8.4
@ -42,9 +42,9 @@ require (
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect
go.opentelemetry.io/otel v1.16.0 // indirect
@ -56,11 +56,11 @@ require (
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/oauth2 v0.8.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.30.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)


@ -194,13 +194,13 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
@ -213,8 +213,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/prometheus/prometheus v0.45.0 h1:O/uG+Nw4kNxx/jDPxmjsSDd+9Ohql6E7ZSY1x5x/0KI=
github.com/prometheus/prometheus v0.45.0/go.mod h1:jC5hyO8ItJBnDWGecbEucMyXjzxGv9cxsxsjS9u5s1w=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@ -292,7 +292,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -312,8 +312,8 @@ golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -352,8 +352,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

go.mod

@ -31,7 +31,7 @@ require (
github.com/gophercloud/gophercloud v1.5.0
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.22.0
github.com/hashicorp/consul/api v1.25.1
github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e
github.com/hetznercloud/hcloud-go/v2 v2.0.0
github.com/ionos-cloud/sdk-go/v6 v6.1.8
@ -188,7 +188,7 @@ require (
go.opentelemetry.io/otel/metric v1.16.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b
golang.org/x/mod v0.12.0 // indirect
golang.org/x/term v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect

go.sum

@ -411,10 +411,10 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.22.0 h1:ydEvDooB/A0c/xpsBd8GSt7P2/zYPBui4KrNip0xGjE=
github.com/hashicorp/consul/api v1.22.0/go.mod h1:zHpYgZ7TeYqS6zaszjwSt128OwESRpnhU9aGa6ue3Eg=
github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE=
github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.14.0 h1:Hly+BMNMssVzoWddbBnBFi3W+Fzytvm0haSkihhj3GU=
github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@ -861,8 +861,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b h1:r+vk0EmXNmekl0S0BascoeeoHk/L7wmaW2QF90K+kYI=
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=


@ -19,6 +19,7 @@ import (
"bytes"
"encoding/json"
"strconv"
"strings"
"github.com/cespare/xxhash/v2"
"github.com/prometheus/common/model"
@ -362,7 +363,7 @@ func EmptyLabels() Labels {
func New(ls ...Label) Labels {
set := make(Labels, 0, len(ls))
set = append(set, ls...)
slices.SortFunc(set, func(a, b Label) bool { return a.Name < b.Name })
slices.SortFunc(set, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
return set
}
@ -386,7 +387,7 @@ func FromStrings(ss ...string) Labels {
res = append(res, Label{Name: ss[i], Value: ss[i+1]})
}
slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name })
slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
return res
}
@ -591,7 +592,7 @@ func (b *Builder) Labels() Labels {
}
if len(b.add) > 0 { // Base is already in order, so we only need to sort if we add to it.
res = append(res, b.add...)
slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name })
slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
}
return res
}
@ -618,7 +619,7 @@ func (b *ScratchBuilder) Add(name, value string) {
// Sort the labels added so far by name.
func (b *ScratchBuilder) Sort() {
slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name })
slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
}
// Assign is for when you already have a Labels which you want this ScratchBuilder to return.


@ -20,6 +20,7 @@ import (
"encoding/json"
"reflect"
"strconv"
"strings"
"unsafe"
"github.com/cespare/xxhash/v2"
@ -412,7 +413,7 @@ func yoloBytes(s string) (b []byte) {
// New returns a sorted Labels from the given labels.
// The caller has to guarantee that all label names are unique.
func New(ls ...Label) Labels {
slices.SortFunc(ls, func(a, b Label) bool { return a.Name < b.Name })
slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
size := labelsSize(ls)
buf := make([]byte, size)
marshalLabelsToSizedBuffer(ls, buf)
@ -671,7 +672,7 @@ func (b *Builder) Labels() Labels {
return b.base
}
slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name })
slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
slices.Sort(b.del)
a, d := 0, 0
@ -830,7 +831,7 @@ func (b *ScratchBuilder) Add(name, value string) {
// Sort the labels added so far by name.
func (b *ScratchBuilder) Sort() {
slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name })
slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
}
// Assign is for when you already have a Labels which you want this ScratchBuilder to return.


@ -27,7 +27,6 @@ import (
"github.com/DmitriyVTitov/size"
"github.com/grafana/regexp"
"github.com/grafana/regexp/syntax"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -511,17 +510,17 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) {
require.Equal(t, c.expectedLiteralPrefixMatchers, numPrefixMatchers)
for _, value := range c.expectedMatches {
assert.Truef(t, matcher.Matches(value), "Value: %s", value)
require.Truef(t, matcher.Matches(value), "Value: %s", value)
// Ensure the golang regexp engine would return the same.
assert.Truef(t, re.MatchString(value), "Value: %s", value)
require.Truef(t, re.MatchString(value), "Value: %s", value)
}
for _, value := range c.expectedNotMatches {
assert.Falsef(t, matcher.Matches(value), "Value: %s", value)
require.Falsef(t, matcher.Matches(value), "Value: %s", value)
// Ensure the golang regexp engine would return the same.
assert.Falsef(t, re.MatchString(value), "Value: %s", value)
require.Falsef(t, re.MatchString(value), "Value: %s", value)
}
})
}
@ -586,17 +585,17 @@ func TestStringMatcherFromRegexp_LiteralSuffix(t *testing.T) {
require.Equal(t, c.expectedLiteralSuffixMatchers, numSuffixMatchers)
for _, value := range c.expectedMatches {
assert.Truef(t, matcher.Matches(value), "Value: %s", value)
require.Truef(t, matcher.Matches(value), "Value: %s", value)
// Ensure the golang regexp engine would return the same.
assert.Truef(t, re.MatchString(value), "Value: %s", value)
require.Truef(t, re.MatchString(value), "Value: %s", value)
}
for _, value := range c.expectedNotMatches {
assert.Falsef(t, matcher.Matches(value), "Value: %s", value)
require.Falsef(t, matcher.Matches(value), "Value: %s", value)
// Ensure the golang regexp engine would return the same.
assert.Falsef(t, re.MatchString(value), "Value: %s", value)
require.Falsef(t, re.MatchString(value), "Value: %s", value)
}
})
}
@ -671,17 +670,17 @@ func TestStringMatcherFromRegexp_Quest(t *testing.T) {
require.Equal(t, c.expectedZeroOrOneMatchers, numZeroOrOneMatchers)
for _, value := range c.expectedMatches {
assert.Truef(t, matcher.Matches(value), "Value: %s", value)
require.Truef(t, matcher.Matches(value), "Value: %s", value)
// Ensure the golang regexp engine would return the same.
assert.Truef(t, re.MatchString(value), "Value: %s", value)
require.Truef(t, re.MatchString(value), "Value: %s", value)
}
for _, value := range c.expectedNotMatches {
assert.Falsef(t, matcher.Matches(value), "Value: %s", value)
require.Falsef(t, matcher.Matches(value), "Value: %s", value)
// Ensure the golang regexp engine would return the same.
assert.Falsef(t, re.MatchString(value), "Value: %s", value)
require.Falsef(t, re.MatchString(value), "Value: %s", value)
}
})
}
@ -1116,8 +1115,8 @@ func TestFindEqualStringMatchers(t *testing.T) {
t.Run("empty matcher", func(t *testing.T) {
actualMatches, actualOk := findEqualStringMatchersAndCollectMatches(emptyStringMatcher{})
assert.False(t, actualOk)
assert.Empty(t, actualMatches)
require.False(t, actualOk)
require.Empty(t, actualMatches)
})
t.Run("concat of literal matchers (case sensitive)", func(t *testing.T) {
@ -1128,8 +1127,8 @@ func TestFindEqualStringMatchers(t *testing.T) {
},
)
assert.True(t, actualOk)
assert.Equal(t, []match{{"test-1", true}, {"test-2", true}}, actualMatches)
require.True(t, actualOk)
require.Equal(t, []match{{"test-1", true}, {"test-2", true}}, actualMatches)
})
t.Run("concat of literal matchers (case insensitive)", func(t *testing.T) {
@ -1140,8 +1139,8 @@ func TestFindEqualStringMatchers(t *testing.T) {
},
)
assert.True(t, actualOk)
assert.Equal(t, []match{{"test-1", false}, {"test-2", false}}, actualMatches)
require.True(t, actualOk)
require.Equal(t, []match{{"test-1", false}, {"test-2", false}}, actualMatches)
})
t.Run("concat of literal matchers (mixed case)", func(t *testing.T) {
@ -1152,8 +1151,8 @@ func TestFindEqualStringMatchers(t *testing.T) {
},
)
assert.True(t, actualOk)
assert.Equal(t, []match{{"test-1", false}, {"test-2", true}}, actualMatches)
require.True(t, actualOk)
require.Equal(t, []match{{"test-1", false}, {"test-2", true}}, actualMatches)
})
}
@ -1211,86 +1210,86 @@ func BenchmarkOptimizeEqualStringMatchers(b *testing.B) {
func TestZeroOrOneCharacterStringMatcher(t *testing.T) {
matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
assert.True(t, matcher.Matches(""))
assert.True(t, matcher.Matches("x"))
assert.True(t, matcher.Matches("\n"))
assert.False(t, matcher.Matches("xx"))
assert.False(t, matcher.Matches("\n\n"))
require.True(t, matcher.Matches(""))
require.True(t, matcher.Matches("x"))
require.True(t, matcher.Matches("\n"))
require.False(t, matcher.Matches("xx"))
require.False(t, matcher.Matches("\n\n"))
matcher = &zeroOrOneCharacterStringMatcher{matchNL: false}
assert.True(t, matcher.Matches(""))
assert.True(t, matcher.Matches("x"))
assert.False(t, matcher.Matches("\n"))
assert.False(t, matcher.Matches("xx"))
assert.False(t, matcher.Matches("\n\n"))
require.True(t, matcher.Matches(""))
require.True(t, matcher.Matches("x"))
require.False(t, matcher.Matches("\n"))
require.False(t, matcher.Matches("xx"))
require.False(t, matcher.Matches("\n\n"))
}
func TestLiteralPrefixStringMatcher(t *testing.T) {
m := &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: true, right: &emptyStringMatcher{}}
assert.True(t, m.Matches("mar"))
assert.False(t, m.Matches("marco"))
assert.False(t, m.Matches("ma"))
assert.False(t, m.Matches("mAr"))
require.True(t, m.Matches("mar"))
require.False(t, m.Matches("marco"))
require.False(t, m.Matches("ma"))
require.False(t, m.Matches("mAr"))
m = &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: false, right: &emptyStringMatcher{}}
assert.True(t, m.Matches("mar"))
assert.False(t, m.Matches("marco"))
assert.False(t, m.Matches("ma"))
assert.True(t, m.Matches("mAr"))
require.True(t, m.Matches("mar"))
require.False(t, m.Matches("marco"))
require.False(t, m.Matches("ma"))
require.True(t, m.Matches("mAr"))
m = &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: true, right: &equalStringMatcher{s: "co", caseSensitive: false}}
assert.True(t, m.Matches("marco"))
assert.True(t, m.Matches("marCO"))
assert.False(t, m.Matches("MARco"))
assert.False(t, m.Matches("mar"))
assert.False(t, m.Matches("marcopracucci"))
require.True(t, m.Matches("marco"))
require.True(t, m.Matches("marCO"))
require.False(t, m.Matches("MARco"))
require.False(t, m.Matches("mar"))
require.False(t, m.Matches("marcopracucci"))
}
func TestLiteralSuffixStringMatcher(t *testing.T) {
m := &literalSuffixStringMatcher{left: &emptyStringMatcher{}, suffix: "co", suffixCaseSensitive: true}
assert.True(t, m.Matches("co"))
assert.False(t, m.Matches("marco"))
assert.False(t, m.Matches("coo"))
assert.False(t, m.Matches("Co"))
require.True(t, m.Matches("co"))
require.False(t, m.Matches("marco"))
require.False(t, m.Matches("coo"))
require.False(t, m.Matches("Co"))
m = &literalSuffixStringMatcher{left: &emptyStringMatcher{}, suffix: "co", suffixCaseSensitive: false}
assert.True(t, m.Matches("co"))
assert.False(t, m.Matches("marco"))
assert.False(t, m.Matches("coo"))
assert.True(t, m.Matches("Co"))
require.True(t, m.Matches("co"))
require.False(t, m.Matches("marco"))
require.False(t, m.Matches("coo"))
require.True(t, m.Matches("Co"))
m = &literalSuffixStringMatcher{left: &equalStringMatcher{s: "mar", caseSensitive: false}, suffix: "co", suffixCaseSensitive: true}
assert.True(t, m.Matches("marco"))
assert.True(t, m.Matches("MARco"))
assert.False(t, m.Matches("marCO"))
assert.False(t, m.Matches("mar"))
assert.False(t, m.Matches("marcopracucci"))
require.True(t, m.Matches("marco"))
require.True(t, m.Matches("MARco"))
require.False(t, m.Matches("marCO"))
require.False(t, m.Matches("mar"))
require.False(t, m.Matches("marcopracucci"))
m = &literalSuffixStringMatcher{left: &equalStringMatcher{s: "mar", caseSensitive: false}, suffix: "co", suffixCaseSensitive: false}
assert.True(t, m.Matches("marco"))
assert.True(t, m.Matches("MARco"))
assert.True(t, m.Matches("marCO"))
assert.False(t, m.Matches("mar"))
assert.False(t, m.Matches("marcopracucci"))
require.True(t, m.Matches("marco"))
require.True(t, m.Matches("MARco"))
require.True(t, m.Matches("marCO"))
require.False(t, m.Matches("mar"))
require.False(t, m.Matches("marcopracucci"))
}
func TestHasPrefixCaseInsensitive(t *testing.T) {
assert.True(t, hasPrefixCaseInsensitive("marco", "mar"))
assert.True(t, hasPrefixCaseInsensitive("mArco", "mar"))
assert.True(t, hasPrefixCaseInsensitive("marco", "MaR"))
assert.True(t, hasPrefixCaseInsensitive("marco", "marco"))
assert.True(t, hasPrefixCaseInsensitive("mArco", "marco"))
require.True(t, hasPrefixCaseInsensitive("marco", "mar"))
require.True(t, hasPrefixCaseInsensitive("mArco", "mar"))
require.True(t, hasPrefixCaseInsensitive("marco", "MaR"))
require.True(t, hasPrefixCaseInsensitive("marco", "marco"))
require.True(t, hasPrefixCaseInsensitive("mArco", "marco"))
assert.False(t, hasPrefixCaseInsensitive("marco", "a"))
assert.False(t, hasPrefixCaseInsensitive("marco", "abcdefghi"))
require.False(t, hasPrefixCaseInsensitive("marco", "a"))
require.False(t, hasPrefixCaseInsensitive("marco", "abcdefghi"))
}
func TestHasSuffixCaseInsensitive(t *testing.T) {
assert.True(t, hasSuffixCaseInsensitive("marco", "rco"))
assert.True(t, hasSuffixCaseInsensitive("marco", "RcO"))
assert.True(t, hasSuffixCaseInsensitive("marco", "marco"))
assert.False(t, hasSuffixCaseInsensitive("marco", "a"))
assert.False(t, hasSuffixCaseInsensitive("marco", "abcdefghi"))
require.True(t, hasSuffixCaseInsensitive("marco", "rco"))
require.True(t, hasSuffixCaseInsensitive("marco", "RcO"))
require.True(t, hasSuffixCaseInsensitive("marco", "marco"))
require.False(t, hasSuffixCaseInsensitive("marco", "a"))
require.False(t, hasSuffixCaseInsensitive("marco", "abcdefghi"))
}
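
The blanket assert-to-require migration changes failure semantics: testify's assert marks the test failed and continues, while require additionally calls t.FailNow so later assertions never run against known-bad state. A self-contained illustration (hypothetical test, not taken from the diff):

package main

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestPrefix(t *testing.T) {
	value := "marco"

	// On failure, assert records it and execution continues.
	assert.Truef(t, strings.HasPrefix(value, "mar"), "Value: %s", value)

	// On failure, require records it and stops the test via t.FailNow,
	// so everything below can safely assume the condition holds.
	require.Truef(t, strings.HasPrefix(value, "mar"), "Value: %s", value)
	require.Equal(t, "mar", value[:3])
}
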
func getTestNameFromRegexp(re string) string {


@ -176,17 +176,11 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
})
}
if r.Record.Value == "" && r.Alert.Value == "" {
if r.Record.Value == "0" {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("one of 'record' or 'alert' must be set"),
node: &r.Alert,
})
} else {
nodes = append(nodes, WrappedError{
err: fmt.Errorf("one of 'record' or 'alert' must be set"),
node: &r.Record,
})
}
nodes = append(nodes, WrappedError{
err: fmt.Errorf("one of 'record' or 'alert' must be set"),
node: &r.Record,
nodeAlt: &r.Alert,
})
}
if r.Expr.Value == "" {


@ -338,7 +338,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
var ts float64
// A float is enough to hold what we need for millisecond resolution.
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
}
if math.IsNaN(ts) || math.IsInf(ts, 0) {
return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts)
@ -391,7 +391,7 @@ func (p *OpenMetricsParser) parseComment() error {
var ts float64
// A float is enough to hold what we need for millisecond resolution.
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
return fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
}
if math.IsNaN(ts) || math.IsInf(ts, 0) {
return fmt.Errorf("invalid exemplar timestamp %f", ts)
@ -461,7 +461,7 @@ func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error
}
val, err := parseFloat(yoloString(p.l.buf()[1:]))
if err != nil {
return 0, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
return 0, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
}
// Ensure canonical NaN value.
if math.IsNaN(p.exemplarVal) {


@ -348,7 +348,7 @@ func (p *PromParser) Next() (Entry, error) {
return EntryInvalid, p.parseError("expected value after metric", t2)
}
if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil {
return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
}
// Ensure canonical NaN value.
if math.IsNaN(p.val) {
@ -361,7 +361,7 @@ func (p *PromParser) Next() (Entry, error) {
case tTimestamp:
p.hasTS = true
if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil {
return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
}
if t2 := p.nextToken(); t2 != tLinebreak {
return EntryInvalid, p.parseError("expected next entry after timestamp", t2)


@ -172,11 +172,12 @@ func (m *Gauge) GetValue() float64 {
}
type Counter struct {
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"`
CreatedTimestamp *types.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Counter) Reset() { *m = Counter{} }
@ -226,6 +227,13 @@ func (m *Counter) GetExemplar() *Exemplar {
return nil
}
func (m *Counter) GetCreatedTimestamp() *types.Timestamp {
if m != nil {
return m.CreatedTimestamp
}
return nil
}
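
Callers of the regenerated type can rely on the accessor's nil-safety: as shown above, it returns nil for a nil receiver or an unset field rather than panicking. A hypothetical consumer (the dto import path is an assumption, it is not shown in this diff):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
	dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
)

func main() {
	c := &dto.Counter{
		Value:            42,
		CreatedTimestamp: &types.Timestamp{Seconds: 1696406400},
	}
	// The generated getter is nil-safe on both the receiver and the field.
	if ts := c.GetCreatedTimestamp(); ts != nil {
		fmt.Printf("counter created at unix %d\n", ts.Seconds)
	}
}
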
type Quantile struct {
Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"`
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
@ -282,12 +290,13 @@ func (m *Quantile) GetValue() float64 {
}
type Summary struct {
SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"`
SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"`
Quantile []Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"`
SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"`
Quantile []Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"`
CreatedTimestamp *types.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Summary) Reset() { *m = Summary{} }
@ -344,6 +353,13 @@ func (m *Summary) GetQuantile() []Quantile {
return nil
}
func (m *Summary) GetCreatedTimestamp() *types.Timestamp {
if m != nil {
return m.CreatedTimestamp
}
return nil
}
type Untyped struct {
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -396,7 +412,8 @@ type Histogram struct {
SampleCountFloat float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat,proto3" json:"sample_count_float,omitempty"`
SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"`
// Buckets for the conventional histogram.
Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"`
Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"`
CreatedTimestamp *types.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
// then each power of two is divided into 2^n logarithmic buckets.
@ -489,6 +506,13 @@ func (m *Histogram) GetBucket() []Bucket {
return nil
}
func (m *Histogram) GetCreatedTimestamp() *types.Timestamp {
if m != nil {
return m.CreatedTimestamp
}
return nil
}
func (m *Histogram) GetSchema() int32 {
if m != nil {
return m.Schema
@ -941,65 +965,68 @@ func init() {
}
var fileDescriptor_d1e5ddb18987a258 = []byte{
// 923 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x8e, 0xdb, 0x44,
0x18, 0xad, 0x1b, 0xe7, 0xc7, 0x5f, 0x36, 0xdb, 0x74, 0x88, 0x2a, 0x6b, 0x61, 0x37, 0xc1, 0x12,
0xd2, 0x82, 0x50, 0x22, 0xa0, 0x08, 0x54, 0x40, 0x62, 0xb7, 0xdd, 0x6e, 0x51, 0x49, 0x5b, 0x26,
0xc9, 0x45, 0xe1, 0xc2, 0x9a, 0x64, 0x67, 0x1d, 0x0b, 0xdb, 0x63, 0xec, 0x71, 0xc5, 0x72, 0xcf,
0x25, 0xd7, 0xbc, 0x02, 0x4f, 0x82, 0x7a, 0xc9, 0x13, 0x20, 0xb4, 0xef, 0xc0, 0x3d, 0x9a, 0x3f,
0x3b, 0x5b, 0x39, 0x85, 0x15, 0x77, 0x33, 0xc7, 0xe7, 0xfb, 0xe6, 0x9c, 0x99, 0xc9, 0x99, 0x80,
0x17, 0xb2, 0x49, 0x9a, 0xb1, 0x98, 0xf2, 0x35, 0x2d, 0xf2, 0xc9, 0x2a, 0x0a, 0x69, 0xc2, 0x27,
0x31, 0xe5, 0x59, 0xb8, 0xca, 0xc7, 0x69, 0xc6, 0x38, 0x43, 0x83, 0x90, 0x8d, 0x2b, 0xce, 0x58,
0x71, 0xf6, 0x06, 0x01, 0x0b, 0x98, 0x24, 0x4c, 0xc4, 0x48, 0x71, 0xf7, 0x86, 0x01, 0x63, 0x41,
0x44, 0x27, 0x72, 0xb6, 0x2c, 0xce, 0x27, 0x3c, 0x8c, 0x69, 0xce, 0x49, 0x9c, 0x2a, 0x82, 0xf7,
0x31, 0x38, 0x5f, 0x93, 0x25, 0x8d, 0x9e, 0x91, 0x30, 0x43, 0x08, 0xec, 0x84, 0xc4, 0xd4, 0xb5,
0x46, 0xd6, 0xa1, 0x83, 0xe5, 0x18, 0x0d, 0xa0, 0xf9, 0x82, 0x44, 0x05, 0x75, 0x6f, 0x4a, 0x50,
0x4d, 0xbc, 0x7d, 0x68, 0x9e, 0x92, 0x22, 0xd8, 0xf8, 0x2c, 0x6a, 0x2c, 0xf3, 0xf9, 0x3b, 0x68,
0xdf, 0x67, 0x45, 0xc2, 0x69, 0x56, 0x4f, 0x40, 0xf7, 0xa0, 0x43, 0x7f, 0xa4, 0x71, 0x1a, 0x91,
0x4c, 0x36, 0xee, 0x7e, 0x78, 0x30, 0xae, 0xb3, 0x35, 0x3e, 0xd1, 0x2c, 0x5c, 0xf2, 0xbd, 0xcf,
0xa1, 0xf3, 0x4d, 0x41, 0x12, 0x1e, 0x46, 0x14, 0xed, 0x41, 0xe7, 0x07, 0x3d, 0xd6, 0x0b, 0x94,
0xf3, 0xab, 0xca, 0x4b, 0x69, 0xbf, 0x58, 0xd0, 0x9e, 0x15, 0x71, 0x4c, 0xb2, 0x0b, 0xf4, 0x36,
0xec, 0xe4, 0x24, 0x4e, 0x23, 0xea, 0xaf, 0x84, 0x5a, 0xd9, 0xc1, 0xc6, 0x5d, 0x85, 0x49, 0x03,
0x68, 0x1f, 0x40, 0x53, 0xf2, 0x22, 0xd6, 0x9d, 0x1c, 0x85, 0xcc, 0x8a, 0x18, 0x7d, 0xb9, 0xb1,
0x7e, 0x63, 0xd4, 0xd8, 0xee, 0xc3, 0x28, 0x3e, 0xb6, 0x5f, 0xfe, 0x39, 0xbc, 0x51, 0xa9, 0xf4,
0x86, 0xd0, 0x5e, 0x24, 0xfc, 0x22, 0xa5, 0x67, 0x5b, 0xf6, 0xf2, 0x6f, 0x1b, 0x9c, 0x47, 0x61,
0xce, 0x59, 0x90, 0x91, 0xf8, 0xbf, 0x48, 0x7e, 0x1f, 0xd0, 0x26, 0xc5, 0x3f, 0x8f, 0x18, 0xe1,
0xae, 0x2d, 0x7b, 0xf6, 0x37, 0x88, 0x0f, 0x05, 0xfe, 0x6f, 0x06, 0xef, 0x41, 0x6b, 0x59, 0xac,
0xbe, 0xa7, 0x5c, 0xdb, 0x7b, 0xab, 0xde, 0xde, 0xb1, 0xe4, 0x68, 0x73, 0xba, 0x02, 0xdd, 0x81,
0x56, 0xbe, 0x5a, 0xd3, 0x98, 0xb8, 0xcd, 0x91, 0x75, 0x78, 0x1b, 0xeb, 0x19, 0x7a, 0x07, 0x76,
0x7f, 0xa2, 0x19, 0xf3, 0xf9, 0x3a, 0xa3, 0xf9, 0x9a, 0x45, 0x67, 0x6e, 0x4b, 0x2e, 0xdb, 0x13,
0xe8, 0xdc, 0x80, 0x42, 0x99, 0xa4, 0x29, 0xa3, 0x6d, 0x69, 0xd4, 0x11, 0x88, 0xb2, 0x79, 0x08,
0xfd, 0xea, 0xb3, 0x36, 0xd9, 0x91, 0x7d, 0x76, 0x4b, 0x92, 0xb2, 0xf8, 0x18, 0x7a, 0x09, 0x0d,
0x08, 0x0f, 0x5f, 0x50, 0x3f, 0x4f, 0x49, 0xe2, 0x3a, 0xd2, 0xca, 0xe8, 0x75, 0x56, 0x66, 0x29,
0x49, 0xb4, 0x9d, 0x1d, 0x53, 0x2c, 0x30, 0x21, 0xbe, 0x6c, 0x76, 0x46, 0x23, 0x4e, 0x5c, 0x18,
0x35, 0x0e, 0x11, 0x2e, 0x97, 0x78, 0x20, 0xc0, 0x2b, 0x34, 0x65, 0xa0, 0x3b, 0x6a, 0x08, 0x8f,
0x06, 0x55, 0x26, 0x1e, 0x43, 0x2f, 0x65, 0x79, 0x58, 0x49, 0xdb, 0xb9, 0x9e, 0x34, 0x53, 0x6c,
0xa4, 0x95, 0xcd, 0x94, 0xb4, 0x9e, 0x92, 0x66, 0xd0, 0x52, 0x5a, 0x49, 0x53, 0xd2, 0x76, 0x95,
0x34, 0x83, 0x4a, 0x69, 0xde, 0xef, 0x16, 0xb4, 0xd4, 0x82, 0xe8, 0x5d, 0xe8, 0xaf, 0x8a, 0xb8,
0x88, 0x36, 0xed, 0xa8, 0x8b, 0x77, 0xab, 0xc2, 0x95, 0xa1, 0xbb, 0x70, 0xe7, 0x55, 0xea, 0x95,
0x0b, 0x38, 0x78, 0xa5, 0x40, 0x9d, 0xd0, 0x10, 0xba, 0x45, 0x9a, 0xd2, 0xcc, 0x5f, 0xb2, 0x22,
0x39, 0xd3, 0xb7, 0x10, 0x24, 0x74, 0x2c, 0x90, 0x2b, 0x79, 0xd1, 0xb8, 0x76, 0x5e, 0x40, 0xb5,
0x71, 0xe2, 0x52, 0xb2, 0xf3, 0xf3, 0x9c, 0x2a, 0x07, 0xb7, 0xb1, 0x9e, 0x09, 0x3c, 0xa2, 0x49,
0xc0, 0xd7, 0x72, 0xf5, 0x1e, 0xd6, 0x33, 0xef, 0x57, 0x0b, 0x3a, 0xa6, 0x29, 0xfa, 0x0c, 0x9a,
0x91, 0x48, 0x4b, 0xd7, 0x92, 0xc7, 0x34, 0xac, 0xd7, 0x50, 0x06, 0xaa, 0x3e, 0x25, 0x55, 0x53,
0x9f, 0x47, 0xe8, 0x53, 0x70, 0xca, 0x4c, 0xd6, 0xd6, 0xf6, 0xc6, 0x2a, 0xb5, 0xc7, 0x26, 0xb5,
0xc7, 0x73, 0xc3, 0xc0, 0x15, 0xd9, 0xfb, 0xb9, 0x01, 0xad, 0xa9, 0x7c, 0x19, 0xfe, 0x9f, 0xae,
0x0f, 0xa0, 0x19, 0x88, 0x2c, 0xd7, 0x41, 0xfc, 0x66, 0x7d, 0xb1, 0x8c, 0x7b, 0xac, 0x98, 0xe8,
0x13, 0x68, 0xaf, 0x54, 0xbe, 0x6b, 0xc9, 0xfb, 0xf5, 0x45, 0xfa, 0x11, 0xc0, 0x86, 0x2d, 0x0a,
0x73, 0x15, 0xbe, 0xf2, 0x3e, 0x6c, 0x2d, 0xd4, 0x09, 0x8d, 0x0d, 0x5b, 0x14, 0x16, 0x2a, 0x26,
0x65, 0x98, 0x6c, 0x2d, 0xd4, 0x59, 0x8a, 0x0d, 0x1b, 0x7d, 0x01, 0xce, 0xda, 0xa4, 0xa7, 0x0c,
0x91, 0xad, 0xdb, 0x53, 0x86, 0x2c, 0xae, 0x2a, 0x44, 0xde, 0x96, 0x3b, 0xee, 0xc7, 0xb9, 0x4c,
0xaa, 0x06, 0xee, 0x96, 0xd8, 0x34, 0xf7, 0x7e, 0xb3, 0x60, 0x47, 0x9d, 0xc3, 0x43, 0x12, 0x87,
0xd1, 0x45, 0xed, 0x33, 0x8a, 0xc0, 0x5e, 0xd3, 0x28, 0xd5, 0xaf, 0xa8, 0x1c, 0xa3, 0xbb, 0x60,
0x0b, 0x8d, 0x72, 0x0b, 0x77, 0xb7, 0xfd, 0xe6, 0x55, 0xe7, 0xf9, 0x45, 0x4a, 0xb1, 0x64, 0x8b,
0x44, 0x56, 0xff, 0x07, 0x5c, 0xfb, 0x75, 0x89, 0xac, 0xea, 0x4c, 0x22, 0xab, 0x8a, 0xf7, 0x96,
0x00, 0x55, 0x3f, 0xd4, 0x85, 0xf6, 0xfd, 0xa7, 0x8b, 0x27, 0xf3, 0x13, 0xdc, 0xbf, 0x81, 0x1c,
0x68, 0x9e, 0x1e, 0x2d, 0x4e, 0x4f, 0xfa, 0x96, 0xc0, 0x67, 0x8b, 0xe9, 0xf4, 0x08, 0x3f, 0xef,
0xdf, 0x14, 0x93, 0xc5, 0x93, 0xf9, 0xf3, 0x67, 0x27, 0x0f, 0xfa, 0x0d, 0xd4, 0x03, 0xe7, 0xd1,
0x57, 0xb3, 0xf9, 0xd3, 0x53, 0x7c, 0x34, 0xed, 0xdb, 0xe8, 0x0d, 0xb8, 0x25, 0x6b, 0xfc, 0x0a,
0x6c, 0x1e, 0x7b, 0x2f, 0x2f, 0x0f, 0xac, 0x3f, 0x2e, 0x0f, 0xac, 0xbf, 0x2e, 0x0f, 0xac, 0x6f,
0x07, 0x21, 0xf3, 0x2b, 0x71, 0xbe, 0x12, 0xb7, 0x6c, 0xc9, 0x9b, 0xfd, 0xd1, 0x3f, 0x01, 0x00,
0x00, 0xff, 0xff, 0x52, 0x2d, 0xb5, 0x31, 0xef, 0x08, 0x00, 0x00,
// 963 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x6e, 0x1b, 0x45,
0x14, 0xee, 0x76, 0xfd, 0x93, 0x3d, 0x8e, 0x93, 0xcd, 0x60, 0x55, 0xab, 0x40, 0x62, 0xb3, 0x12,
0x52, 0x40, 0xc8, 0x16, 0x50, 0x04, 0x2a, 0x45, 0x22, 0x69, 0xd3, 0x14, 0x15, 0xb7, 0x65, 0x6c,
0x5f, 0x94, 0x9b, 0xd5, 0xd8, 0x9e, 0xac, 0x57, 0xec, 0xee, 0x2c, 0xfb, 0x53, 0x11, 0xee, 0x79,
0x06, 0x5e, 0x01, 0xf1, 0x1c, 0x08, 0xf5, 0x92, 0x07, 0x40, 0x08, 0xe5, 0x49, 0xd0, 0xfc, 0xed,
0x3a, 0xd5, 0xba, 0x90, 0xf6, 0x6e, 0xe6, 0xf3, 0x77, 0xce, 0x7c, 0xe7, 0x9b, 0xf1, 0x39, 0x0b,
0x6e, 0xc0, 0x46, 0x49, 0xca, 0x22, 0x9a, 0xaf, 0x68, 0x91, 0x8d, 0x16, 0x61, 0x40, 0xe3, 0x7c,
0x14, 0xd1, 0x3c, 0x0d, 0x16, 0xd9, 0x30, 0x49, 0x59, 0xce, 0x50, 0x2f, 0x60, 0xc3, 0x8a, 0x33,
0x94, 0x9c, 0xfd, 0x9e, 0xcf, 0x7c, 0x26, 0x08, 0x23, 0xbe, 0x92, 0xdc, 0xfd, 0xbe, 0xcf, 0x98,
0x1f, 0xd2, 0x91, 0xd8, 0xcd, 0x8b, 0xf3, 0x51, 0x1e, 0x44, 0x34, 0xcb, 0x49, 0x94, 0x48, 0x82,
0xfb, 0x29, 0x58, 0xdf, 0x90, 0x39, 0x0d, 0x9f, 0x92, 0x20, 0x45, 0x08, 0x1a, 0x31, 0x89, 0xa8,
0x63, 0x0c, 0x8c, 0x23, 0x0b, 0x8b, 0x35, 0xea, 0x41, 0xf3, 0x39, 0x09, 0x0b, 0xea, 0xdc, 0x14,
0xa0, 0xdc, 0xb8, 0x07, 0xd0, 0x3c, 0x23, 0x85, 0xbf, 0xf6, 0x33, 0x8f, 0x31, 0xf4, 0xcf, 0xbf,
0x19, 0xd0, 0xbe, 0xc7, 0x8a, 0x38, 0xa7, 0x69, 0x3d, 0x03, 0xdd, 0x81, 0x2d, 0xfa, 0x23, 0x8d,
0x92, 0x90, 0xa4, 0x22, 0x73, 0xe7, 0xe3, 0xc3, 0x61, 0x5d, 0x5d, 0xc3, 0x53, 0xc5, 0xc2, 0x25,
0x1f, 0x8d, 0x61, 0x6f, 0x91, 0x52, 0x92, 0xd3, 0xa5, 0x57, 0x96, 0xe3, 0x98, 0x22, 0xc9, 0xfe,
0x50, 0x16, 0x3c, 0xd4, 0x05, 0x0f, 0xa7, 0x9a, 0x71, 0xd2, 0x78, 0xf1, 0x77, 0xdf, 0xc0, 0xb6,
0x0a, 0x2d, 0x71, 0xf7, 0x2e, 0x6c, 0x7d, 0x5b, 0x90, 0x38, 0x0f, 0x42, 0x8a, 0xf6, 0x61, 0xeb,
0x07, 0xb5, 0x56, 0x7a, 0xcb, 0xfd, 0x55, 0x27, 0xca, 0x52, 0xff, 0x32, 0xa0, 0x3d, 0x29, 0xa2,
0x88, 0xa4, 0x17, 0xe8, 0x5d, 0xd8, 0xce, 0x48, 0x94, 0x84, 0xd4, 0x5b, 0xf0, 0xe2, 0x45, 0x86,
0x06, 0xee, 0x48, 0x4c, 0xf8, 0x81, 0x0e, 0x00, 0x14, 0x25, 0x2b, 0x22, 0x95, 0xc9, 0x92, 0xc8,
0xa4, 0x88, 0xd0, 0x57, 0x6b, 0xe7, 0x9b, 0x03, 0x73, 0xb3, 0x2d, 0x5a, 0xb1, 0xa8, 0xea, 0xc6,
0x9a, 0xca, 0x5a, 0x73, 0x1a, 0xaf, 0x6d, 0x4e, 0x1f, 0xda, 0xb3, 0x38, 0xbf, 0x48, 0xe8, 0x72,
0xc3, 0x55, 0xff, 0xde, 0x04, 0xeb, 0x61, 0x90, 0xe5, 0xcc, 0x4f, 0x49, 0xf4, 0x7f, 0x1c, 0xf8,
0x10, 0xd0, 0x3a, 0xc5, 0x3b, 0x0f, 0x19, 0xc9, 0x85, 0x42, 0x03, 0xdb, 0x6b, 0xc4, 0x07, 0x1c,
0xff, 0x2f, 0xbf, 0xee, 0x40, 0x6b, 0x5e, 0x2c, 0xbe, 0xa7, 0xb9, 0x72, 0xeb, 0x9d, 0x7a, 0xb7,
0x4e, 0x04, 0x47, 0x79, 0xa5, 0x22, 0xea, 0x9d, 0xda, 0x7d, 0x5d, 0xa7, 0xd0, 0x2d, 0x68, 0x65,
0x8b, 0x15, 0x8d, 0x88, 0xd3, 0x1c, 0x18, 0x47, 0x7b, 0x58, 0xed, 0xd0, 0x7b, 0xb0, 0xf3, 0x13,
0x4d, 0x99, 0x97, 0xaf, 0x52, 0x9a, 0xad, 0x58, 0xb8, 0x74, 0x5a, 0xa2, 0x8a, 0x2e, 0x47, 0xa7,
0x1a, 0xe4, 0x85, 0x0a, 0x9a, 0xf4, 0xad, 0x2d, 0x7c, 0xb3, 0x38, 0x22, 0x5d, 0x3b, 0x02, 0xbb,
0xfa, 0x59, 0x79, 0xb6, 0x25, 0xf2, 0xec, 0x94, 0x24, 0xe9, 0xd8, 0x23, 0xe8, 0xc6, 0xd4, 0x27,
0x79, 0xf0, 0x9c, 0x7a, 0x59, 0x42, 0x62, 0xc7, 0x12, 0xce, 0x0c, 0x5e, 0xe5, 0xcc, 0x24, 0x21,
0xb1, 0x72, 0x67, 0x5b, 0x07, 0x73, 0x8c, 0x8b, 0x2f, 0x93, 0x2d, 0x69, 0x98, 0x13, 0x07, 0x06,
0xe6, 0x11, 0xc2, 0xe5, 0x11, 0xf7, 0x39, 0x78, 0x85, 0x26, 0x0b, 0xe8, 0x0c, 0x4c, 0x5e, 0xa3,
0x46, 0x65, 0x11, 0x8f, 0xa0, 0x9b, 0xb0, 0x2c, 0xa8, 0xa4, 0x6d, 0x5f, 0x4f, 0x9a, 0x0e, 0xd6,
0xd2, 0xca, 0x64, 0x52, 0x5a, 0x57, 0x4a, 0xd3, 0x68, 0x29, 0xad, 0xa4, 0x49, 0x69, 0x3b, 0x52,
0x9a, 0x46, 0x85, 0x34, 0xf7, 0x0f, 0x03, 0x5a, 0xf2, 0x40, 0xf4, 0x3e, 0xd8, 0x8b, 0x22, 0x2a,
0xc2, 0xf5, 0x72, 0xe4, 0x3b, 0xde, 0xad, 0x70, 0x59, 0xd0, 0x6d, 0xb8, 0xf5, 0x32, 0xf5, 0xca,
0x7b, 0xee, 0xbd, 0x14, 0x20, 0x6f, 0xa8, 0x0f, 0x9d, 0x22, 0x49, 0x68, 0xea, 0xcd, 0x59, 0x11,
0x2f, 0xd5, 0xa3, 0x06, 0x01, 0x9d, 0x70, 0xe4, 0x4a, 0x73, 0x34, 0xaf, 0xd7, 0x1c, 0xdd, 0xbb,
0x00, 0x95, 0x71, 0xfc, 0x51, 0xb2, 0xf3, 0xf3, 0x8c, 0xca, 0x0a, 0xf6, 0xb0, 0xda, 0x71, 0x3c,
0xa4, 0xb1, 0x9f, 0xaf, 0xc4, 0xe9, 0x5d, 0xac, 0x76, 0xee, 0x2f, 0x06, 0x6c, 0xe9, 0xa4, 0xe8,
0x0b, 0x68, 0x86, 0x7c, 0x36, 0x38, 0x86, 0xb8, 0xa6, 0x7e, 0xbd, 0x86, 0x72, 0x7c, 0xa8, 0x5b,
0x92, 0x31, 0xf5, 0xdd, 0x12, 0x7d, 0x0e, 0xd6, 0x35, 0x5a, 0x36, 0xae, 0xc8, 0xee, 0xcf, 0x26,
0xb4, 0xc6, 0x62, 0x0e, 0xbe, 0x99, 0xae, 0x8f, 0xa0, 0xe9, 0xf3, 0xc9, 0xa5, 0xa6, 0xce, 0xdb,
0xf5, 0xc1, 0x62, 0xb8, 0x61, 0xc9, 0x44, 0x9f, 0x41, 0x7b, 0x21, 0x87, 0x99, 0x92, 0x7c, 0x50,
0x1f, 0xa4, 0x26, 0x1e, 0xd6, 0x6c, 0x1e, 0x98, 0xc9, 0xd1, 0xa0, 0x3a, 0xf0, 0x86, 0x40, 0x35,
0x3f, 0xb0, 0x66, 0xf3, 0xc0, 0x42, 0x76, 0x5d, 0xd1, 0x4c, 0x36, 0x06, 0xaa, 0xd6, 0x8c, 0x35,
0x1b, 0x7d, 0x09, 0xd6, 0x4a, 0x37, 0x63, 0xd1, 0x44, 0x36, 0xda, 0x53, 0xf6, 0x6c, 0x5c, 0x45,
0xf0, 0xf6, 0x5d, 0x3a, 0xee, 0x45, 0x99, 0xe8, 0x54, 0x26, 0xee, 0x94, 0xd8, 0x38, 0x73, 0x7f,
0x35, 0x60, 0x5b, 0xde, 0xc3, 0x03, 0x12, 0x05, 0xe1, 0x45, 0xed, 0x47, 0x03, 0x82, 0xc6, 0x8a,
0x86, 0x89, 0xfa, 0x66, 0x10, 0x6b, 0x74, 0x1b, 0x1a, 0x5c, 0xa3, 0xb0, 0x70, 0x67, 0xd3, 0x7f,
0x5e, 0x66, 0x9e, 0x5e, 0x24, 0x14, 0x0b, 0x36, 0x6f, 0xf0, 0xf2, 0xeb, 0xc7, 0x69, 0xbc, 0xaa,
0xc1, 0xcb, 0x38, 0xdd, 0xe0, 0x65, 0xc4, 0x07, 0x73, 0x80, 0x2a, 0x1f, 0xea, 0x40, 0xfb, 0xde,
0x93, 0xd9, 0xe3, 0xe9, 0x29, 0xb6, 0x6f, 0x20, 0x0b, 0x9a, 0x67, 0xc7, 0xb3, 0xb3, 0x53, 0xdb,
0xe0, 0xf8, 0x64, 0x36, 0x1e, 0x1f, 0xe3, 0x67, 0xf6, 0x4d, 0xbe, 0x99, 0x3d, 0x9e, 0x3e, 0x7b,
0x7a, 0x7a, 0xdf, 0x36, 0x51, 0x17, 0xac, 0x87, 0x5f, 0x4f, 0xa6, 0x4f, 0xce, 0xf0, 0xf1, 0xd8,
0x6e, 0xa0, 0xb7, 0x60, 0x57, 0xc4, 0x78, 0x15, 0xd8, 0x3c, 0x71, 0x5f, 0x5c, 0x1e, 0x1a, 0x7f,
0x5e, 0x1e, 0x1a, 0xff, 0x5c, 0x1e, 0x1a, 0xdf, 0xf5, 0x02, 0xe6, 0x55, 0xe2, 0x3c, 0x29, 0x6e,
0xde, 0x12, 0x2f, 0xfb, 0x93, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x68, 0x3f, 0xd9, 0x07, 0xdd,
0x09, 0x00, 0x00,
}
func (m *LabelPair) Marshal() (dAtA []byte, err error) {
@ -1100,6 +1127,18 @@ func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if m.CreatedTimestamp != nil {
{
size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
if m.Exemplar != nil {
{
size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i])
@ -1184,6 +1223,18 @@ func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if m.CreatedTimestamp != nil {
{
size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22
}
if len(m.Quantile) > 0 {
for iNdEx := len(m.Quantile) - 1; iNdEx >= 0; iNdEx-- {
{
@ -1269,32 +1320,44 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if m.CreatedTimestamp != nil {
{
size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x7a
}
if len(m.PositiveCount) > 0 {
for iNdEx := len(m.PositiveCount) - 1; iNdEx >= 0; iNdEx-- {
f2 := math.Float64bits(float64(m.PositiveCount[iNdEx]))
f5 := math.Float64bits(float64(m.PositiveCount[iNdEx]))
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f2))
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5))
}
i = encodeVarintMetrics(dAtA, i, uint64(len(m.PositiveCount)*8))
i--
dAtA[i] = 0x72
}
if len(m.PositiveDelta) > 0 {
var j3 int
dAtA5 := make([]byte, len(m.PositiveDelta)*10)
var j6 int
dAtA8 := make([]byte, len(m.PositiveDelta)*10)
for _, num := range m.PositiveDelta {
x4 := (uint64(num) << 1) ^ uint64((num >> 63))
for x4 >= 1<<7 {
dAtA5[j3] = uint8(uint64(x4)&0x7f | 0x80)
j3++
x4 >>= 7
x7 := (uint64(num) << 1) ^ uint64((num >> 63))
for x7 >= 1<<7 {
dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80)
j6++
x7 >>= 7
}
dAtA5[j3] = uint8(x4)
j3++
dAtA8[j6] = uint8(x7)
j6++
}
i -= j3
copy(dAtA[i:], dAtA5[:j3])
i = encodeVarintMetrics(dAtA, i, uint64(j3))
i -= j6
copy(dAtA[i:], dAtA8[:j6])
i = encodeVarintMetrics(dAtA, i, uint64(j6))
i--
dAtA[i] = 0x6a
}
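
The renumbered temporaries (f2 becomes f5, j3 becomes j6, and so on) are just gogoproto regenerating the marshaller around the new field; the encoding itself is unchanged. Signed deltas are zigzag-mapped so small magnitudes stay small, then written as base-128 varints, which this sketch reproduces in isolation:

package main

import "fmt"

// zigzag maps signed to unsigned: 0->0, -1->1, 1->2, -2->3, ...
// This is the (uint64(num) << 1) ^ uint64(num >> 63) in the generated code.
func zigzag(num int64) uint64 {
	return (uint64(num) << 1) ^ uint64(num>>63)
}

// varint emits x as little-endian base-128 groups; the high bit marks "more".
func varint(x uint64) []byte {
	var out []byte
	for x >= 1<<7 {
		out = append(out, uint8(x&0x7f|0x80))
		x >>= 7
	}
	return append(out, uint8(x))
}

func main() {
	for _, d := range []int64{0, -1, 1, -2, 300} {
		fmt.Printf("%4d -> zigzag %3d -> varint %x\n", d, zigzag(d), varint(zigzag(d)))
	}
}
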
@ -1314,30 +1377,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
}
if len(m.NegativeCount) > 0 {
for iNdEx := len(m.NegativeCount) - 1; iNdEx >= 0; iNdEx-- {
f6 := math.Float64bits(float64(m.NegativeCount[iNdEx]))
f9 := math.Float64bits(float64(m.NegativeCount[iNdEx]))
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6))
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f9))
}
i = encodeVarintMetrics(dAtA, i, uint64(len(m.NegativeCount)*8))
i--
dAtA[i] = 0x5a
}
if len(m.NegativeDelta) > 0 {
var j7 int
dAtA9 := make([]byte, len(m.NegativeDelta)*10)
var j10 int
dAtA12 := make([]byte, len(m.NegativeDelta)*10)
for _, num := range m.NegativeDelta {
x8 := (uint64(num) << 1) ^ uint64((num >> 63))
for x8 >= 1<<7 {
dAtA9[j7] = uint8(uint64(x8)&0x7f | 0x80)
j7++
x8 >>= 7
x11 := (uint64(num) << 1) ^ uint64((num >> 63))
for x11 >= 1<<7 {
dAtA12[j10] = uint8(uint64(x11)&0x7f | 0x80)
j10++
x11 >>= 7
}
dAtA9[j7] = uint8(x8)
j7++
dAtA12[j10] = uint8(x11)
j10++
}
i -= j7
copy(dAtA[i:], dAtA9[:j7])
i = encodeVarintMetrics(dAtA, i, uint64(j7))
i -= j10
copy(dAtA[i:], dAtA12[:j10])
i = encodeVarintMetrics(dAtA, i, uint64(j10))
i--
dAtA[i] = 0x52
}
@ -1788,6 +1851,10 @@ func (m *Counter) Size() (n int) {
l = m.Exemplar.Size()
n += 1 + l + sovMetrics(uint64(l))
}
if m.CreatedTimestamp != nil {
l = m.CreatedTimestamp.Size()
n += 1 + l + sovMetrics(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@ -1830,6 +1897,10 @@ func (m *Summary) Size() (n int) {
n += 1 + l + sovMetrics(uint64(l))
}
}
if m.CreatedTimestamp != nil {
l = m.CreatedTimestamp.Size()
n += 1 + l + sovMetrics(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@ -1916,6 +1987,10 @@ func (m *Histogram) Size() (n int) {
if len(m.PositiveCount) > 0 {
n += 1 + sovMetrics(uint64(len(m.PositiveCount)*8)) + len(m.PositiveCount)*8
}
if m.CreatedTimestamp != nil {
l = m.CreatedTimestamp.Size()
n += 1 + l + sovMetrics(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@ -2319,6 +2394,42 @@ func (m *Counter) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.CreatedTimestamp == nil {
m.CreatedTimestamp = &types.Timestamp{}
}
if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
@ -2507,6 +2618,42 @@ func (m *Summary) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.CreatedTimestamp == nil {
m.CreatedTimestamp = &types.Timestamp{}
}
if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
@ -3089,6 +3236,42 @@ func (m *Histogram) Unmarshal(dAtA []byte) error {
} else {
return fmt.Errorf("proto: wrong wireType = %d for field PositiveCount", wireType)
}
case 15:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.CreatedTimestamp == nil {
m.CreatedTimestamp = &types.Timestamp{}
}
if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
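The generated marshalers above write messages back to front and emit a hard-coded key byte before each new created_timestamp field (0x1a for Counter, 0x22 for Summary, 0x7a for Histogram). A minimal sketch of where those constants come from, using nothing beyond the protobuf wire format: the key is field_number<<3 | wire_type, and wire type 2 marks a length-delimited payload such as an embedded Timestamp message.

```go
package main

import "fmt"

// encodeTag builds the protobuf key byte for a field number and wire type,
// matching the constants emitted by the generated code above.
func encodeTag(fieldNum, wireType uint64) uint64 {
	return fieldNum<<3 | wireType
}

func main() {
	const lengthDelimited = 2 // wire type for embedded messages like Timestamp
	fmt.Printf("Counter.created_timestamp   (field 3):  %#x\n", encodeTag(3, lengthDelimited))  // 0x1a
	fmt.Printf("Summary.created_timestamp   (field 4):  %#x\n", encodeTag(4, lengthDelimited))  // 0x22
	fmt.Printf("Histogram.created_timestamp (field 15): %#x\n", encodeTag(15, lengthDelimited)) // 0x7a
}
```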

View file

@ -51,6 +51,8 @@ message Gauge {
message Counter {
double value = 1;
Exemplar exemplar = 2;
google.protobuf.Timestamp created_timestamp = 3 [(gogoproto.nullable) = true];
}
message Quantile {
@ -62,6 +64,8 @@ message Summary {
uint64 sample_count = 1;
double sample_sum = 2;
repeated Quantile quantile = 3 [(gogoproto.nullable) = false];
google.protobuf.Timestamp created_timestamp = 4 [(gogoproto.nullable) = true];
}
message Untyped {
@ -75,6 +79,8 @@ message Histogram {
// Buckets for the conventional histogram.
repeated Bucket bucket = 3 [(gogoproto.nullable) = false]; // Ordered in increasing order of upper_bound, +Inf bucket is optional.
google.protobuf.Timestamp created_timestamp = 15 [(gogoproto.nullable) = true];
// Everything below here is for native histograms (also known as sparse histograms).
// Native histograms are an experimental feature without stability guarantees.
@ -147,4 +153,4 @@ message MetricFamily {
string help = 2;
MetricType type = 3;
repeated Metric metric = 4 [(gogoproto.nullable) = false];
}
}
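On the Go side the new field is a nullable pointer, so a nil CreatedTimestamp is simply skipped by the marshaler. A short sketch of building such a value with the gogo types package that the unmarshaling code above references; the surrounding generated struct is assumed rather than shown here.

```go
package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types" // the types.Timestamp used by the generated code
)

func main() {
	// Convert a Go time.Time into the wire-level Timestamp message.
	created, err := types.TimestampProto(time.Unix(1700000000, 0).UTC())
	if err != nil {
		panic(err)
	}
	// Assigning created to a metric's CreatedTimestamp field records the
	// series' creation time; leaving it nil omits the field entirely.
	fmt.Println(created.Seconds, created.Nanos) // 1700000000 0
}
```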

View file

@ -60,6 +60,11 @@ const (
maxInt64 = 9223372036854774784
// The smallest SampleValue that can be converted to an int64 without underflow.
minInt64 = -9223372036854775808
// Max initial size for the pooled points slices.
// The getHPointSlice and getFPointSlice functions are called with an estimated size which often can be
// over-estimated.
maxPointsSliceSize = 5000
)
type engineMetrics struct {
@ -1911,19 +1916,33 @@ func getFPointSlice(sz int) []FPoint {
if p := fPointPool.Get(); p != nil {
return p
}
if sz > maxPointsSliceSize {
sz = maxPointsSliceSize
}
return make([]FPoint, 0, sz)
}
// putFPointSlice returns an FPoint slice to the pool for reuse.
// The slice's length is reset to zero; its backing array is retained.
func putFPointSlice(p []FPoint) {
if p != nil {
fPointPool.Put(p[:0])
}
}
// getHPointSlice will return an HPoint slice with capacity at most maxPointsSliceSize.
// This function is called with an estimated size which can often be over-estimated.
func getHPointSlice(sz int) []HPoint {
if p := hPointPool.Get(); p != nil {
return p
}
if sz > maxPointsSliceSize {
sz = maxPointsSliceSize
}
return make([]HPoint, 0, sz)
}
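The two getters clamp the caller's capacity estimate before allocating, so a single wildly over-estimated query cannot park a huge backing array in the pool. A self-contained sketch of the same capped-pool pattern, with generic names rather than the engine's actual pool types:

```go
package main

import (
	"fmt"
	"sync"
)

const maxPooledSize = 5000 // cap on the capacity of freshly allocated slices

var pool sync.Pool

func getSlice(sz int) []float64 {
	if p := pool.Get(); p != nil {
		return p.([]float64)
	}
	if sz > maxPooledSize {
		sz = maxPooledSize // clamp the (possibly over-estimated) request
	}
	return make([]float64, 0, sz)
}

func putSlice(p []float64) {
	if p != nil {
		pool.Put(p[:0]) // reset length, keep the backing array
	}
}

func main() {
	s := getSlice(1_000_000) // over-estimate: capacity is clamped to 5000
	fmt.Println(cap(s))
	putSlice(s)
}
```

Note that slices already sitting in the pool come back at whatever capacity they grew to; the clamp only bounds fresh allocations, mirroring the code above.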

View file

@ -1168,10 +1168,14 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
for _, mb := range enh.signatureToMetricWithBuckets {
if len(mb.buckets) > 0 {
res, forcedMonotonicity := bucketQuantile(q, mb.buckets)
enh.Out = append(enh.Out, Sample{
Metric: mb.metric,
F: bucketQuantile(q, mb.buckets),
F: res,
})
if forcedMonotonicity {
annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(mb.metric.Get(labels.MetricName), args[1].PositionRange()))
}
}
}

View file

@ -71,41 +71,50 @@ type metricWithBuckets struct {
// If q<0, -Inf is returned.
//
// If q>1, +Inf is returned.
func bucketQuantile(q float64, buckets buckets) float64 {
//
// We also return a bool to indicate if monotonicity needed to be forced.
func bucketQuantile(q float64, buckets buckets) (float64, bool) {
if math.IsNaN(q) {
return math.NaN()
return math.NaN(), false
}
if q < 0 {
return math.Inf(-1)
return math.Inf(-1), false
}
if q > 1 {
return math.Inf(+1)
return math.Inf(+1), false
}
slices.SortFunc(buckets, func(a, b bucket) bool {
return a.upperBound < b.upperBound
slices.SortFunc(buckets, func(a, b bucket) int {
// We don't expect the bucket boundary to be a NaN.
if a.upperBound < b.upperBound {
return -1
}
if a.upperBound > b.upperBound {
return +1
}
return 0
})
if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) {
return math.NaN()
return math.NaN(), false
}
buckets = coalesceBuckets(buckets)
ensureMonotonic(buckets)
forcedMonotonic := ensureMonotonic(buckets)
if len(buckets) < 2 {
return math.NaN()
return math.NaN(), false
}
observations := buckets[len(buckets)-1].count
if observations == 0 {
return math.NaN()
return math.NaN(), false
}
rank := q * observations
b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank })
if b == len(buckets)-1 {
return buckets[len(buckets)-2].upperBound
return buckets[len(buckets)-2].upperBound, forcedMonotonic
}
if b == 0 && buckets[0].upperBound <= 0 {
return buckets[0].upperBound
return buckets[0].upperBound, forcedMonotonic
}
var (
bucketStart float64
@ -117,7 +126,7 @@ func bucketQuantile(q float64, buckets buckets) float64 {
count -= buckets[b-1].count
rank -= buckets[b-1].count
}
return bucketStart + (bucketEnd-bucketStart)*(rank/count)
return bucketStart + (bucketEnd-bucketStart)*(rank/count), forcedMonotonic
}
// histogramQuantile calculates the quantile 'q' based on the given histogram.
@ -335,37 +344,24 @@ func coalesceBuckets(buckets buckets) buckets {
// The assumption that bucket counts increase monotonically with increasing
// upperBound may be violated during:
//
// * Recording rule evaluation of histogram_quantile, especially when rate()
// has been applied to the underlying bucket timeseries.
// * Evaluation of histogram_quantile computed over federated bucket
// timeseries, especially when rate() has been applied.
//
// This is because scraped data is not made available to rule evaluation or
// federation atomically, so some buckets are computed with data from the
// most recent scrapes, but the other buckets are missing data from the most
// recent scrape.
// - Circumstances where data is already inconsistent at the target's side.
// - Ingestion via the remote write receiver that Prometheus implements.
// - Optimisation of query execution where precision is sacrificed for other
// benefits, not by Prometheus but by systems built on top of it.
//
// Monotonicity is usually guaranteed because if a bucket with upper bound
// u1 has count c1, then any bucket with a higher upper bound u > u1 must
// have counted all c1 observations and perhaps more, so that c >= c1.
//
// Randomly interspersed partial sampling breaks that guarantee, and rate()
// exacerbates it. Specifically, suppose bucket le=1000 has a count of 10 from
// 4 samples but the bucket with le=2000 has a count of 7 from 3 samples. The
// monotonicity is broken. It is exacerbated by rate() because under normal
// operation, cumulative counting of buckets will cause the bucket counts to
// diverge such that small differences from missing samples are not a problem.
// rate() removes this divergence.)
// have counted all c1 observations and perhaps more, so that c >= c1.
//
// bucketQuantile depends on that monotonicity to do a binary search for the
// bucket with the φ-quantile count, so breaking the monotonicity
// guarantee causes bucketQuantile() to return undefined (nonsense) results.
//
// As a somewhat hacky solution until ingestion is atomic per scrape, we
// calculate the "envelope" of the histogram buckets, essentially removing
// any decreases in the count between successive buckets.
func ensureMonotonic(buckets buckets) {
// As a somewhat hacky solution, we calculate the "envelope" of the histogram
// buckets, essentially removing any decreases in the count between successive
// buckets. We return a bool to indicate if this monotonicity was forced or not.
func ensureMonotonic(buckets buckets) bool {
forced := false
max := buckets[0].count
for i := 1; i < len(buckets); i++ {
switch {
@ -373,8 +369,10 @@ func ensureMonotonic(buckets buckets) {
max = buckets[i].count
case buckets[i].count < max:
buckets[i].count = max
forced = true
}
}
return forced
}
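To make the envelope repair concrete, here is a self-contained sketch of ensureMonotonic exactly as written above, run on the le=1000/le=2000 example from the old comment:

```go
package main

import "fmt"

type bucket struct {
	upperBound float64
	count      float64
}

// ensureMonotonic walks the cumulative bucket counts, raises any dip back up
// to the running maximum, and reports whether it had to intervene.
func ensureMonotonic(buckets []bucket) bool {
	forced := false
	max := buckets[0].count
	for i := 1; i < len(buckets); i++ {
		switch {
		case buckets[i].count > max:
			max = buckets[i].count
		case buckets[i].count < max:
			buckets[i].count = max
			forced = true
		}
	}
	return forced
}

func main() {
	// le=2000 dipping below le=1000 is exactly the partial-scrape
	// inconsistency described in the comment above.
	b := []bucket{{1000, 10}, {2000, 7}, {4000, 12}}
	fmt.Println(ensureMonotonic(b), b) // true [{1000 10} {2000 10} {4000 12}]
}
```

The returned bool is what ultimately surfaces to the user as the new HistogramQuantileForcedMonotonicityInfo annotation.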
// quantile calculates the given quantile of a vector of samples.

View file

@ -20,6 +20,7 @@ import (
"math"
"net/url"
"sort"
"strings"
"sync"
"time"
@ -506,9 +507,11 @@ func (g *Group) AlertingRules() []*AlertingRule {
alerts = append(alerts, alertingRule)
}
}
slices.SortFunc(alerts, func(a, b *AlertingRule) bool {
return a.State() > b.State() ||
(a.State() == b.State() && a.Name() < b.Name())
slices.SortFunc(alerts, func(a, b *AlertingRule) int {
if a.State() == b.State() {
return strings.Compare(a.Name(), b.Name())
}
return int(b.State() - a.State())
})
return alerts
}
@ -1277,11 +1280,15 @@ func (m *Manager) RuleGroups() []*Group {
rgs = append(rgs, g)
}
slices.SortFunc(rgs, func(a, b *Group) bool {
if a.file != b.file {
return a.file < b.file
slices.SortFunc(rgs, func(a, b *Group) int {
fileCompare := strings.Compare(a.file, b.file)
// If it's 0, the file names are the same.
// Let's look at the group names in that case.
if fileCompare != 0 {
return fileCompare
}
return a.name < b.name
return strings.Compare(a.name, b.name)
})
return rgs
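This is one instance of a change repeated throughout the sync: slices.SortFunc no longer takes a boolean "less" function but a three-way comparator returning a negative, zero, or positive int, matching the contract of the slices package that landed in Go 1.21. A minimal before/after sketch using the standard library:

```go
package main

import (
	"fmt"
	"slices" // Go 1.21+; golang.org/x/exp/slices has the same comparator API
	"strings"
)

func main() {
	names := []string{"b", "a", "c"}

	// Old API (removed): slices.SortFunc(names, func(a, b string) bool { return a < b })
	// New API: a comparator with strcmp-style semantics.
	slices.SortFunc(names, func(a, b string) int { return strings.Compare(a, b) })

	fmt.Println(names) // [a b c]
}
```

The comparator should be consistent and return 0 for equal elements rather than an arbitrary non-zero value, which is why the multi-key sorts in this sync spell out the -1/0/+1 cases explicitly.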

View file

@ -732,8 +732,8 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re
}
func resolveConflictingExposedLabels(lb *labels.Builder, conflictingExposedLabels []labels.Label) {
slices.SortStableFunc(conflictingExposedLabels, func(a, b labels.Label) bool {
return len(a.Name) < len(b.Name)
slices.SortStableFunc(conflictingExposedLabels, func(a, b labels.Label) int {
return len(a.Name) - len(b.Name)
})
for _, l := range conflictingExposedLabels {
@ -785,7 +785,8 @@ func appender(app storage.Appender, sampleLimit, bucketLimit int) storage.Append
// A scraper retrieves samples and accepts a status report at the end.
type scraper interface {
scrape(ctx context.Context, w io.Writer) (string, error)
scrape(ctx context.Context) (*http.Response, error)
readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error)
Report(start time.Time, dur time.Duration, err error)
offset(interval time.Duration, offsetSeed uint64) time.Duration
}
@ -814,11 +815,11 @@ const (
var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error) {
func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) {
if s.req == nil {
req, err := http.NewRequest("GET", s.URL().String(), nil)
if err != nil {
return "", err
return nil, err
}
req.Header.Add("Accept", s.acceptHeader)
req.Header.Add("Accept-Encoding", "gzip")
@ -828,10 +829,10 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)
s.req = req
}
resp, err := s.client.Do(s.req.WithContext(ctx))
if err != nil {
return "", err
}
return s.client.Do(s.req.WithContext(ctx))
}
func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error) {
defer func() {
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
@ -858,13 +859,14 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)
if s.gzipr == nil {
s.buf = bufio.NewReader(resp.Body)
var err error
s.gzipr, err = gzip.NewReader(s.buf)
if err != nil {
return "", err
}
} else {
s.buf.Reset(resp.Body)
if err = s.gzipr.Reset(s.buf); err != nil {
if err := s.gzipr.Reset(s.buf); err != nil {
return "", err
}
}
@ -1326,11 +1328,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
)
}
b := sl.buffers.Get(sl.lastScrapeSize).([]byte)
defer sl.buffers.Put(b)
buf := bytes.NewBuffer(b)
var total, added, seriesAdded, bytes int
var total, added, seriesAdded, bytesRead int
var err, appErr, scrapeErr error
app := sl.appender(sl.appenderCtx)
@ -1346,7 +1344,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
}()
defer func() {
if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, bytes, scrapeErr); err != nil {
if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, bytesRead, scrapeErr); err != nil {
level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err)
}
}()
@ -1367,8 +1365,17 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
}
var contentType string
var resp *http.Response
var b []byte
var buf *bytes.Buffer
scrapeCtx, cancel := context.WithTimeout(sl.parentCtx, sl.timeout)
contentType, scrapeErr = sl.scraper.scrape(scrapeCtx, buf)
resp, scrapeErr = sl.scraper.scrape(scrapeCtx)
if scrapeErr == nil {
b = sl.buffers.Get(sl.lastScrapeSize).([]byte)
defer sl.buffers.Put(b)
buf = bytes.NewBuffer(b)
contentType, scrapeErr = sl.scraper.readResponse(scrapeCtx, resp, buf)
}
cancel()
if scrapeErr == nil {
@ -1379,14 +1386,14 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
if len(b) > 0 {
sl.lastScrapeSize = len(b)
}
bytes = len(b)
bytesRead = len(b)
} else {
level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr)
if errc != nil {
errc <- scrapeErr
}
if errors.Is(scrapeErr, errBodySizeLimit) {
bytes = -1
bytesRead = -1
}
}
@ -1525,6 +1532,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
appErrs = appendErrors{}
sampleLimitErr error
bucketLimitErr error
lset labels.Labels // escapes to heap so hoisted out of loop
e exemplar.Exemplar // escapes to heap so hoisted out of loop
meta metadata.Metadata
metadataChanged bool
@ -1622,7 +1630,6 @@ loop:
ce, ok := sl.cache.get(met)
var (
ref storage.SeriesRef
lset labels.Labels
hash uint64
)
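Splitting the old one-shot scrape into a request phase and a read phase lets scrapeAndReport defer checking out a pooled buffer until a response has actually arrived, as the change above shows. A runnable sketch of the same two-phase contract against an httptest server; splitScraper is a hypothetical stand-in, not the Prometheus targetScraper:

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

type splitScraper struct {
	client *http.Client
	url    string
}

// scrape only performs the HTTP request; no body is read yet.
func (s *splitScraper) scrape(ctx context.Context) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, s.url, nil)
	if err != nil {
		return nil, err
	}
	return s.client.Do(req)
}

// readResponse drains the body into a caller-owned buffer and reports the
// content type, mirroring the new interface above.
func (s *splitScraper) readResponse(resp *http.Response, w io.Writer) (string, error) {
	defer resp.Body.Close()
	if _, err := io.Copy(w, resp.Body); err != nil {
		return "", err
	}
	return resp.Header.Get("Content-Type"), nil
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "text/plain; version=0.0.4")
		fmt.Fprint(w, "metric_a 1\n")
	}))
	defer srv.Close()

	s := &splitScraper{client: srv.Client(), url: srv.URL}
	resp, err := s.scrape(context.Background())
	if err != nil {
		panic(err) // a failed request never touches the buffer pool
	}
	var buf bytes.Buffer // in the real loop this comes from sl.buffers
	ct, err := s.readResponse(resp, &buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s: %q\n", ct, buf.String())
}
```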

View file

@ -2619,7 +2619,9 @@ func TestTargetScraperScrapeOK(t *testing.T) {
}
var buf bytes.Buffer
contentType, err := ts.scrape(context.Background(), &buf)
resp, err := ts.scrape(context.Background())
require.NoError(t, err)
contentType, err := ts.readResponse(context.Background(), resp, &buf)
require.NoError(t, err)
require.Equal(t, "text/plain; version=0.0.4", contentType)
require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String())
@ -2665,7 +2667,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
}()
go func() {
_, err := ts.scrape(ctx, io.Discard)
_, err := ts.scrape(ctx)
switch {
case err == nil:
errc <- errors.New("Expected error but got nil")
@ -2711,7 +2713,9 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) {
acceptHeader: scrapeAcceptHeader,
}
_, err = ts.scrape(context.Background(), io.Discard)
resp, err := ts.scrape(context.Background())
require.NoError(t, err)
_, err = ts.readResponse(context.Background(), resp, io.Discard)
require.Contains(t, err.Error(), "404", "Expected \"404 NotFound\" error but got: %s", err)
}
@ -2755,26 +2759,34 @@ func TestTargetScraperBodySizeLimit(t *testing.T) {
var buf bytes.Buffer
// Target response uncompressed body, scrape with body size limit.
_, err = ts.scrape(context.Background(), &buf)
resp, err := ts.scrape(context.Background())
require.NoError(t, err)
_, err = ts.readResponse(context.Background(), resp, &buf)
require.ErrorIs(t, err, errBodySizeLimit)
require.Equal(t, bodySizeLimit, buf.Len())
// Target response gzip compressed body, scrape with body size limit.
gzipResponse = true
buf.Reset()
_, err = ts.scrape(context.Background(), &buf)
resp, err = ts.scrape(context.Background())
require.NoError(t, err)
_, err = ts.readResponse(context.Background(), resp, &buf)
require.ErrorIs(t, err, errBodySizeLimit)
require.Equal(t, bodySizeLimit, buf.Len())
// Target response uncompressed body, scrape without body size limit.
gzipResponse = false
buf.Reset()
ts.bodySizeLimit = 0
_, err = ts.scrape(context.Background(), &buf)
resp, err = ts.scrape(context.Background())
require.NoError(t, err)
_, err = ts.readResponse(context.Background(), resp, &buf)
require.NoError(t, err)
require.Equal(t, len(responseBody), buf.Len())
// Target response gzip compressed body, scrape without body size limit.
gzipResponse = true
buf.Reset()
_, err = ts.scrape(context.Background(), &buf)
resp, err = ts.scrape(context.Background())
require.NoError(t, err)
_, err = ts.readResponse(context.Background(), resp, &buf)
require.NoError(t, err)
require.Equal(t, len(responseBody), buf.Len())
}
@ -2802,7 +2814,11 @@ func (ts *testScraper) Report(start time.Time, duration time.Duration, err error
ts.lastError = err
}
func (ts *testScraper) scrape(ctx context.Context, w io.Writer) (string, error) {
func (ts *testScraper) scrape(ctx context.Context) (*http.Response, error) {
return nil, ts.scrapeErr
}
func (ts *testScraper) readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error) {
if ts.scrapeFunc != nil {
return "", ts.scrapeFunc(ctx, w)
}

View file

@ -18,7 +18,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
- name: install Go
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:

View file

@ -91,12 +91,8 @@ func (b *BufferedSeriesIterator) Seek(t int64) chunkenc.ValueType {
switch b.valueType {
case chunkenc.ValNone:
return chunkenc.ValNone
case chunkenc.ValFloat:
b.lastTime, _ = b.At()
case chunkenc.ValHistogram:
b.lastTime, _ = b.AtHistogram()
case chunkenc.ValFloatHistogram:
b.lastTime, _ = b.AtFloatHistogram()
case chunkenc.ValFloat, chunkenc.ValHistogram, chunkenc.ValFloatHistogram:
b.lastTime = b.AtT()
default:
panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType))
}
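AtT returns just the timestamp of the current sample, whatever its value type, which is why the three per-type cases above collapse into one. A tiny sketch of the distinction with a hypothetical iterator, not the real chunkenc one:

```go
package main

import "fmt"

type iter struct {
	t int64
	v float64
}

// At decodes the full sample; AtT only reports its timestamp, so callers
// that just need the time (like Seek above) can skip the value entirely.
func (it *iter) At() (int64, float64) { return it.t, it.v }
func (it *iter) AtT() int64           { return it.t }

func main() {
	it := &iter{t: 42, v: 1.5}
	fmt.Println(it.AtT()) // 42
}
```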

View file

@ -347,7 +347,7 @@ func (c *genericMergeSeriesSet) Next() bool {
}
// Now, pop items of the heap that have equal label sets.
c.currentSets = nil
c.currentSets = c.currentSets[:0]
c.currentLabels = c.heap[0].At().Labels()
for len(c.heap) > 0 && labels.Equal(c.currentLabels, c.heap[0].At().Labels()) {
set := heap.Pop(&c.heap).(genericSeriesSet)
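Assigning nil would discard the slice's backing array and force a fresh allocation on every Next call; re-slicing to length zero keeps the capacity around for reuse. A short illustration of the difference:

```go
package main

import "fmt"

func main() {
	s := make([]int, 0, 8)
	s = append(s, 1, 2, 3)

	s = s[:0] // length 0, but the 8-element backing array survives
	fmt.Println(len(s), cap(s)) // 0 8

	s = nil // this, by contrast, drops the array entirely
	fmt.Println(len(s), cap(s)) // 0 0
}
```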

View file

@ -22,7 +22,10 @@ import (
"sync"
"time"
"github.com/grafana/regexp"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/google/uuid"
@ -46,11 +49,26 @@ type ManagedIdentityConfig struct {
ClientID string `yaml:"client_id,omitempty"`
}
// OAuthConfig is used to store azure oauth config values.
type OAuthConfig struct {
// ClientID is the clientId of the azure active directory application that is being used to authenticate.
ClientID string `yaml:"client_id,omitempty"`
// ClientSecret is the clientSecret of the azure active directory application that is being used to authenticate.
ClientSecret string `yaml:"client_secret,omitempty"`
// TenantID is the tenantId of the azure active directory application that is being used to authenticate.
TenantID string `yaml:"tenant_id,omitempty"`
}
// AzureADConfig is used to store the config values.
type AzureADConfig struct { // nolint:revive
// ManagedIdentity is the managed identity that is being used to authenticate.
ManagedIdentity *ManagedIdentityConfig `yaml:"managed_identity,omitempty"`
// OAuth is the oauth config that is being used to authenticate.
OAuth *OAuthConfig `yaml:"oauth,omitempty"`
// Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina.
Cloud string `yaml:"cloud,omitempty"`
}
@ -84,18 +102,47 @@ func (c *AzureADConfig) Validate() error {
return fmt.Errorf("must provide a cloud in the Azure AD config")
}
if c.ManagedIdentity == nil {
return fmt.Errorf("must provide an Azure Managed Identity in the Azure AD config")
if c.ManagedIdentity == nil && c.OAuth == nil {
return fmt.Errorf("must provide an Azure Managed Identity or Azure OAuth in the Azure AD config")
}
if c.ManagedIdentity.ClientID == "" {
return fmt.Errorf("must provide an Azure Managed Identity client_id in the Azure AD config")
if c.ManagedIdentity != nil && c.OAuth != nil {
return fmt.Errorf("cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config")
}
_, err := uuid.Parse(c.ManagedIdentity.ClientID)
if err != nil {
return fmt.Errorf("the provided Azure Managed Identity client_id provided is invalid")
if c.ManagedIdentity != nil {
if c.ManagedIdentity.ClientID == "" {
return fmt.Errorf("must provide an Azure Managed Identity client_id in the Azure AD config")
}
_, err := uuid.Parse(c.ManagedIdentity.ClientID)
if err != nil {
return fmt.Errorf("the provided Azure Managed Identity client_id is invalid")
}
}
if c.OAuth != nil {
if c.OAuth.ClientID == "" {
return fmt.Errorf("must provide an Azure OAuth client_id in the Azure AD config")
}
if c.OAuth.ClientSecret == "" {
return fmt.Errorf("must provide an Azure OAuth client_secret in the Azure AD config")
}
if c.OAuth.TenantID == "" {
return fmt.Errorf("must provide an Azure OAuth tenant_id in the Azure AD config")
}
var err error
_, err = uuid.Parse(c.OAuth.ClientID)
if err != nil {
return fmt.Errorf("the provided Azure OAuth client_id is invalid")
}
matched, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", c.OAuth.TenantID)
if err != nil || !matched {
return fmt.Errorf("the provided Azure OAuth tenant_id is invalid")
}
}
return nil
}
@ -146,21 +193,54 @@ func (rt *azureADRoundTripper) RoundTrip(req *http.Request) (*http.Response, err
// newTokenCredential returns a TokenCredential of different kinds like Azure Managed Identity and Azure AD application.
func newTokenCredential(cfg *AzureADConfig) (azcore.TokenCredential, error) {
cred, err := newManagedIdentityTokenCredential(cfg.ManagedIdentity.ClientID)
var cred azcore.TokenCredential
var err error
cloudConfiguration, err := getCloudConfiguration(cfg.Cloud)
if err != nil {
return nil, err
}
clientOpts := &azcore.ClientOptions{
Cloud: cloudConfiguration,
}
if cfg.ManagedIdentity != nil {
managedIdentityConfig := &ManagedIdentityConfig{
ClientID: cfg.ManagedIdentity.ClientID,
}
cred, err = newManagedIdentityTokenCredential(clientOpts, managedIdentityConfig)
if err != nil {
return nil, err
}
}
if cfg.OAuth != nil {
oAuthConfig := &OAuthConfig{
ClientID: cfg.OAuth.ClientID,
ClientSecret: cfg.OAuth.ClientSecret,
TenantID: cfg.OAuth.TenantID,
}
cred, err = newOAuthTokenCredential(clientOpts, oAuthConfig)
if err != nil {
return nil, err
}
}
return cred, nil
}
// newManagedIdentityTokenCredential returns a new Managed Identity token credential.
func newManagedIdentityTokenCredential(managedIdentityClientID string) (azcore.TokenCredential, error) {
clientID := azidentity.ClientID(managedIdentityClientID)
opts := &azidentity.ManagedIdentityCredentialOptions{ID: clientID}
func newManagedIdentityTokenCredential(clientOpts *azcore.ClientOptions, managedIdentityConfig *ManagedIdentityConfig) (azcore.TokenCredential, error) {
clientID := azidentity.ClientID(managedIdentityConfig.ClientID)
opts := &azidentity.ManagedIdentityCredentialOptions{ClientOptions: *clientOpts, ID: clientID}
return azidentity.NewManagedIdentityCredential(opts)
}
// newOAuthTokenCredential returns a new OAuth token credential.
func newOAuthTokenCredential(clientOpts *azcore.ClientOptions, oAuthConfig *OAuthConfig) (azcore.TokenCredential, error) {
opts := &azidentity.ClientSecretCredentialOptions{ClientOptions: *clientOpts}
return azidentity.NewClientSecretCredential(oAuthConfig.TenantID, oAuthConfig.ClientID, oAuthConfig.ClientSecret, opts)
}
// newTokenProvider helps to fetch accessToken for different types of credential. This also takes care of
// refreshing the accessToken before expiry. This accessToken is attached to the Authorization header while making requests.
func newTokenProvider(cfg *AzureADConfig, cred azcore.TokenCredential) (*tokenProvider, error) {
@ -245,3 +325,17 @@ func getAudience(cloud string) (string, error) {
return "", errors.New("Cloud is not specified or is incorrect: " + cloud)
}
}
// getCloudConfiguration returns the cloud Configuration, which contains the AAD endpoint for the given cloud.
func getCloudConfiguration(c string) (cloud.Configuration, error) {
switch strings.ToLower(c) {
case strings.ToLower(AzureChina):
return cloud.AzureChina, nil
case strings.ToLower(AzureGovernment):
return cloud.AzureGovernment, nil
case strings.ToLower(AzurePublic):
return cloud.AzurePublic, nil
default:
return cloud.Configuration{}, errors.New("Cloud is not specified or is incorrect: " + c)
}
}
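From a user's point of view, the new oauth block sits in the remote-write azuread section and is parsed like the testdata documents further down. A sketch that round-trips such a document with the same gopkg.in/yaml.v2 package the tests import; the structs here are local stand-ins mirroring the yaml tags above, not the real ones:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type OAuthConfig struct {
	ClientID     string `yaml:"client_id,omitempty"`
	ClientSecret string `yaml:"client_secret,omitempty"`
	TenantID     string `yaml:"tenant_id,omitempty"`
}

type AzureADConfig struct {
	OAuth *OAuthConfig `yaml:"oauth,omitempty"`
	Cloud string       `yaml:"cloud,omitempty"`
}

const doc = `
cloud: AzurePublic
oauth:
  client_id: 00000000-0000-0000-0000-000000000000
  client_secret: Cl1ent$ecret!
  tenant_id: 00000000-a12b-3cd4-e56f-000000000000
`

func main() {
	var cfg AzureADConfig
	if err := yaml.Unmarshal([]byte(doc), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Cloud, cfg.OAuth.TenantID)
}
```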

View file

@ -26,17 +26,20 @@ import (
"github.com/google/uuid"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"gopkg.in/yaml.v2"
)
const (
dummyAudience = "dummyAudience"
dummyClientID = "00000000-0000-0000-0000-000000000000"
testTokenString = "testTokenString"
dummyAudience = "dummyAudience"
dummyClientID = "00000000-0000-0000-0000-000000000000"
dummyClientSecret = "Cl1ent$ecret!"
dummyTenantID = "00000000-a12b-3cd4-e56f-000000000000"
testTokenString = "testTokenString"
)
var testTokenExpiry = time.Now().Add(10 * time.Second)
var testTokenExpiry = time.Now().Add(5 * time.Second)
type AzureAdTestSuite struct {
suite.Suite
@ -62,47 +65,64 @@ func TestAzureAd(t *testing.T) {
}
func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() {
var gotReq *http.Request
testToken := &azcore.AccessToken{
Token: testTokenString,
ExpiresOn: testTokenExpiry,
cases := []struct {
cfg *AzureADConfig
}{
// AzureAd roundtripper with managed identity.
{
cfg: &AzureADConfig{
Cloud: "AzurePublic",
ManagedIdentity: &ManagedIdentityConfig{
ClientID: dummyClientID,
},
},
},
// AzureAd roundtripper with OAuth.
{
cfg: &AzureADConfig{
Cloud: "AzurePublic",
OAuth: &OAuthConfig{
ClientID: dummyClientID,
ClientSecret: dummyClientSecret,
TenantID: dummyTenantID,
},
},
},
}
for _, c := range cases {
var gotReq *http.Request
managedIdentityConfig := &ManagedIdentityConfig{
ClientID: dummyClientID,
testToken := &azcore.AccessToken{
Token: testTokenString,
ExpiresOn: testTokenExpiry,
}
ad.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil)
tokenProvider, err := newTokenProvider(c.cfg, ad.mockCredential)
ad.Assert().NoError(err)
rt := &azureADRoundTripper{
next: promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
gotReq = req
return &http.Response{StatusCode: http.StatusOK}, nil
}),
tokenProvider: tokenProvider,
}
cli := &http.Client{Transport: rt}
req, err := http.NewRequest(http.MethodPost, "https://example.com", strings.NewReader("Hello, world!"))
ad.Assert().NoError(err)
_, err = cli.Do(req)
ad.Assert().NoError(err)
ad.Assert().NotNil(gotReq)
origReq := gotReq
ad.Assert().NotEmpty(origReq.Header.Get("Authorization"))
ad.Assert().Equal("Bearer "+testTokenString, origReq.Header.Get("Authorization"))
}
azureAdConfig := &AzureADConfig{
Cloud: "AzurePublic",
ManagedIdentity: managedIdentityConfig,
}
ad.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil)
tokenProvider, err := newTokenProvider(azureAdConfig, ad.mockCredential)
ad.Assert().NoError(err)
rt := &azureADRoundTripper{
next: promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
gotReq = req
return &http.Response{StatusCode: http.StatusOK}, nil
}),
tokenProvider: tokenProvider,
}
cli := &http.Client{Transport: rt}
req, err := http.NewRequest(http.MethodPost, "https://example.com", strings.NewReader("Hello, world!"))
ad.Assert().NoError(err)
_, err = cli.Do(req)
ad.Assert().NoError(err)
ad.Assert().NotNil(gotReq)
origReq := gotReq
ad.Assert().NotEmpty(origReq.Header.Get("Authorization"))
ad.Assert().Equal("Bearer "+testTokenString, origReq.Header.Get("Authorization"))
}
func loadAzureAdConfig(filename string) (*AzureADConfig, error) {
@ -117,42 +137,54 @@ func loadAzureAdConfig(filename string) (*AzureADConfig, error) {
return &cfg, nil
}
func testGoodConfig(t *testing.T, filename string) {
_, err := loadAzureAdConfig(filename)
if err != nil {
t.Fatalf("Unexpected error parsing %s: %s", filename, err)
func TestAzureAdConfig(t *testing.T) {
cases := []struct {
filename string
err string
}{
// Missing managed_identity or oauth field.
{
filename: "testdata/azuread_bad_configmissing.yaml",
err: "must provide an Azure Managed Identity or Azure OAuth in the Azure AD config",
},
// Invalid managed_identity client_id.
{
filename: "testdata/azuread_bad_invalidclientid.yaml",
err: "the provided Azure Managed Identity client_id is invalid",
},
// Missing tenant_id in oauth config.
{
filename: "testdata/azuread_bad_invalidoauthconfig.yaml",
err: "must provide an Azure OAuth tenant_id in the Azure AD config",
},
// Invalid config when both managed_identity and oauth are provided.
{
filename: "testdata/azuread_bad_twoconfig.yaml",
err: "cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config",
},
// Valid config with the optional cloud field missing.
{
filename: "testdata/azuread_good_cloudmissing.yaml",
},
// Valid managed identity config.
{
filename: "testdata/azuread_good_managedidentity.yaml",
},
// Valid OAuth config.
{
filename: "testdata/azuread_good_oauth.yaml",
},
}
}
func TestGoodAzureAdConfig(t *testing.T) {
filename := "testdata/azuread_good.yaml"
testGoodConfig(t, filename)
}
func TestGoodCloudMissingAzureAdConfig(t *testing.T) {
filename := "testdata/azuread_good_cloudmissing.yaml"
testGoodConfig(t, filename)
}
func TestBadClientIdMissingAzureAdConfig(t *testing.T) {
filename := "testdata/azuread_bad_clientidmissing.yaml"
_, err := loadAzureAdConfig(filename)
if err == nil {
t.Fatalf("Did not receive expected error unmarshaling bad azuread config")
}
if !strings.Contains(err.Error(), "must provide an Azure Managed Identity in the Azure AD config") {
t.Errorf("Received unexpected error from unmarshal of %s: %s", filename, err.Error())
}
}
func TestBadInvalidClientIdAzureAdConfig(t *testing.T) {
filename := "testdata/azuread_bad_invalidclientid.yaml"
_, err := loadAzureAdConfig(filename)
if err == nil {
t.Fatalf("Did not receive expected error unmarshaling bad azuread config")
}
if !strings.Contains(err.Error(), "the provided Azure Managed Identity client_id provided is invalid") {
t.Errorf("Received unexpected error from unmarshal of %s: %s", filename, err.Error())
for _, c := range cases {
_, err := loadAzureAdConfig(c.filename)
if c.err != "" {
if err == nil {
t.Fatalf("Did not receive expected error unmarshaling bad azuread config")
}
require.EqualError(t, err, c.err)
} else {
require.NoError(t, err)
}
}
}
@ -173,75 +205,90 @@ func TestTokenProvider(t *testing.T) {
suite.Run(t, new(TokenProviderTestSuite))
}
func (s *TokenProviderTestSuite) TestNewTokenProvider_NilAudience_Fail() {
managedIdentityConfig := &ManagedIdentityConfig{
ClientID: dummyClientID,
func (s *TokenProviderTestSuite) TestNewTokenProvider() {
cases := []struct {
cfg *AzureADConfig
err string
}{
// Invalid tokenProvider for managed_identity.
{
cfg: &AzureADConfig{
Cloud: "PublicAzure",
ManagedIdentity: &ManagedIdentityConfig{
ClientID: dummyClientID,
},
},
err: "Cloud is not specified or is incorrect: ",
},
// Invalid tokenProvider for oauth.
{
cfg: &AzureADConfig{
Cloud: "PublicAzure",
OAuth: &OAuthConfig{
ClientID: dummyClientID,
ClientSecret: dummyClientSecret,
TenantID: dummyTenantID,
},
},
err: "Cloud is not specified or is incorrect: ",
},
// Valid tokenProvider for managed_identity.
{
cfg: &AzureADConfig{
Cloud: "AzurePublic",
ManagedIdentity: &ManagedIdentityConfig{
ClientID: dummyClientID,
},
},
},
// Valid tokenProvider for oauth.
{
cfg: &AzureADConfig{
Cloud: "AzurePublic",
OAuth: &OAuthConfig{
ClientID: dummyClientID,
ClientSecret: dummyClientSecret,
TenantID: dummyTenantID,
},
},
},
}
mockGetTokenCallCounter := 1
for _, c := range cases {
if c.err != "" {
actualTokenProvider, actualErr := newTokenProvider(c.cfg, s.mockCredential)
azureAdConfig := &AzureADConfig{
Cloud: "PublicAzure",
ManagedIdentity: managedIdentityConfig,
s.Assert().Nil(actualTokenProvider)
s.Assert().NotNil(actualErr)
s.Assert().ErrorContains(actualErr, c.err)
} else {
testToken := &azcore.AccessToken{
Token: testTokenString,
ExpiresOn: testTokenExpiry,
}
s.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil).Once().
On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil)
actualTokenProvider, actualErr := newTokenProvider(c.cfg, s.mockCredential)
s.Assert().NotNil(actualTokenProvider)
s.Assert().Nil(actualErr)
s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background()))
// The token is set to refresh at half of the expiry time. The test tokens are set to expire in 5s.
// Hence the 4-second wait to check that the token is refreshed.
time.Sleep(4 * time.Second)
s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background()))
s.mockCredential.AssertNumberOfCalls(s.T(), "GetToken", 2*mockGetTokenCallCounter)
mockGetTokenCallCounter += 1
accessToken, err := actualTokenProvider.getAccessToken(context.Background())
s.Assert().Nil(err)
s.Assert().NotEqual(accessToken, testTokenString)
}
}
actualTokenProvider, actualErr := newTokenProvider(azureAdConfig, s.mockCredential)
s.Assert().Nil(actualTokenProvider)
s.Assert().NotNil(actualErr)
s.Assert().Equal("Cloud is not specified or is incorrect: "+azureAdConfig.Cloud, actualErr.Error())
}
func (s *TokenProviderTestSuite) TestNewTokenProvider_Success() {
managedIdentityConfig := &ManagedIdentityConfig{
ClientID: dummyClientID,
}
azureAdConfig := &AzureADConfig{
Cloud: "AzurePublic",
ManagedIdentity: managedIdentityConfig,
}
s.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil)
actualTokenProvider, actualErr := newTokenProvider(azureAdConfig, s.mockCredential)
s.Assert().NotNil(actualTokenProvider)
s.Assert().Nil(actualErr)
s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background()))
}
func (s *TokenProviderTestSuite) TestPeriodicTokenRefresh_Success() {
// setup
managedIdentityConfig := &ManagedIdentityConfig{
ClientID: dummyClientID,
}
azureAdConfig := &AzureADConfig{
Cloud: "AzurePublic",
ManagedIdentity: managedIdentityConfig,
}
testToken := &azcore.AccessToken{
Token: testTokenString,
ExpiresOn: testTokenExpiry,
}
s.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil).Once().
On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil)
actualTokenProvider, actualErr := newTokenProvider(azureAdConfig, s.mockCredential)
s.Assert().NotNil(actualTokenProvider)
s.Assert().Nil(actualErr)
s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background()))
// Token set to refresh at half of the expiry time. The test tokens are set to expiry in 10s.
// Hence, the 6 seconds wait to check if the token is refreshed.
time.Sleep(6 * time.Second)
s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background()))
s.mockCredential.AssertNumberOfCalls(s.T(), "GetToken", 2)
accessToken, err := actualTokenProvider.getAccessToken(context.Background())
s.Assert().Nil(err)
s.Assert().NotEqual(accessToken, testTokenString)
}
func getToken() azcore.AccessToken {

View file

@ -0,0 +1,4 @@
cloud: AzurePublic
oauth:
client_id: 00000000-0000-0000-0000-000000000000
client_secret: Cl1ent$ecret!

View file

@ -0,0 +1,7 @@
cloud: AzurePublic
managed_identity:
client_id: 00000000-0000-0000-0000-000000000000
oauth:
client_id: 00000000-0000-0000-0000-000000000000
client_secret: Cl1ent$ecret!
tenant_id: 00000000-a12b-3cd4-e56f-000000000000

View file

@ -0,0 +1,5 @@
cloud: AzurePublic
oauth:
client_id: 00000000-0000-0000-0000-000000000000
client_secret: Cl1ent$ecret!
tenant_id: 00000000-a12b-3cd4-e56f-000000000000

View file

@ -187,8 +187,8 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet
}
if sortSeries {
slices.SortFunc(series, func(a, b storage.Series) bool {
return labels.Compare(a.Labels(), b.Labels()) < 0
slices.SortFunc(series, func(a, b storage.Series) int {
return labels.Compare(a.Labels(), b.Labels())
})
}
return &concreteSeriesSet{

View file

@ -16,6 +16,7 @@ package remote
import (
"context"
"net/http"
"strings"
"sync"
"github.com/go-kit/log"
@ -93,8 +94,8 @@ func (h *readHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
Value: value,
})
}
slices.SortFunc(sortedExternalLabels, func(a, b prompb.Label) bool {
return a.Name < b.Name
slices.SortFunc(sortedExternalLabels, func(a, b prompb.Label) int {
return strings.Compare(a.Name, b.Name)
})
responseType, err := NegotiateResponseType(req.AcceptedResponseTypes)

View file

@ -232,8 +232,15 @@ func (c *LeveledCompactor) Plan(dir string) ([]string, error) {
}
func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) {
slices.SortFunc(dms, func(a, b dirMeta) bool {
return a.meta.MinTime < b.meta.MinTime
slices.SortFunc(dms, func(a, b dirMeta) int {
switch {
case a.meta.MinTime < b.meta.MinTime:
return -1
case a.meta.MinTime > b.meta.MinTime:
return 1
default:
return 0
}
})
res := c.selectOverlappingDirs(dms)
@ -415,8 +422,8 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
for s := range sources {
res.Compaction.Sources = append(res.Compaction.Sources, s)
}
slices.SortFunc(res.Compaction.Sources, func(a, b ulid.ULID) bool {
return a.Compare(b) < 0
slices.SortFunc(res.Compaction.Sources, func(a, b ulid.ULID) int {
return a.Compare(b)
})
res.MinTime = mint

View file

@ -630,8 +630,15 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
return nil, nil
}
slices.SortFunc(loadable, func(a, b *Block) bool {
return a.Meta().MinTime < b.Meta().MinTime
slices.SortFunc(loadable, func(a, b *Block) int {
switch {
case a.Meta().MinTime < b.Meta().MinTime:
return -1
case a.Meta().MinTime > b.Meta().MinTime:
return 1
default:
return 0
}
})
blockMetas := make([]BlockMeta, 0, len(loadable))
@ -1506,8 +1513,15 @@ func (db *DB) reloadBlocks() (err error) {
}
db.metrics.blocksBytes.Set(float64(blocksSize))
slices.SortFunc(toLoad, func(a, b *Block) bool {
return a.Meta().MinTime < b.Meta().MinTime
slices.SortFunc(toLoad, func(a, b *Block) int {
switch {
case a.Meta().MinTime < b.Meta().MinTime:
return -1
case a.Meta().MinTime > b.Meta().MinTime:
return 1
default:
return 0
}
})
// Swap new blocks first for subsequently created readers to be seen.
@ -1581,8 +1595,15 @@ func deletableBlocks(db *DB, blocks []*Block) map[ulid.ULID]struct{} {
// Sort the blocks by time - newest to oldest (largest to smallest timestamp).
// This ensures that the retentions will remove the oldest blocks.
slices.SortFunc(blocks, func(a, b *Block) bool {
return a.Meta().MaxTime > b.Meta().MaxTime
slices.SortFunc(blocks, func(a, b *Block) int {
switch {
case b.Meta().MaxTime < a.Meta().MaxTime:
return -1
case b.Meta().MaxTime > a.Meta().MaxTime:
return 1
default:
return 0
}
})
for _, block := range blocks {

View file

@ -185,8 +185,8 @@ func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*label
}
}
slices.SortFunc(ret, func(a, b exemplar.QueryResult) bool {
return labels.Compare(a.SeriesLabels, b.SeriesLabels) < 0
slices.SortFunc(ret, func(a, b exemplar.QueryResult) int {
return labels.Compare(a.SeriesLabels, b.SeriesLabels)
})
return ret, nil

View file

@ -286,7 +286,6 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wlog.WL, opts *Hea
if err := h.resetInMemoryState(); err != nil {
return nil, err
}
h.metrics = newHeadMetrics(h, r)
if opts.ChunkPool == nil {
opts.ChunkPool = chunkenc.NewPool()
@ -306,6 +305,7 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wlog.WL, opts *Hea
if err != nil {
return nil, err
}
h.metrics = newHeadMetrics(h, r)
return h, nil
}

View file

@ -140,8 +140,8 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
return index.ErrPostings(errors.Wrap(err, "expand postings"))
}
slices.SortFunc(series, func(a, b *memSeries) bool {
return labels.Compare(a.lset, b.lset) < 0
slices.SortFunc(series, func(a, b *memSeries) int {
return labels.Compare(a.lset, b.lset)
})
// Convert back to list.

View file

@ -4905,6 +4905,16 @@ func TestHistogramValidation(t *testing.T) {
"valid histogram": {
h: tsdbutil.GenerateTestHistograms(1)[0],
},
"valid histogram that has its Count (4) higher than the actual total of buckets (2 + 1)": {
// This case is possible if NaN values (which do not fall into any bucket) are observed.
h: &histogram.Histogram{
ZeroCount: 2,
Count: 4,
Sum: math.NaN(),
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{1},
},
},
"rejects histogram that has too few negative buckets": {
h: &histogram.Histogram{
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},

View file

@ -19,6 +19,7 @@ import (
"encoding/binary"
"runtime"
"sort"
"strings"
"sync"
"github.com/pkg/errors"
@ -108,11 +109,14 @@ func (p *MemPostings) SortedKeys() []labels.Label {
}
p.mtx.RUnlock()
slices.SortFunc(keys, func(a, b labels.Label) bool {
if a.Name != b.Name {
return a.Name < b.Name
slices.SortFunc(keys, func(a, b labels.Label) int {
nameCompare := strings.Compare(a.Name, b.Name)
// Order by name first; if the names are equal, fall through and compare values.
if nameCompare != 0 {
return nameCompare
}
return a.Value < b.Value
return strings.Compare(a.Value, b.Value)
})
return keys
}
@ -409,6 +413,7 @@ type Postings interface {
Seek(v storage.SeriesRef) bool
// At returns the value at the current iterator position.
// At should only be called after a successful call to Next or Seek.
At() storage.SeriesRef
// Err returns the last error of the iterator.

View file

@ -990,11 +990,9 @@ func TestPostingsCloner(t *testing.T) {
check: func(t testing.TB, pc *PostingsCloner) {
p1 := pc.Clone()
require.False(t, p1.Seek(9))
require.Equal(t, storage.SeriesRef(0), p1.At())
p2 := pc.Clone()
require.False(t, p2.Seek(10))
require.Equal(t, storage.SeriesRef(0), p2.At())
},
},
{
@ -1002,7 +1000,6 @@ func TestPostingsCloner(t *testing.T) {
check: func(t testing.TB, pc *PostingsCloner) {
p1 := pc.Clone()
require.False(t, p1.Seek(9))
require.Equal(t, storage.SeriesRef(0), p1.At())
p2 := pc.Clone()
require.True(t, p2.Seek(2))
@ -1070,21 +1067,12 @@ func TestPostingsCloner(t *testing.T) {
require.Equal(t, storage.SeriesRef(4), p2.At())
},
},
{
name: "at before call of next shouldn't panic",
check: func(t testing.TB, pc *PostingsCloner) {
p := pc.Clone()
require.Equal(t, storage.SeriesRef(0), p.At())
},
},
{
name: "ensure a failed seek doesn't allow more next calls",
check: func(t testing.TB, pc *PostingsCloner) {
p := pc.Clone()
require.False(t, p.Seek(9))
require.Equal(t, storage.SeriesRef(0), p.At())
require.False(t, p.Next())
require.Equal(t, storage.SeriesRef(0), p.At())
},
},
} {
@ -1241,3 +1229,149 @@ func TestPostingsWithIndexHeap(t *testing.T) {
require.Equal(t, storage.SeriesRef(25), node.p.At())
})
}
func TestListPostings(t *testing.T) {
t.Run("empty list", func(t *testing.T) {
p := NewListPostings(nil)
require.False(t, p.Next())
require.False(t, p.Seek(10))
require.False(t, p.Next())
require.NoError(t, p.Err())
})
t.Run("one posting", func(t *testing.T) {
t.Run("next", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
require.True(t, p.Next())
require.Equal(t, storage.SeriesRef(10), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
})
t.Run("seek less", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
require.True(t, p.Seek(5))
require.Equal(t, storage.SeriesRef(10), p.At())
require.True(t, p.Seek(5))
require.Equal(t, storage.SeriesRef(10), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
})
t.Run("seek equal", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
require.True(t, p.Seek(10))
require.Equal(t, storage.SeriesRef(10), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
})
t.Run("seek more", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
require.False(t, p.Seek(15))
require.False(t, p.Next())
require.NoError(t, p.Err())
})
t.Run("seek after next", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
require.True(t, p.Next())
require.False(t, p.Seek(15))
require.False(t, p.Next())
require.NoError(t, p.Err())
})
})
t.Run("multiple postings", func(t *testing.T) {
t.Run("next", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20})
require.True(t, p.Next())
require.Equal(t, storage.SeriesRef(10), p.At())
require.True(t, p.Next())
require.Equal(t, storage.SeriesRef(20), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
})
t.Run("seek", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20})
require.True(t, p.Seek(5))
require.Equal(t, storage.SeriesRef(10), p.At())
require.True(t, p.Seek(5))
require.Equal(t, storage.SeriesRef(10), p.At())
require.True(t, p.Seek(10))
require.Equal(t, storage.SeriesRef(10), p.At())
require.True(t, p.Next())
require.Equal(t, storage.SeriesRef(20), p.At())
require.True(t, p.Seek(10))
require.Equal(t, storage.SeriesRef(20), p.At())
require.True(t, p.Seek(20))
require.Equal(t, storage.SeriesRef(20), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
})
t.Run("seek lest than last", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50})
require.True(t, p.Seek(45))
require.Equal(t, storage.SeriesRef(50), p.At())
require.False(t, p.Next())
})
t.Run("seek exactly last", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50})
require.True(t, p.Seek(50))
require.Equal(t, storage.SeriesRef(50), p.At())
require.False(t, p.Next())
})
t.Run("seek more than last", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50})
require.False(t, p.Seek(60))
require.False(t, p.Next())
})
})
t.Run("seek", func(t *testing.T) {
for _, c := range []int{2, 8, 9, 10} {
t.Run(fmt.Sprintf("count=%d", c), func(t *testing.T) {
list := make([]storage.SeriesRef, c)
for i := 0; i < c; i++ {
list[i] = storage.SeriesRef(i * 10)
}
t.Run("all one by one", func(t *testing.T) {
p := NewListPostings(list)
for i := 0; i < c; i++ {
require.True(t, p.Seek(storage.SeriesRef(i*10)))
require.Equal(t, storage.SeriesRef(i*10), p.At())
}
require.False(t, p.Seek(storage.SeriesRef(c*10)))
})
t.Run("each one", func(t *testing.T) {
for _, ref := range list {
p := NewListPostings(list)
require.True(t, p.Seek(ref))
require.Equal(t, ref, p.At())
}
})
})
}
})
}
// BenchmarkListPostings benchmarks ListPostings by iterating Next/At sequentially.
// See also BenchmarkIntersect as it performs more `At` calls than `Next` calls when intersecting.
func BenchmarkListPostings(b *testing.B) {
const maxCount = 1e6
input := make([]storage.SeriesRef, maxCount)
for i := 0; i < maxCount; i++ {
input[i] = storage.SeriesRef(i << 2)
}
for _, count := range []int{100, 1e3, 10e3, 100e3, maxCount} {
b.Run(fmt.Sprintf("count=%d", count), func(b *testing.B) {
for i := 0; i < b.N; i++ {
p := NewListPostings(input[:count])
var sum storage.SeriesRef
for p.Next() {
sum += p.At()
}
require.NotZero(b, sum)
}
})
}
}
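These tests pin down the Postings contract clarified earlier in this sync: At is only meaningful after Next or Seek has returned true, which is why the old At() == 0 assertions were deleted. A minimal sketch of a conforming consumer over a hypothetical list-backed iterator:

```go
package main

import "fmt"

type listPostings struct {
	list []uint64
	cur  uint64
}

func (p *listPostings) Next() bool {
	if len(p.list) == 0 {
		return false
	}
	p.cur, p.list = p.list[0], p.list[1:]
	return true
}

// At is undefined before a successful Next/Seek; callers must not rely on it.
func (p *listPostings) At() uint64 { return p.cur }

func main() {
	p := &listPostings{list: []uint64{10, 20, 30}}
	for p.Next() { // only consult At inside the loop
		fmt.Println(p.At())
	}
}
```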

View file

@ -63,8 +63,15 @@ func (m *maxHeap) push(item Stat) {
}
func (m *maxHeap) get() []Stat {
slices.SortFunc(m.Items, func(a, b Stat) bool {
return a.Count > b.Count
slices.SortFunc(m.Items, func(a, b Stat) int {
switch {
case b.Count < a.Count:
return -1
case b.Count > a.Count:
return 1
default:
return 0
}
})
return m.Items
}

View file

@ -183,18 +183,40 @@ type chunkMetaAndChunkDiskMapperRef struct {
origMaxT int64
}
func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) bool {
if a.meta.MinTime == b.meta.MinTime {
return a.meta.Ref < b.meta.Ref
func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) int {
switch {
case a.meta.MinTime < b.meta.MinTime:
return -1
case a.meta.MinTime > b.meta.MinTime:
return 1
}
switch {
case a.meta.Ref < b.meta.Ref:
return -1
case a.meta.Ref > b.meta.Ref:
return 1
default:
return 0
}
return a.meta.MinTime < b.meta.MinTime
}
func lessByMinTimeAndMinRef(a, b chunks.Meta) bool {
if a.MinTime == b.MinTime {
return a.Ref < b.Ref
func lessByMinTimeAndMinRef(a, b chunks.Meta) int {
switch {
case a.MinTime < b.MinTime:
return -1
case a.MinTime > b.MinTime:
return 1
}
switch {
case a.Ref < b.Ref:
return -1
case a.Ref > b.Ref:
return 1
default:
return 0
}
return a.MinTime < b.MinTime
}
func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {

View file

@ -8,7 +8,6 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
@ -307,13 +306,13 @@ func TestPostingsForMatchersCache(t *testing.T) {
actual, err := index.ExpandPostings(p)
require.NoError(t, err)
assert.Equal(t, refsLists[matchersKey(matchers)], actual)
require.Equal(t, refsLists[matchersKey(matchers)], actual)
}
}
// At this point we expect that the postings have been computed only once for the 3 matchers.
for i := 0; i < 3; i++ {
assert.Equalf(t, 1, callsPerMatchers[matchersKey(matchersLists[i])], "matcher %d", i)
require.Equalf(t, 1, callsPerMatchers[matchersKey(matchersLists[i])], "matcher %d", i)
}
// Call PostingsForMatchers() for a 4th matcher. We expect this will evict the oldest cached entry.
@ -325,17 +324,17 @@ func TestPostingsForMatchersCache(t *testing.T) {
actual, err := index.ExpandPostings(p)
require.NoError(t, err)
assert.Equal(t, refsLists[matchersKey(matchers)], actual)
require.Equal(t, refsLists[matchersKey(matchers)], actual)
}
// To ensure the 1st (oldest) entry was removed, we call PostingsForMatchers() again on that matchers.
_, err := c.PostingsForMatchers(ctx, indexForPostingsMock{}, true, matchersLists[0]...)
require.NoError(t, err)
assert.Equal(t, 2, callsPerMatchers[matchersKey(matchersLists[0])])
assert.Equal(t, 1, callsPerMatchers[matchersKey(matchersLists[1])])
assert.Equal(t, 1, callsPerMatchers[matchersKey(matchersLists[2])])
assert.Equal(t, 1, callsPerMatchers[matchersKey(matchersLists[3])])
require.Equal(t, 2, callsPerMatchers[matchersKey(matchersLists[0])])
require.Equal(t, 1, callsPerMatchers[matchersKey(matchersLists[1])])
require.Equal(t, 1, callsPerMatchers[matchersKey(matchersLists[2])])
require.Equal(t, 1, callsPerMatchers[matchersKey(matchersLists[3])])
})
}

View file

@ -221,8 +221,12 @@ func PostingsForMatchers(ctx context.Context, ix IndexPostingsReader, ms ...*lab
// there is no chance that the set we subtract from
// contains postings of series that didn't exist when
// we constructed the set we subtract by.
slices.SortStableFunc(ms, func(i, j *labels.Matcher) bool {
return !isSubtractingMatcher(i) && isSubtractingMatcher(j)
slices.SortStableFunc(ms, func(i, j *labels.Matcher) int {
if !isSubtractingMatcher(i) && isSubtractingMatcher(j) {
return -1
}
if isSubtractingMatcher(i) && !isSubtractingMatcher(j) {
return +1
}
return 0
})
for _, m := range ms {
@ -706,7 +710,7 @@ func (p *populateWithDelGenericSeriesIterator) reset(blockID ulid.ULID, cr Chunk
p.chks = chks
p.i = -1
p.err = nil
p.bufIter.Iter = nil
// Note we don't touch p.bufIter.Iter; it is holding on to an iterator we might reuse in next().
p.bufIter.Intervals = p.bufIter.Intervals[:0]
p.intervals = intervals
p.currDelIter = nil

View file

@ -33,10 +33,9 @@ const (
)
func BenchmarkQuerier(b *testing.B) {
chunkDir := b.TempDir()
opts := DefaultHeadOptions()
opts.ChunkRange = 1000
opts.ChunkDirRoot = chunkDir
opts.ChunkDirRoot = b.TempDir()
h, err := NewHead(nil, nil, nil, nil, opts, nil)
require.NoError(b, err)
defer func() {
@ -60,9 +59,13 @@ func BenchmarkQuerier(b *testing.B) {
}
require.NoError(b, app.Commit())
ir, err := h.Index()
require.NoError(b, err)
b.Run("Head", func(b *testing.B) {
ir, err := h.Index()
require.NoError(b, err)
defer func() {
require.NoError(b, ir.Close())
}()
b.Run("PostingsForMatchers", func(b *testing.B) {
benchmarkPostingsForMatchers(b, ir)
})
@ -71,18 +74,20 @@ func BenchmarkQuerier(b *testing.B) {
})
})
tmpdir := b.TempDir()
blockdir := createBlockFromHead(b, tmpdir, h)
block, err := OpenBlock(nil, blockdir, nil)
require.NoError(b, err)
defer func() {
require.NoError(b, block.Close())
}()
ir, err = block.Index()
require.NoError(b, err)
defer ir.Close()
b.Run("Block", func(b *testing.B) {
blockdir := createBlockFromHead(b, b.TempDir(), h)
block, err := OpenBlock(nil, blockdir, nil)
require.NoError(b, err)
defer func() {
require.NoError(b, block.Close())
}()
ir, err := block.Index()
require.NoError(b, err)
defer func() {
require.NoError(b, ir.Close())
}()
b.Run("PostingsForMatchers", func(b *testing.B) {
benchmarkPostingsForMatchers(b, ir)
})
@ -247,10 +252,9 @@ func BenchmarkMergedStringIter(b *testing.B) {
}
func BenchmarkQuerierSelect(b *testing.B) {
chunkDir := b.TempDir()
opts := DefaultHeadOptions()
opts.ChunkRange = 1000
opts.ChunkDirRoot = chunkDir
opts.ChunkDirRoot = b.TempDir()
h, err := NewHead(nil, nil, nil, nil, opts, nil)
require.NoError(b, err)
defer h.Close()

View file

@ -374,8 +374,8 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) {
refs = append(refs, checkpointRef{name: fi.Name(), index: idx})
}
slices.SortFunc(refs, func(a, b checkpointRef) bool {
return a.index < b.index
slices.SortFunc(refs, func(a, b checkpointRef) int {
return a.index - b.index
})
return refs, nil

View file

@ -909,8 +909,8 @@ func listSegments(dir string) (refs []segmentRef, err error) {
}
refs = append(refs, segmentRef{name: fn, index: k})
}
slices.SortFunc(refs, func(a, b segmentRef) bool {
return a.index < b.index
slices.SortFunc(refs, func(a, b segmentRef) int {
return a.index - b.index
})
for i := 0; i < len(refs)-1; i++ {
if refs[i].index+1 != refs[i+1].index {

View file

@ -105,7 +105,8 @@ var (
MixedFloatsHistogramsWarning = fmt.Errorf("%w: encountered a mix of histograms and floats for metric name", PromQLWarning)
MixedClassicNativeHistogramsWarning = fmt.Errorf("%w: vector contains a mix of classic and native histograms for metric name", PromQLWarning)
PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count:", PromQLInfo)
HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (and may give inaccurate results) for metric name", PromQLInfo)
)
type annoErr struct {
@ -163,3 +164,12 @@ func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) an
Err: fmt.Errorf("%w %q", PossibleNonCounterInfo, metricName),
}
}
// NewHistogramQuantileForcedMonotonicityInfo is used when the input (classic histograms) to
// histogram_quantile needs to be forced to be monotonic.
func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange.PositionRange) annoErr {
return annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", HistogramQuantileForcedMonotonicityInfo, metricName),
}
}
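Because every annotation wraps the PromQLWarning or PromQLInfo sentinel via %w, callers can classify an annotation with errors.Is. A self-contained sketch of the pattern, with sentinel texts abbreviated:

package main

import (
	"errors"
	"fmt"
)

var (
	PromQLInfo             = errors.New("PromQL info")
	PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter", PromQLInfo)
)

func main() {
	err := fmt.Errorf("%w %q", PossibleNonCounterInfo, "http_requests")
	// %w chains unwrap transitively, so the innermost sentinel still matches.
	fmt.Println(errors.Is(err, PromQLInfo)) // true
}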

View file

@ -85,9 +85,7 @@ func (t *TimerGroup) String() string {
for _, timer := range t.timers {
timers = append(timers, timer)
}
slices.SortFunc(timers, func(a, b *Timer) bool {
return a.created < b.created
})
slices.SortFunc(timers, func(a, b *Timer) int { return a.created - b.created })
result := &bytes.Buffer{}
for _, timer := range timers {
fmt.Fprintf(result, "%s\n", timer)

View file

@ -436,7 +436,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) {
return invalidParamError(err, "timeout")
}
ctx, cancel = context.WithTimeout(ctx, timeout)
ctx, cancel = context.WithDeadline(ctx, api.now().Add(timeout))
defer cancel()
}
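The practical difference: context.WithTimeout derives the deadline from the real wall clock, while WithDeadline lets the injected api.now() clock decide, so a test can predict the exact deadline. A minimal sketch with a fixed fake clock (names illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	now := func() time.Time { return time.Unix(0, 0) } // injected fake clock
	timeout := time.Second

	ctx, cancel := context.WithDeadline(context.Background(), now().Add(timeout))
	defer cancel()

	deadline, ok := ctx.Deadline()
	fmt.Println(ok, deadline.Equal(now().Add(timeout))) // true true
}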

View file

@ -404,7 +404,7 @@ func TestEndpoints(t *testing.T) {
testEndpoints(t, api, testTargetRetriever, storage, true)
})
// Run all the API tests against a API that is wired to forward queries via
// Run all the API tests against an API that is wired to forward queries via
// the remote read client to a test server, which in turn serves them from
// the test storage.
t.Run("remote", func(t *testing.T) {
@ -3660,3 +3660,107 @@ func TestExtractQueryOpts(t *testing.T) {
})
}
}
// Test query timeout parameter.
func TestQueryTimeout(t *testing.T) {
storage := promql.LoadedStorage(t, `
load 1m
test_metric1{foo="bar"} 0+100x100
`)
t.Cleanup(func() {
_ = storage.Close()
})
now := time.Now()
for _, tc := range []struct {
name string
method string
}{
{
name: "GET method",
method: http.MethodGet,
},
{
name: "POST method",
method: http.MethodPost,
},
} {
t.Run(tc.name, func(t *testing.T) {
engine := &fakeEngine{}
api := &API{
Queryable: storage,
QueryEngine: engine,
ExemplarQueryable: storage.ExemplarQueryable(),
alertmanagerRetriever: testAlertmanagerRetriever{}.toFactory(),
flagsMap: sampleFlagMap,
now: func() time.Time { return now },
config: func() config.Config { return samplePrometheusCfg },
ready: func(f http.HandlerFunc) http.HandlerFunc { return f },
}
query := url.Values{
"query": []string{"2"},
"timeout": []string{"1s"},
}
ctx := context.Background()
req, err := http.NewRequest(tc.method, fmt.Sprintf("http://example.com?%s", query.Encode()), nil)
require.NoError(t, err)
req.RemoteAddr = "127.0.0.1:20201"
res := api.query(req.WithContext(ctx))
assertAPIError(t, res.err, errorNone)
require.Len(t, engine.query.execCalls, 1)
deadline, ok := engine.query.execCalls[0].Deadline()
require.True(t, ok)
require.Equal(t, now.Add(time.Second), deadline)
})
}
}
// fakeEngine is a fake QueryEngine implementation.
type fakeEngine struct {
query fakeQuery
}
func (e *fakeEngine) SetQueryLogger(promql.QueryLogger) {}
func (e *fakeEngine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) {
return &e.query, nil
}
func (e *fakeEngine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) {
return &e.query, nil
}
// fakeQuery is a fake Query implementation.
type fakeQuery struct {
query string
execCalls []context.Context
}
func (q *fakeQuery) Exec(ctx context.Context) *promql.Result {
q.execCalls = append(q.execCalls, ctx)
return &promql.Result{
Value: &parser.StringLiteral{
Val: "test",
},
}
}
func (q *fakeQuery) Close() {}
func (q *fakeQuery) Statement() parser.Statement {
return nil
}
func (q *fakeQuery) Stats() *stats.Statistics {
return nil
}
func (q *fakeQuery) Cancel() {}
func (q *fakeQuery) String() string {
return q.query
}

View file

@ -17,6 +17,7 @@ import (
"fmt"
"net/http"
"sort"
"strings"
"github.com/go-kit/log/level"
"github.com/gogo/protobuf/proto"
@ -169,10 +170,10 @@ Loop:
return
}
slices.SortFunc(vec, func(a, b promql.Sample) bool {
slices.SortFunc(vec, func(a, b promql.Sample) int {
ni := a.Metric.Get(labels.MetricName)
nj := b.Metric.Get(labels.MetricName)
return ni < nj
return strings.Compare(ni, nj)
})
externalLabels := h.config.GlobalConfig.ExternalLabels.Map()

web/ui/package-lock.json (generated)

File diff suppressed because it is too large

View file

@ -13,6 +13,7 @@ interface RulesContentProps {
interface RuleGroup {
name: string;
file: string;
interval: string;
rules: Rule[];
evaluationTime: string;
lastEvaluation: string;
@ -58,13 +59,16 @@ export const RulesContent: FC<RulesContentProps> = ({ response }) => {
<Table bordered key={i}>
<thead>
<tr>
<td colSpan={3}>
<td>
<a href={'#' + g.name}>
<h4 id={g.name} className="text-break">
{g.name}
</h4>
</a>
</td>
<td colSpan={2}>
<h4>Interval: {humanizeDuration(parseFloat(g.interval) * 1000)}</h4>
</td>
<td>
<h4>{formatRelative(g.lastEvaluation, now())}</h4>
</td>