Merge branch 'main' into mantine-ui

Signed-off-by: Julius Volz <julius.volz@gmail.com>
Author: Julius Volz, 2024-07-31 21:01:15 +02:00
commit 0f951774b8
120 changed files with 4139 additions and 2119 deletions


@@ -13,7 +13,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-     - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
+     - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
      - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1


@@ -13,7 +13,7 @@ jobs:
    if: github.repository_owner == 'prometheus'
    steps:
      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-     - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
+     - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
      - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1


@@ -143,6 +143,18 @@ jobs:
        with:
          parallelism: 12
          thread: ${{ matrix.thread }}
+  build_all_status:
+    name: Report status of build Prometheus for all architectures
+    runs-on: ubuntu-latest
+    needs: [build_all]
+    if: github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-')
+    steps:
+      - name: Successful build
+        if: ${{ !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled')) }}
+        run: exit 0
+      - name: Failing or cancelled build
+        if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
+        run: exit 1
  check_generated_parser:
    name: Check generated parser
    runs-on: ubuntu-latest


@@ -27,12 +27,12 @@ jobs:
        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
      - name: Initialize CodeQL
-       uses: github/codeql-action/init@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
+       uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
        with:
          languages: ${{ matrix.language }}
      - name: Autobuild
-       uses: github/codeql-action/autobuild@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
+       uses: github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
      - name: Perform CodeQL Analysis
-       uses: github/codeql-action/analyze@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
+       uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11


@@ -45,6 +45,6 @@ jobs:
      # Upload the results to GitHub's code scanning dashboard.
      - name: "Upload to code-scanning"
-       uses: github/codeql-action/upload-sarif@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # tag=v3.25.8
+       uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # tag=v3.25.11
        with:
          sarif_file: results.sarif


@@ -1,12 +1,5 @@
run:
  timeout: 15m
-  skip-files:
-    # Skip autogenerated files.
-    - ^.*\.(pb|y)\.go$
-  skip-dirs:
-    # Copied it from a different source
-    - storage/remote/otlptranslator/prometheusremotewrite
-    - storage/remote/otlptranslator/prometheus
output:
  sort-results: true
@@ -33,6 +26,13 @@ linters:
issues:
  max-same-issues: 0
+  exclude-files:
+    # Skip autogenerated files.
+    - ^.*\.(pb|y)\.go$
+  exclude-dirs:
+    # Copied it from a different source
+    - storage/remote/otlptranslator/prometheusremotewrite
+    - storage/remote/otlptranslator/prometheus
  exclude-rules:
    - linters:
        - gocritic


@@ -2,6 +2,18 @@
## unreleased

+* [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200
+* [FEATURE] Remote-Write: Add sender and receiver support for [Remote Write 2.0-rc.2](https://prometheus.io/docs/specs/remote_write_spec_2_0/) specification #14395 #14427 #14444
+* [ENHANCEMENT] Remote-Write: 1.x messages against Remote Write 2.x Receivers will now have correct values for `prometheus_storage_<samples|histograms|exemplar>_failed_total` in case of partial errors #14444
+
+## 2.53.1 / 2024-07-10
+
+Fix a bug which would drop samples in remote-write if the sending flow stalled
+for longer than it takes to write one "WAL segment". How long this takes depends on the size
+of your Prometheus; as a rough guide, with 10 million series it is about 2-3 minutes.
+
+* [BUGFIX] Remote-write: stop dropping samples in catch-up #14446
+
## 2.53.0 / 2024-06-16

This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 75.


@@ -12,9 +12,10 @@ examples and guides.</p>
[![Docker Pulls](https://img.shields.io/docker/pulls/prom/prometheus.svg?maxAge=604800)][hub]
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/prometheus)](https://goreportcard.com/report/github.com/prometheus/prometheus)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/486/badge)](https://bestpractices.coreinfrastructure.org/projects/486)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)
+[![CLOMonitor](https://img.shields.io/endpoint?url=https://clomonitor.io/api/projects/cncf/prometheus/badge)](https://clomonitor.io/projects/cncf/prometheus)
[![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/prometheus/prometheus)
[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/prometheus.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
-[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)
</div>

SECURITY-INSIGHTS.yml (new file)

@@ -0,0 +1,48 @@
header:
  schema-version: '1.0.0'
  expiration-date: '2025-07-30T01:00:00.000Z'
  last-updated: '2024-07-30'
  last-reviewed: '2024-07-30'
  project-url: https://github.com/prometheus/prometheus
  changelog: https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md
  license: https://github.com/prometheus/prometheus/blob/main/LICENSE
project-lifecycle:
  status: active
  bug-fixes-only: false
  core-maintainers:
    - https://github.com/prometheus/prometheus/blob/main/MAINTAINERS.md
contribution-policy:
  accepts-pull-requests: true
  accepts-automated-pull-requests: true
dependencies:
  third-party-packages: true
  dependencies-lists:
    - https://github.com/prometheus/prometheus/blob/main/go.mod
    - https://github.com/prometheus/prometheus/blob/main/web/ui/package.json
  env-dependencies-policy:
    policy-url: https://github.com/prometheus/prometheus/blob/main/CONTRIBUTING.md#dependency-management
distribution-points:
  - https://github.com/prometheus/prometheus/releases
documentation:
  - https://prometheus.io/docs/introduction/overview/
security-contacts:
  - type: email
    value: prometheus-team@googlegroups.com
security-testing:
  - tool-type: sca
    tool-name: Dependabot
    tool-version: latest
    integration:
      ad-hoc: false
      ci: true
      before-release: true
  - tool-type: sast
    tool-name: CodeQL
    tool-version: latest
    integration:
      ad-hoc: false
      ci: true
      before-release: true
vulnerability-reporting:
  accepts-vulnerability-reports: true
  security-policy: https://github.com/prometheus/prometheus/security/policy


@@ -1 +1 @@
-2.53.0
+2.53.1


@@ -204,6 +204,7 @@ func main() {
    pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap()

    testCmd := app.Command("test", "Unit testing.")
+   junitOutFile := testCmd.Flag("junit", "File path to store JUnit XML test results.").OpenFile(os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
    testRulesCmd := testCmd.Command("rules", "Unit tests for rules.")
    testRulesRun := testRulesCmd.Flag("run", "If set, will only run test groups whose names match the regular expression. Can be specified multiple times.").Strings()
    testRulesFiles := testRulesCmd.Arg(
@@ -378,7 +379,11 @@ func main() {
        os.Exit(QueryLabels(serverURL, httpRoundTripper, *queryLabelsMatch, *queryLabelsName, *queryLabelsBegin, *queryLabelsEnd, p))

    case testRulesCmd.FullCommand():
-       os.Exit(RulesUnitTest(
+       results := io.Discard
+       if *junitOutFile != nil {
+           results = *junitOutFile
+       }
+       os.Exit(RulesUnitTestResult(results,
            promqltest.LazyLoaderOpts{
                EnableAtModifier:     true,
                EnableNegativeOffset: true,
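
(For context: kingpin's `OpenFile` flag value is a `**os.File`; the inner pointer stays nil unless `--junit` is actually passed, which is why the code checks `*junitOutFile != nil` before swapping out the `io.Discard` default.)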


@@ -101,6 +101,7 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin
    return successExitCode
}

+// TODO(bwplotka): Add PRW 2.0 support.
func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool {
    metricsData, err := fmtutil.MetricTextToWriteRequest(bytes.NewReader(data), labels)
    if err != nil {
@@ -116,7 +117,7 @@ func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]s

    // Encode the request body into snappy encoding.
    compressed := snappy.Encode(nil, raw)
-   err = client.Store(context.Background(), compressed, 0)
+   _, err = client.Store(context.Background(), compressed, 0)
    if err != nil {
        fmt.Fprintln(os.Stderr, " FAILED:", err)
        return false


@@ -18,6 +18,7 @@ import (
    "encoding/json"
    "errors"
    "fmt"
+   "io"
    "os"
    "path/filepath"
    "sort"
@@ -29,9 +30,10 @@ import (
    "github.com/google/go-cmp/cmp"
    "github.com/grafana/regexp"
    "github.com/nsf/jsondiff"
+   "github.com/prometheus/common/model"
    "gopkg.in/yaml.v2"
-   "github.com/prometheus/common/model"

    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/promql"
@@ -39,12 +41,18 @@ import (
    "github.com/prometheus/prometheus/promql/promqltest"
    "github.com/prometheus/prometheus/rules"
    "github.com/prometheus/prometheus/storage"
+   "github.com/prometheus/prometheus/util/junitxml"
)

// RulesUnitTest does unit testing of rules based on the unit testing files provided.
// More info about the file format can be found in the docs.
func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
+   return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, files...)
+}
+
+func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
    failed := false
+   junit := &junitxml.JUnitXML{}

    var run *regexp.Regexp
    if runStrings != nil {
@@ -52,7 +60,7 @@ func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, dif
    }

    for _, f := range files {
-       if errs := ruleUnitTest(f, queryOpts, run, diffFlag); errs != nil {
+       if errs := ruleUnitTest(f, queryOpts, run, diffFlag, junit.Suite(f)); errs != nil {
            fmt.Fprintln(os.Stderr, " FAILED:")
            for _, e := range errs {
                fmt.Fprintln(os.Stderr, e.Error())
@@ -64,25 +72,30 @@ func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, dif
        }
        fmt.Println()
    }
+   err := junit.WriteXML(results)
+   if err != nil {
+       fmt.Fprintf(os.Stderr, "failed to write JUnit XML: %s\n", err)
+   }
    if failed {
        return failureExitCode
    }
    return successExitCode
}

-func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error {
+func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool, ts *junitxml.TestSuite) []error {
    fmt.Println("Unit Testing: ", filename)
    b, err := os.ReadFile(filename)
    if err != nil {
+       ts.Abort(err)
        return []error{err}
    }

    var unitTestInp unitTestFile
    if err := yaml.UnmarshalStrict(b, &unitTestInp); err != nil {
+       ts.Abort(err)
        return []error{err}
    }
    if err := resolveAndGlobFilepaths(filepath.Dir(filename), &unitTestInp); err != nil {
+       ts.Abort(err)
        return []error{err}
    }

@@ -91,29 +104,38 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg
    }

    evalInterval := time.Duration(unitTestInp.EvaluationInterval)
+   ts.Settime(time.Now().Format("2006-01-02T15:04:05"))
    // Giving number for groups mentioned in the file for ordering.
    // Lower number group should be evaluated before higher number group.
    groupOrderMap := make(map[string]int)
    for i, gn := range unitTestInp.GroupEvalOrder {
        if _, ok := groupOrderMap[gn]; ok {
-           return []error{fmt.Errorf("group name repeated in evaluation order: %s", gn)}
+           err := fmt.Errorf("group name repeated in evaluation order: %s", gn)
+           ts.Abort(err)
+           return []error{err}
        }
        groupOrderMap[gn] = i
    }

    // Testing.
    var errs []error
-   for _, t := range unitTestInp.Tests {
+   for i, t := range unitTestInp.Tests {
        if !matchesRun(t.TestGroupName, run) {
            continue
        }
+       testname := t.TestGroupName
+       if testname == "" {
+           testname = fmt.Sprintf("unnamed#%d", i)
+       }
+       tc := ts.Case(testname)
        if t.Interval == 0 {
            t.Interval = unitTestInp.EvaluationInterval
        }
        ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...)
        if ers != nil {
+           for _, e := range ers {
+               tc.Fail(e.Error())
+           }
            errs = append(errs, ers...)
        }
    }
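
For orientation, here is a minimal sketch of how the `junitxml` helpers used above fit together, exercising only the calls visible in this diff (`Suite`, `Case`, `Fail`, `Abort`, `WriteXML`); the file and case names are illustrative:

```go
package main

import (
	"errors"
	"os"

	"github.com/prometheus/prometheus/util/junitxml"
)

func main() {
	junit := &junitxml.JUnitXML{}

	// One suite per rule-test file, mirroring RulesUnitTestResult above.
	suite := junit.Suite("alerts_test.yml") // illustrative file name
	tc := suite.Case("HighErrorRate")       // one case per test group
	tc.Fail("expected alert to fire at 5m, but it did not")

	// A suite that could not run at all is aborted with the error.
	junit.Suite("missing_test.yml").Abort(errors.New("file not found"))

	// Serialize the collected results as JUnit XML to any io.Writer.
	if err := junit.WriteXML(os.Stdout); err != nil {
		os.Exit(1)
	}
}
```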


@@ -14,11 +14,15 @@
package main

import (
+   "bytes"
+   "encoding/xml"
+   "fmt"
    "testing"

    "github.com/stretchr/testify/require"

    "github.com/prometheus/prometheus/promql/promqltest"
+   "github.com/prometheus/prometheus/util/junitxml"
)

func TestRulesUnitTest(t *testing.T) {
@@ -125,13 +129,59 @@ func TestRulesUnitTest(t *testing.T) {
            want: 0,
        },
    }
+   reuseFiles := []string{}
+   reuseCount := [2]int{}
    for _, tt := range tests {
+       if (tt.queryOpts == promqltest.LazyLoaderOpts{
+           EnableNegativeOffset: true,
+       } || tt.queryOpts == promqltest.LazyLoaderOpts{
+           EnableAtModifier: true,
+       }) {
+           reuseFiles = append(reuseFiles, tt.args.files...)
+           reuseCount[tt.want] += len(tt.args.files)
+       }
        t.Run(tt.name, func(t *testing.T) {
            if got := RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want {
                t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
            }
        })
    }
+   t.Run("Junit xml output ", func(t *testing.T) {
+       var buf bytes.Buffer
+       if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, reuseFiles...); got != 1 {
+           t.Errorf("RulesUnitTestResults() = %v, want 1", got)
+       }
+       var test junitxml.JUnitXML
+       output := buf.Bytes()
+       err := xml.Unmarshal(output, &test)
+       if err != nil {
+           fmt.Println("error in decoding XML:", err)
+           return
+       }
+       var total int
+       var passes int
+       var failures int
+       var cases int
+       total = len(test.Suites)
+       if total != len(reuseFiles) {
+           t.Errorf("JUnit output had %d testsuite elements; expected %d\n", total, len(reuseFiles))
+       }
+       for _, i := range test.Suites {
+           if i.FailureCount == 0 {
+               passes++
+           } else {
+               failures++
+           }
+           cases += len(i.Cases)
+       }
+       if total != passes+failures {
+           t.Errorf("JUnit output mismatch: Total testsuites (%d) does not equal the sum of passes (%d) and failures (%d).", total, passes, failures)
+       }
+       if cases < total {
+           t.Errorf("JUnit output had %d suites without test cases\n", total-cases)
+       }
+   })
}

func TestRulesUnitTestRun(t *testing.T) {


@@ -37,6 +37,7 @@ import (
    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/model/relabel"
    "github.com/prometheus/prometheus/storage/remote/azuread"
+   "github.com/prometheus/prometheus/storage/remote/googleiam"
)

var (
@@ -227,6 +228,9 @@ var (
    DefaultExemplarsConfig = ExemplarsConfig{
        MaxExemplars: 100000,
    }
+
+   // DefaultOTLPConfig is the default OTLP configuration.
+   DefaultOTLPConfig = OTLPConfig{}
)

// Config is the top-level configuration for Prometheus's config files.
@@ -242,6 +246,7 @@ type Config struct {
    RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
    RemoteReadConfigs  []*RemoteReadConfig  `yaml:"remote_read,omitempty"`
+   OTLPConfig         OTLPConfig           `yaml:"otlp,omitempty"`
}

// SetDirectory joins any relative file paths with dir.
@@ -1119,6 +1124,7 @@ type RemoteWriteConfig struct {
    MetadataConfig  MetadataConfig         `yaml:"metadata_config,omitempty"`
    SigV4Config     *sigv4.SigV4Config     `yaml:"sigv4,omitempty"`
    AzureADConfig   *azuread.AzureADConfig `yaml:"azuread,omitempty"`
+   GoogleIAMConfig *googleiam.Config      `yaml:"google_iam,omitempty"`
}

// SetDirectory joins any relative file paths with dir.
@@ -1156,17 +1162,33 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
        return err
    }

-   httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil ||
-       c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
-   if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) {
-       return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
-   }
-
-   if c.SigV4Config != nil && c.AzureADConfig != nil {
-       return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
-   }
+   return validateAuthConfigs(c)
+}
+
+// validateAuthConfigs validates that at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam is configured.
+func validateAuthConfigs(c *RemoteWriteConfig) error {
+   var authConfigured []string
+   if c.HTTPClientConfig.BasicAuth != nil {
+       authConfigured = append(authConfigured, "basic_auth")
+   }
+   if c.HTTPClientConfig.Authorization != nil {
+       authConfigured = append(authConfigured, "authorization")
+   }
+   if c.HTTPClientConfig.OAuth2 != nil {
+       authConfigured = append(authConfigured, "oauth2")
+   }
+   if c.SigV4Config != nil {
+       authConfigured = append(authConfigured, "sigv4")
+   }
+   if c.AzureADConfig != nil {
+       authConfigured = append(authConfigured, "azuread")
+   }
+   if c.GoogleIAMConfig != nil {
+       authConfigured = append(authConfigured, "google_iam")
+   }
+   if len(authConfigured) > 1 {
+       return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam must be configured. Currently configured: %v", authConfigured)
+   }
    return nil
}
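
As a usage illustration (a sketch, not part of the commit; it would have to live in the `config` package since `validateAuthConfigs` is unexported), configuring two authentication methods now yields an error naming the offending settings:

```go
package config

import (
	"testing"

	"github.com/prometheus/common/sigv4"

	"github.com/prometheus/prometheus/storage/remote/azuread"
)

// Sketch: two mutually exclusive auth methods should be rejected.
func TestAuthExclusivitySketch(t *testing.T) {
	cfg := &RemoteWriteConfig{
		SigV4Config:   &sigv4.SigV4Config{},
		AzureADConfig: &azuread.AzureADConfig{},
	}
	err := validateAuthConfigs(cfg)
	if err == nil {
		t.Fatal("expected an error when both sigv4 and azuread are set")
	}
	// err reads: at most one of basic_auth, authorization, oauth2, sigv4,
	// azuread or google_iam must be configured. Currently configured: [sigv4 azuread]
}
```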
@@ -1185,7 +1207,7 @@ func validateHeadersForTracing(headers map[string]string) error {
func validateHeaders(headers map[string]string) error {
    for header := range headers {
        if strings.ToLower(header) == "authorization" {
-           return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter")
+           return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter")
        }
        if _, ok := reservedHeaders[strings.ToLower(header)]; ok {
            return fmt.Errorf("%s is a reserved header. It must not be changed", header)
@@ -1304,3 +1326,35 @@ func getGoGCEnv() int {
    }
    return DefaultRuntimeConfig.GoGC
}
+
+// OTLPConfig is the configuration for writing to the OTLP endpoint.
+type OTLPConfig struct {
+   PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"`
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+   *c = DefaultOTLPConfig
+   type plain OTLPConfig
+   if err := unmarshal((*plain)(c)); err != nil {
+       return err
+   }
+
+   seen := map[string]struct{}{}
+   var err error
+   for i, attr := range c.PromoteResourceAttributes {
+       attr = strings.TrimSpace(attr)
+       if attr == "" {
+           err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute"))
+           continue
+       }
+       if _, exists := seen[attr]; exists {
+           err = errors.Join(err, fmt.Errorf("duplicated promoted OTel resource attribute %q", attr))
+           continue
+       }
+
+       seen[attr] = struct{}{}
+       c.PromoteResourceAttributes[i] = attr
+   }
+   return err
+}
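
To make the sanitization concrete, a minimal sketch (again assuming it sits in the `config` package next to the type above): attributes are whitespace-trimmed, while duplicates and empty entries are collected into a joined error:

```go
package config

import (
	"fmt"
	"testing"

	"gopkg.in/yaml.v2"
)

// Sketch: trimming plus duplicate/empty detection in OTLPConfig.UnmarshalYAML.
func TestOTLPPromoteSketch(t *testing.T) {
	var otlp OTLPConfig
	in := []byte(`promote_resource_attributes: [" k8s.job.name ", "k8s.job.name", ""]`)
	err := yaml.UnmarshalStrict(in, &otlp)
	if err == nil {
		t.Fatal("expected duplicate and empty attribute errors")
	}
	fmt.Println(err)
	// Reports both: duplicated promoted OTel resource attribute "k8s.job.name"
	// and: empty promoted OTel resource attribute
}
```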


@@ -156,6 +156,12 @@ var expectedConf = &Config{
        },
    },

+   OTLPConfig: OTLPConfig{
+       PromoteResourceAttributes: []string{
+           "k8s.cluster.name", "k8s.job.name", "k8s.namespace.name",
+       },
+   },
+
    RemoteReadConfigs: []*RemoteReadConfig{
        {
            URL: mustParseURL("http://remote1/read"),
@@ -1471,6 +1477,26 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
    require.False(t, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit)
}

+func TestOTLPSanitizeResourceAttributes(t *testing.T) {
+   t.Run("good config", func(t *testing.T) {
+       want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, log.NewNopLogger())
+       require.NoError(t, err)
+
+       out, err := yaml.Marshal(want)
+       require.NoError(t, err)
+       var got Config
+       require.NoError(t, yaml.UnmarshalStrict(out, &got))
+
+       require.Equal(t, []string{"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"}, got.OTLPConfig.PromoteResourceAttributes)
+   })
+
+   t.Run("bad config", func(t *testing.T) {
+       _, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, log.NewNopLogger())
+       require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`)
+       require.ErrorContains(t, err, `empty promoted OTel resource attribute`)
+   })
+}
+
func TestLoadConfig(t *testing.T) {
    // Parse a valid file that sets a global scrape timeout. This tests whether parsing
    // an overwritten default field in the global config permanently changes the default.
@@ -1800,7 +1826,7 @@ var expectedErrors = []struct {
    },
    {
        filename: "remote_write_authorization_header.bad.yml",
-       errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`,
+       errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter`,
    },
    {
        filename: "remote_write_wrong_msg.bad.yml",


@@ -45,6 +45,9 @@ remote_write:
    headers:
      name: value

+otlp:
+  promote_resource_attributes: ["k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"]
+
remote_read:
  - url: http://remote1/read
    read_recent: true


@@ -0,0 +1,2 @@
otlp:
promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name", "k8s.job.name", ""]


@@ -0,0 +1,2 @@
otlp:
promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name"]


@@ -1090,7 +1090,6 @@ func TestCoordinationWithReceiver(t *testing.T) {
    }

    for _, tc := range testCases {
-       tc := tc
        t.Run(tc.title, func(t *testing.T) {
            ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
            defer cancel()


@@ -17,7 +17,7 @@ import (
    "context"
    "strconv"

-   "github.com/docker/docker/api/types"
+   "github.com/docker/docker/api/types/network"
    "github.com/docker/docker/client"

    "github.com/prometheus/prometheus/util/strutil"
@@ -34,7 +34,7 @@ const (
)

func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix string) (map[string]map[string]string, error) {
-   networks, err := client.NetworkList(ctx, types.NetworkListOptions{})
+   networks, err := client.NetworkList(ctx, network.ListOptions{})
    if err != nil {
        return nil, err
    }


@@ -30,7 +30,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--web.console.templates</code> | Path to the console template directory, available at /consoles. | `consoles` |
| <code class="text-nowrap">--web.console.libraries</code> | Path to the console library directory. | `console_libraries` |
| <code class="text-nowrap">--web.page-title</code> | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` |
-| <code class="text-nowrap">--web.cors.origin</code> | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\.com' | `.*` |
+| <code class="text-nowrap">--web.cors.origin</code> | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com' | `.*` |
| <code class="text-nowrap">--storage.tsdb.path</code> | Base path for metrics storage. Use with server mode only. | `data/` |
| <code class="text-nowrap">--storage.tsdb.retention</code> | [DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use "storage.tsdb.retention.time" instead. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.retention.time</code> | How long to retain samples in storage. When this flag is set it overrides "storage.tsdb.retention". If neither this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | |


@@ -442,6 +442,15 @@ Unit testing.
+#### Flags
+
+| Flag | Description |
+| --- | --- |
+| <code class="text-nowrap">--junit</code> | File path to store JUnit XML test results. |
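
For example (illustrative file names), `promtool test --junit=results.xml rules my_tests.yml` runs the rule tests and additionally writes a JUnit XML report to `results.xml`.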
##### `promtool test rules`

Unit tests for rules.


@@ -152,6 +152,10 @@ alerting:
remote_write:
  [ - <remote_write> ... ]

+# Settings related to the OTLP receiver feature.
+otlp:
+  [ promote_resource_attributes: [<string>, ...] | default = [ ] ]
+
# Settings related to the remote read feature.
remote_read:
  [ - <remote_read> ... ]
@@ -458,13 +462,15 @@ metric_relabel_configs:
[ keep_dropped_targets: <int> | default = 0 ]

# Limit on total number of positive and negative buckets allowed in a single
-# native histogram. If this is exceeded, the entire scrape will be treated as
-# failed. 0 means no limit.
+# native histogram. The resolution of a histogram with more buckets will be
+# reduced until the number of buckets is within the limit. If the limit cannot
+# be reached, the scrape will fail.
+# 0 means no limit.
[ native_histogram_bucket_limit: <int> | default = 0 ]

# Lower limit for the growth factor of one bucket to the next in each native
# histogram. The resolution of a histogram with a lower growth factor will be
-# reduced until it is within the limit.
+# reduced as much as possible until it is within the limit.
# To set an upper limit for the schema (equivalent to "scale" in OTel's
# exponential histograms), use the following factor limits:
#
@@ -3395,8 +3401,8 @@ authorization:
  # It is mutually exclusive with `credentials`.
  [ credentials_file: <filename> ]

-# Optionally configures AWS's Signature Verification 4 signing process to
-# sign requests. Cannot be set at the same time as basic_auth, authorization, or oauth2.
+# Optionally configures AWS's Signature Verification 4 signing process to sign requests.
+# Cannot be set at the same time as basic_auth, authorization, oauth2, azuread or google_iam.
# To use the default credentials from the AWS SDK, use `sigv4: {}`.
sigv4:
  # The AWS region. If blank, the region from the default credentials chain
@@ -3649,12 +3655,12 @@ sigv4:
  [ role_arn: <string> ]

# Optional OAuth 2.0 configuration.
-# Cannot be used at the same time as basic_auth, authorization, sigv4, or azuread.
+# Cannot be used at the same time as basic_auth, authorization, sigv4, azuread or google_iam.
oauth2:
  [ <oauth2> ]

# Optional AzureAD configuration.
-# Cannot be used at the same time as basic_auth, authorization, oauth2, or sigv4.
+# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or google_iam.
azuread:
  # The Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'.
  [ cloud: <string> | default = AzurePublic ]
@@ -3674,6 +3680,14 @@ azuread:
  [ sdk:
      [ tenant_id: <string> ] ]

+# WARNING: Remote write is NOT SUPPORTED by Google Cloud. This configuration is reserved for future use.
+# Optional Google Cloud Monitoring configuration.
+# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or azuread.
+# To use the default credentials from the Google Cloud SDK, use `google_iam: {}`.
+google_iam:
+  # Service account key with monitoring write permissions.
+  credentials_file: <file_name>
+
# Configures the remote write request's TLS settings.
tls_config:
  [ <tls_config> ]


@@ -92,7 +92,7 @@ series: <string>
#
# Native histogram notation:
# Native histograms can be used instead of floating point numbers using the following notation:
-# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}
+# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}}
# Native histograms support the same expanding notation as floating point numbers, i.e. 'axn', 'a+bxn' and 'a-bxn'.
# All properties are optional and default to 0. The order is not important. The following properties are supported:
# - schema (int):
@@ -119,6 +119,8 @@ series: <string>
#     Observation counts in negative buckets. Each represents an absolute count.
# - n_offset (int):
#     The starting index of the first entry in the negative buckets.
+# - counter_reset_hint (one of 'unknown', 'reset', 'not_reset' or 'gauge'):
+#     The counter reset hint associated with this histogram. Defaults to 'unknown' if not set.
values: <string>
```


@@ -260,7 +260,7 @@ URL query parameters:
  series to return. At least one `match[]` argument must be provided.
- `start=<rfc3339 | unix_timestamp>`: Start timestamp.
- `end=<rfc3339 | unix_timestamp>`: End timestamp.
-- `limit=<number>`: Maximum number of returned series. Optional.
+- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.

You can URL-encode these parameters directly in the request body by using the `POST` method and
`Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large
@@ -311,7 +311,7 @@ URL query parameters:
- `end=<rfc3339 | unix_timestamp>`: End timestamp. Optional.
- `match[]=<series_selector>`: Repeated series selector argument that selects the
  series from which to read the label names. Optional.
-- `limit=<number>`: Maximum number of returned series. Optional.
+- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.

The `data` section of the JSON response is a list of string label names.
@@ -362,7 +362,7 @@ URL query parameters:
- `end=<rfc3339 | unix_timestamp>`: End timestamp. Optional.
- `match[]=<series_selector>`: Repeated series selector argument that selects the
  series from which to read the label values. Optional.
-- `limit=<number>`: Maximum number of returned series. Optional.
+- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.

The `data` section of the JSON response is a list of string label values.


@@ -8,9 +8,15 @@ sort_rank: 1

Prometheus provides a functional query language called PromQL (Prometheus Query
Language) that lets the user select and aggregate time series data in real
-time. The result of an expression can either be shown as a graph, viewed as
-tabular data in Prometheus's expression browser, or consumed by external
-systems via the [HTTP API](api.md).
+time.
+
+When you send a query request to Prometheus, it can be an _instant query_, evaluated at one point in time,
+or a _range query_, evaluated at equally-spaced steps between a start and an end time. PromQL works exactly the same
+in each case; a range query is just like an instant query run multiple times at different timestamps.
+
+In the Prometheus UI, the "Table" tab is for instant queries and the "Graph" tab is for range queries.
+
+Other programs can fetch the result of a PromQL expression via the [HTTP API](api.md).

## Examples
@@ -81,12 +87,20 @@ Examples:
    0x8f
    -Inf
    NaN

+As of version 2.54, float literals can also be represented using the syntax of time durations, where the time duration is converted into a float value corresponding to the number of seconds the time duration represents. This is an experimental feature and might still change.
+
+Examples:
+
+    1s   # Equivalent to 1.0
+    2m   # Equivalent to 120.0
+    1ms  # Equivalent to 0.001
+
## Time series selectors

+Time series selectors are responsible for selecting the time series and raw or inferred sample timestamps and values. These are the basic building blocks that instruct PromQL what data to fetch.
+
+Time series *selectors* are not to be confused with the higher-level concept of instant and range *queries* that can execute them. An instant query evaluates the given selector at one point in time, whereas a range query evaluates the selector at multiple points between a minimum and maximum timestamp at regular steps.
+
### Instant vector selectors
@@ -224,6 +238,15 @@ Here are some examples of valid time durations:
    5m
    10s

+As of version 2.54, time durations can also be represented using the syntax of float literals, implying the number of seconds of the time duration. This is an experimental feature and might still change.
+
+Examples:
+
+    1.0    # Equivalent to 1s
+    0.001  # Equivalent to 1ms
+    120    # Equivalent to 2m
+
### Offset modifier

The `offset` modifier allows changing the time offset for individual


@@ -98,8 +98,9 @@ vector.
clamps the sample values of all elements in `v` to have a lower limit of `min` and an upper limit of `max`.

Special cases:
-- Return an empty vector if `min > max`
-- Return `NaN` if `min` or `max` is `NaN`
+
+* Return an empty vector if `min > max`
+* Return `NaN` if `min` or `max` is `NaN`

## `clamp_max()`
@@ -349,8 +350,8 @@ a histogram.

Buckets of classic histograms are cumulative. Therefore, the following should always be the case:

-- The counts in the buckets are monotonically increasing (strictly non-decreasing).
-- A lack of observations between the upper limits of two consecutive buckets results in equal counts
+* The counts in the buckets are monotonically increasing (strictly non-decreasing).
+* A lack of observations between the upper limits of two consecutive buckets results in equal counts
  in those two buckets.

However, floating point precision issues (e.g. small discrepancies introduced by computing of buckets
@@ -692,21 +693,21 @@ ignore histogram samples.

The trigonometric functions work in radians:

-- `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)).
-- `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)).
-- `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)).
-- `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)).
-- `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)).
-- `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)).
-- `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)).
-- `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)).
-- `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)).
-- `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
-- `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)).
-- `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)).
+* `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)).
+* `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)).
+* `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)).
+* `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)).
+* `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)).
+* `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)).
+* `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)).
+* `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)).
+* `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)).
+* `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
+* `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)).
+* `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)).

The following are useful for converting between degrees and radians:

-- `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
-- `pi()`: returns pi.
-- `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.
+* `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
+* `pi()`: returns pi.
+* `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.


@@ -137,6 +137,18 @@ will be used.
Expired block cleanup happens in the background. It may take up to two hours
to remove expired blocks. Blocks must be fully expired before they are removed.

+## Right-Sizing Retention Size
+
+If you are utilizing `storage.tsdb.retention.size` to set a size limit, you
+will want to consider the right size for this value relative to the storage you
+have allocated for Prometheus. It is wise to reduce the retention size to provide
+a buffer, ensuring that older entries will be removed before the allocated storage
+for Prometheus becomes full.
+
+At present, we recommend setting the retention size to, at most, 80-85% of your
+allocated Prometheus disk space. This increases the likelihood that older entries
+will be removed prior to hitting any disk limitations. For example, with 100GB of
+disk allocated to Prometheus, setting `--storage.tsdb.retention.size=80GB` leaves
+roughly 20GB of headroom.
+
## Remote storage integrations

Prometheus's local storage is limited to a single node's scalability and durability.


@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/documentation/examples/remote_storage

-go 1.21
+go 1.21.0

require (
    github.com/alecthomas/kingpin/v2 v2.4.0
@@ -10,7 +10,7 @@ require (
    github.com/influxdata/influxdb v1.11.5
    github.com/prometheus/client_golang v1.19.1
    github.com/prometheus/common v0.55.0
-   github.com/prometheus/prometheus v0.52.1
+   github.com/prometheus/prometheus v0.53.1
    github.com/stretchr/testify v1.9.0
)

go.mod

@@ -1,22 +1,24 @@
module github.com/prometheus/prometheus

-go 1.21
+go 1.21.0
+
+toolchain go1.22.5

require (
-   github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
+   github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0
-   github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2
+   github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
    github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
    github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
    github.com/Code-Hex/go-generics-cache v1.5.1
    github.com/KimMachineGun/automemlimit v0.6.1
    github.com/alecthomas/kingpin/v2 v2.4.0
-   github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
+   github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30
-   github.com/aws/aws-sdk-go v1.53.16
+   github.com/aws/aws-sdk-go v1.54.19
    github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
    github.com/cespare/xxhash/v2 v2.3.0
    github.com/dennwc/varint v1.0.0
-   github.com/digitalocean/godo v1.117.0
+   github.com/digitalocean/godo v1.119.0
-   github.com/docker/docker v26.1.3+incompatible
+   github.com/docker/docker v27.0.3+incompatible
    github.com/edsrzf/mmap-go v1.1.0
    github.com/envoyproxy/go-control-plane v0.12.0
    github.com/envoyproxy/protoc-gen-validate v1.0.4
@@ -29,75 +31,75 @@ require (
    github.com/gogo/protobuf v1.3.2
    github.com/golang/snappy v0.0.4
    github.com/google/go-cmp v0.6.0
-   github.com/google/pprof v0.0.0-20240528025155-186aa0362fba
+   github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da
    github.com/google/uuid v1.6.0
-   github.com/gophercloud/gophercloud v1.12.0
+   github.com/gophercloud/gophercloud v1.14.0
    github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
    github.com/grpc-ecosystem/grpc-gateway v1.16.0
-   github.com/hashicorp/consul/api v1.29.1
+   github.com/hashicorp/consul/api v1.29.2
-   github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d
+   github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3
-   github.com/hetznercloud/hcloud-go/v2 v2.9.0
+   github.com/hetznercloud/hcloud-go/v2 v2.12.0
    github.com/ionos-cloud/sdk-go/v6 v6.1.11
    github.com/json-iterator/go v1.1.12
-   github.com/klauspost/compress v1.17.8
+   github.com/klauspost/compress v1.17.9
    github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
-   github.com/linode/linodego v1.35.0
+   github.com/linode/linodego v1.38.0
-   github.com/miekg/dns v1.1.59
+   github.com/miekg/dns v1.1.61
    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
    github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
    github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
    github.com/oklog/run v1.1.0
    github.com/oklog/ulid v1.3.1
-   github.com/ovh/go-ovh v1.5.1
+   github.com/ovh/go-ovh v1.6.0
    github.com/prometheus/alertmanager v0.27.0
    github.com/prometheus/client_golang v1.19.1
    github.com/prometheus/client_model v0.6.1
-   github.com/prometheus/common v0.54.0
+   github.com/prometheus/common v0.55.0
    github.com/prometheus/common/assets v0.2.0
    github.com/prometheus/common/sigv4 v0.1.0
    github.com/prometheus/exporter-toolkit v0.11.0
-   github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27
+   github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29
    github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
    github.com/stretchr/testify v1.9.0
    github.com/vultr/govultr/v2 v2.17.2
-   go.opentelemetry.io/collector/pdata v1.8.0
+   go.opentelemetry.io/collector/pdata v1.12.0
-   go.opentelemetry.io/collector/semconv v0.101.0
+   go.opentelemetry.io/collector/semconv v0.105.0
-   go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0
+   go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
-   go.opentelemetry.io/otel v1.27.0
+   go.opentelemetry.io/otel v1.28.0
-   go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
+   go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
-   go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0
+   go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
-   go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
+   go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0
-   go.opentelemetry.io/otel/sdk v1.27.0
+   go.opentelemetry.io/otel/sdk v1.28.0
-   go.opentelemetry.io/otel/trace v1.27.0
+   go.opentelemetry.io/otel/trace v1.28.0
    go.uber.org/atomic v1.11.0
    go.uber.org/automaxprocs v1.5.3
    go.uber.org/goleak v1.3.0
    go.uber.org/multierr v1.11.0
-   golang.org/x/net v0.26.0
+   golang.org/x/net v0.27.0
    golang.org/x/oauth2 v0.21.0
    golang.org/x/sync v0.7.0
-   golang.org/x/sys v0.21.0
+   golang.org/x/sys v0.22.0
    golang.org/x/text v0.16.0
    golang.org/x/time v0.5.0
-   golang.org/x/tools v0.22.0
+   golang.org/x/tools v0.23.0
-   google.golang.org/api v0.183.0
+   google.golang.org/api v0.189.0
-   google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157
+   google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d
-   google.golang.org/grpc v1.64.0
+   google.golang.org/grpc v1.65.0
-   google.golang.org/protobuf v1.34.1
+   google.golang.org/protobuf v1.34.2
    gopkg.in/yaml.v2 v2.4.0
    gopkg.in/yaml.v3 v3.0.1
    k8s.io/api v0.29.3
    k8s.io/apimachinery v0.29.3
    k8s.io/client-go v0.29.3
    k8s.io/klog v1.0.0
-   k8s.io/klog/v2 v2.120.1
+   k8s.io/klog/v2 v2.130.1
)

require (
-   cloud.google.com/go/auth v0.5.1 // indirect
+   cloud.google.com/go/auth v0.7.2 // indirect
-   cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
+   cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect
-   cloud.google.com/go/compute/metadata v0.3.0 // indirect
+   cloud.google.com/go/compute/metadata v0.5.0 // indirect
-   github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
+   github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
    github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
    github.com/Microsoft/go-winio v0.6.1 // indirect
    github.com/armon/go-metrics v0.4.1 // indirect
@@ -105,7 +107,7 @@ require (
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/cenkalti/backoff/v4 v4.3.0 // indirect
    github.com/cilium/ebpf v0.11.0 // indirect
-   github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect
+   github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect
    github.com/containerd/cgroups/v3 v3.0.3 // indirect
    github.com/containerd/log v0.1.0 // indirect
    github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -119,7 +121,7 @@ require (
    github.com/felixge/httpsnoop v1.0.4 // indirect
    github.com/ghodss/yaml v1.0.0 // indirect
    github.com/go-kit/kit v0.12.0 // indirect
-   github.com/go-logr/logr v1.4.1 // indirect
+   github.com/go-logr/logr v1.4.2 // indirect
    github.com/go-logr/stdr v1.2.2 // indirect
    github.com/go-openapi/analysis v0.22.2 // indirect
    github.com/go-openapi/errors v0.22.0 // indirect
@@ -132,7 +134,7 @@ require (
    github.com/go-resty/resty/v2 v2.13.1 // indirect
    github.com/godbus/dbus/v5 v5.0.4 // indirect
    github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
-   github.com/golang/glog v1.2.0 // indirect
+   github.com/golang/glog v1.2.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect
@ -140,7 +142,7 @@ require (
github.com/google/gofuzz v1.2.0 // indirect github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.7 // indirect github.com/google/s2a-go v0.1.7 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.4 // indirect github.com/googleapis/gax-go/v2 v2.12.5 // indirect
github.com/gorilla/websocket v1.5.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect
@ -176,20 +178,20 @@ require (
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/procfs v0.12.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect github.com/stretchr/objx v0.5.2 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opencensus.io v0.24.0 // indirect go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/metric v1.27.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.2.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect
golang.org/x/crypto v0.24.0 // indirect golang.org/x/crypto v0.25.0 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/mod v0.18.0 // indirect golang.org/x/mod v0.19.0 // indirect
golang.org/x/term v0.21.0 // indirect golang.org/x/term v0.22.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade // indirect
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect
gotest.tools/v3 v3.0.3 // indirect gotest.tools/v3 v3.0.3 // indirect

go.sum

@ -12,18 +12,18 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw= cloud.google.com/go/auth v0.7.2 h1:uiha352VrCDMXg+yoBtaD0tUF4Kv9vrtrWPYXwutnDE=
cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s= cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs=
cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=
cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@ -36,12 +36,12 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
@ -75,8 +75,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg=
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc= github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI=
github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
@ -120,8 +120,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
@ -143,14 +143,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw= github.com/digitalocean/godo v1.119.0 h1:dmFNQwSIAcH3z+FVovHLkazKDC2uA8oOlGvg5+H4vRw=
github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= github.com/digitalocean/godo v1.119.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE=
github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@ -210,8 +210,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0= github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0=
@ -251,8 +251,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -319,8 +319,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20240528025155-186aa0362fba h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g= github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g=
github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
@ -332,10 +332,10 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=
github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g= github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8=
github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@ -353,10 +353,10 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc= github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw=
github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI= github.com/hashicorp/consul/api v1.29.2/go.mod h1:0YObcaLNDSbtlgzIRtmRXI1ZkeuK0trCBxwZQ4MYnIk=
github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg= github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
@ -396,8 +396,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@ -409,13 +409,13 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc= github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w=
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY= github.com/hetznercloud/hcloud-go/v2 v2.12.0 h1:nOgfNTo0gyXZJJdM8mo/XH5MO/e80wAEpldRzdWayhY=
github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg= github.com/hetznercloud/hcloud-go/v2 v2.12.0/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@ -454,8 +454,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -472,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do= github.com/linode/linodego v1.38.0 h1:wP3oW9OhGc6vhze8NPf2knbwH4TzSbrjzuCd9okjbTY=
github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0= github.com/linode/linodego v1.38.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@ -500,8 +500,8 @@ github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwU
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs=
github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@ -573,8 +573,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI= github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI=
github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
@ -625,8 +625,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
@ -639,8 +639,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@ -650,8 +650,8 @@ github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 h1:BkTk4gynLjguayxrYxZoMZjBnAOh7ntQvUkOFmkMqPU=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY=
@ -694,6 +694,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@ -723,28 +724,28 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764= go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA=
go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY= go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI=
go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q= go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g=
go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@ -773,8 +774,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -809,8 +810,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -856,8 +857,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -946,16 +947,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1025,8 +1026,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1046,8 +1047,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= google.golang.org/api v0.189.0 h1:equMo30LypAkdkLMBqfeIqtyAnlyig1JSZArl4XPwdI=
google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= google.golang.org/api v0.189.0/go.mod h1:FLWGJKb0hb+pU2j+rJqwbnsF+ym+fQs73rbJ+KAUgy8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1084,10 +1085,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY=
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade h1:oCRSWfwGXQsqlVdErcyTt4A93Y8fo0/9D4b1gnI++qo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -1106,8 +1107,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -1119,8 +1120,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View file

@ -38,10 +38,10 @@ func (ls Labels) Bytes(buf []byte) []byte {
b.WriteByte(labelSep) b.WriteByte(labelSep)
for i, l := range ls { for i, l := range ls {
if i > 0 { if i > 0 {
b.WriteByte(seps[0]) b.WriteByte(sep)
} }
b.WriteString(l.Name) b.WriteString(l.Name)
b.WriteByte(seps[0]) b.WriteByte(sep)
b.WriteString(l.Value) b.WriteString(l.Value)
} }
return b.Bytes() return b.Bytes()
@ -86,9 +86,9 @@ func (ls Labels) Hash() uint64 {
} }
b = append(b, v.Name...) b = append(b, v.Name...)
b = append(b, seps[0]) b = append(b, sep)
b = append(b, v.Value...) b = append(b, v.Value...)
b = append(b, seps[0]) b = append(b, sep)
} }
return xxhash.Sum64(b) return xxhash.Sum64(b)
} }
@ -106,9 +106,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
i++ i++
default: default:
b = append(b, ls[i].Name...) b = append(b, ls[i].Name...)
b = append(b, seps[0]) b = append(b, sep)
b = append(b, ls[i].Value...) b = append(b, ls[i].Value...)
b = append(b, seps[0]) b = append(b, sep)
i++ i++
j++ j++
} }
@ -130,9 +130,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
continue continue
} }
b = append(b, ls[i].Name...) b = append(b, ls[i].Name...)
b = append(b, seps[0]) b = append(b, sep)
b = append(b, ls[i].Value...) b = append(b, ls[i].Value...)
b = append(b, seps[0]) b = append(b, sep)
} }
return xxhash.Sum64(b), b return xxhash.Sum64(b), b
} }
@ -151,10 +151,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
i++ i++
default: default:
if b.Len() > 1 { if b.Len() > 1 {
b.WriteByte(seps[0]) b.WriteByte(sep)
} }
b.WriteString(ls[i].Name) b.WriteString(ls[i].Name)
b.WriteByte(seps[0]) b.WriteByte(sep)
b.WriteString(ls[i].Value) b.WriteString(ls[i].Value)
i++ i++
j++ j++
@ -177,10 +177,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte {
continue continue
} }
if b.Len() > 1 { if b.Len() > 1 {
b.WriteByte(seps[0]) b.WriteByte(sep)
} }
b.WriteString(ls[i].Name) b.WriteString(ls[i].Name)
b.WriteByte(seps[0]) b.WriteByte(sep)
b.WriteString(ls[i].Value) b.WriteString(ls[i].Value)
} }
return b.Bytes() return b.Bytes()

View file

@ -29,10 +29,11 @@ const (
BucketLabel = "le" BucketLabel = "le"
InstanceName = "instance" InstanceName = "instance"
labelSep = '\xfe' labelSep = '\xfe' // Used at beginning of `Bytes` return.
sep = '\xff' // Used between labels in `Bytes` and `Hash`.
) )
var seps = []byte{'\xff'} var seps = []byte{sep} // Used with Hash, which has no WriteByte method.
// Label is a key/value pair of strings. // Label is a key/value pair of strings.
type Label struct { type Label struct {
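The refactor above replaces every indexed seps[0] read with the named byte constant sep, keeping the seps slice only for the hashing helpers that need a []byte; labelSep marks the start of a Bytes encoding, while sep separates the name/value pairs inside it. A minimal, self-contained sketch of the separator-based hashing pattern, assuming the xxhash package Prometheus already vendors (github.com/cespare/xxhash/v2); this is an illustration, not the full Labels implementation:

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

const sep = '\xff' // placed between names and values before hashing; cannot occur in either

type Label struct{ Name, Value string }

func hashLabels(ls []Label) uint64 {
	b := make([]byte, 0, 1024)
	for _, l := range ls {
		b = append(b, l.Name...)
		b = append(b, sep) // a single byte appends directly; no seps[0] indexing needed
		b = append(b, l.Value...)
		b = append(b, sep)
	}
	// "name<sep>value<sep>..." is an unambiguous encoding, so equal label
	// sets hash equally and different ones almost surely do not.
	return xxhash.Sum64(b)
}

func main() {
	fmt.Println(hashLabels([]Label{{"job", "api"}, {"instance", "1"}}))
}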

View file

@ -146,13 +146,13 @@ func (ls Labels) Bytes(buf []byte) []byte {
b := bytes.NewBuffer(buf[:0]) b := bytes.NewBuffer(buf[:0])
for i := 0; i < len(ls.data); { for i := 0; i < len(ls.data); {
if i > 0 { if i > 0 {
b.WriteByte(seps[0]) b.WriteByte(sep)
} }
var name, value string var name, value string
name, i = decodeString(ls.syms, ls.data, i) name, i = decodeString(ls.syms, ls.data, i)
value, i = decodeString(ls.syms, ls.data, i) value, i = decodeString(ls.syms, ls.data, i)
b.WriteString(name) b.WriteString(name)
b.WriteByte(seps[0]) b.WriteByte(sep)
b.WriteString(value) b.WriteString(value)
} }
return b.Bytes() return b.Bytes()
@ -201,9 +201,9 @@ func (ls Labels) Hash() uint64 {
} }
b = append(b, name...) b = append(b, name...)
b = append(b, seps[0]) b = append(b, sep)
b = append(b, value...) b = append(b, value...)
b = append(b, seps[0]) b = append(b, sep)
pos = newPos pos = newPos
} }
return xxhash.Sum64(b) return xxhash.Sum64(b)
@ -226,9 +226,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
} }
if name == names[j] { if name == names[j] {
b = append(b, name...) b = append(b, name...)
b = append(b, seps[0]) b = append(b, sep)
b = append(b, value...) b = append(b, value...)
b = append(b, seps[0]) b = append(b, sep)
} }
} }
@ -252,9 +252,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
continue continue
} }
b = append(b, name...) b = append(b, name...)
b = append(b, seps[0]) b = append(b, sep)
b = append(b, value...) b = append(b, value...)
b = append(b, seps[0]) b = append(b, sep)
} }
return xxhash.Sum64(b), b return xxhash.Sum64(b), b
} }
@ -275,10 +275,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
} }
if lName == names[j] { if lName == names[j] {
if b.Len() > 1 { if b.Len() > 1 {
b.WriteByte(seps[0]) b.WriteByte(sep)
} }
b.WriteString(lName) b.WriteString(lName)
b.WriteByte(seps[0]) b.WriteByte(sep)
b.WriteString(lValue) b.WriteString(lValue)
} }
pos = newPos pos = newPos
@ -299,10 +299,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte {
} }
if j == len(names) || lName != names[j] { if j == len(names) || lName != names[j] {
if b.Len() > 1 { if b.Len() > 1 {
b.WriteByte(seps[0]) b.WriteByte(sep)
} }
b.WriteString(lName) b.WriteString(lName)
b.WriteByte(seps[0]) b.WriteByte(sep)
b.WriteString(lValue) b.WriteString(lValue)
} }
pos = newPos pos = newPos

View file

@ -112,9 +112,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
} }
if name == names[j] { if name == names[j] {
b = append(b, name...) b = append(b, name...)
b = append(b, seps[0]) b = append(b, sep)
b = append(b, value...) b = append(b, value...)
b = append(b, seps[0]) b = append(b, sep)
} }
} }
@ -138,9 +138,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
continue continue
} }
b = append(b, name...) b = append(b, name...)
b = append(b, seps[0]) b = append(b, sep)
b = append(b, value...) b = append(b, value...)
b = append(b, seps[0]) b = append(b, sep)
} }
return xxhash.Sum64(b), b return xxhash.Sum64(b), b
} }

View file

@ -39,9 +39,9 @@ func StableHash(ls Labels) uint64 {
} }
b = append(b, v.Name...) b = append(b, v.Name...)
b = append(b, seps[0]) b = append(b, sep)
b = append(b, v.Value...) b = append(b, v.Value...)
b = append(b, seps[0]) b = append(b, sep)
} }
return xxhash.Sum64(b) return xxhash.Sum64(b)
} }

View file

@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 {
} }
b = append(b, name...) b = append(b, name...)
b = append(b, seps[0]) b = append(b, sep)
b = append(b, value...) b = append(b, value...)
b = append(b, seps[0]) b = append(b, sep)
pos = newPos pos = newPos
} }
return xxhash.Sum64(b) return xxhash.Sum64(b)

View file

@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 {
} }
b = append(b, v.Name...) b = append(b, v.Name...)
b = append(b, seps[0]) b = append(b, sep)
b = append(b, v.Value...) b = append(b, v.Value...)
b = append(b, seps[0]) b = append(b, sep)
} }
if h != nil { if h != nil {
return h.Sum64() return h.Sum64()

View file

@ -213,6 +213,10 @@ func (re Regexp) IsZero() bool {
// String returns the original string used to compile the regular expression. // String returns the original string used to compile the regular expression.
func (re Regexp) String() string { func (re Regexp) String() string {
if re.Regexp == nil {
return ""
}
str := re.Regexp.String() str := re.Regexp.String()
// Trim the anchor `^(?:` prefix and `)$` suffix. // Trim the anchor `^(?:` prefix and `)$` suffix.
return str[4 : len(str)-2] return str[4 : len(str)-2]
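The nil guard above makes String safe on a zero-value Regexp, which is what lets the YAML round-trip test below marshal the zero value as null without panicking. A minimal sketch of the same anchoring convention, assuming patterns are compiled as ^(?:expr)$ at construction time:

package main

import (
	"fmt"
	"regexp"
)

// Regexp mirrors the config wrapper: the compiled pattern is anchored as
// ^(?:<expr>)$ when it is built.
type Regexp struct{ *regexp.Regexp }

func NewRegexp(s string) (Regexp, error) {
	re, err := regexp.Compile("^(?:" + s + ")$")
	return Regexp{re}, err
}

// String returns the original, unanchored expression. The nil guard makes
// it safe to call on a zero-value Regexp, as in the patch above.
func (re Regexp) String() string {
	if re.Regexp == nil {
		return ""
	}
	str := re.Regexp.String()
	return str[4 : len(str)-2] // trim the ^(?: prefix and )$ suffix
}

func main() {
	var zero Regexp
	fmt.Printf("%q\n", zero.String()) // "" instead of a nil-pointer panic

	re, _ := NewRegexp("foo.*")
	fmt.Printf("%q\n", re.String()) // "foo.*"
}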

View file

@ -900,3 +900,16 @@ action: replace
}) })
} }
} }
func TestRegexp_ShouldMarshalAndUnmarshalZeroValue(t *testing.T) {
var zero Regexp
marshalled, err := yaml.Marshal(&zero)
require.NoError(t, err)
require.Equal(t, "null\n", string(marshalled))
var unmarshalled Regexp
err = yaml.Unmarshal(marshalled, &unmarshalled)
require.NoError(t, err)
require.Nil(t, unmarshalled.Regexp)
}

View file

@ -26,7 +26,6 @@ import (
"time" "time"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
@ -51,7 +50,7 @@ const (
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
// Enable experimental functions testing // Enable experimental functions testing
parser.EnableExperimentalFunctions = true parser.EnableExperimentalFunctions = true
goleak.VerifyTestMain(m) testutil.TolerantVerifyLeak(m)
} }
func TestQueryConcurrency(t *testing.T) { func TestQueryConcurrency(t *testing.T) {
@ -238,11 +237,11 @@ func (q *errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*lab
return errSeriesSet{err: q.err} return errSeriesSet{err: q.err}
} }
func (*errQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (*errQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
func (*errQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (*errQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
func (*errQuerier) Close() error { return nil } func (*errQuerier) Close() error { return nil }
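The querier stubs above pick up the new *storage.LabelHints parameter that now precedes the matchers on LabelValues and LabelNames. A hedged sketch of a no-op querier under that shape; the exact hint fields (for example a result-size limit) are an assumption based on how the hints are used, not a definitive API listing:

package main

import (
	"context"
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/util/annotations"
)

type noopQuerier struct{}

// LabelValues receives hints ahead of the matchers; an implementation may
// honor them (e.g. truncate results early) or ignore them entirely.
func (noopQuerier) LabelValues(_ context.Context, _ string, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return nil, nil, nil
}

func (noopQuerier) LabelNames(_ context.Context, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return nil, nil, nil
}

func main() {
	q := noopQuerier{}
	names, _, _ := q.LabelNames(context.Background(), &storage.LabelHints{})
	fmt.Println(names)
}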

View file

@ -48,7 +48,6 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi
var t int64 var t int64
t, f.currentH = f.Iterator.AtHistogram(f.currentH) t, f.currentH = f.Iterator.AtHistogram(f.currentH)
if value.IsStaleNaN(f.currentH.Sum) { if value.IsStaleNaN(f.currentH.Sum) {
f.setLastH(f.currentH)
h = &histogram.Histogram{Sum: f.currentH.Sum} h = &histogram.Histogram{Sum: f.currentH.Sum}
return t, h return t, h
} }
@ -63,9 +62,13 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi
return t, h return t, h
} }
h.CounterResetHint = f.getResetHint(f.currentH) returnValue := histogram.Histogram{
h.Count = f.currentH.Count CounterResetHint: f.getResetHint(f.currentH),
h.Sum = f.currentH.Sum Count: f.currentH.Count,
Sum: f.currentH.Sum,
}
returnValue.CopyTo(h)
f.setLastH(f.currentH) f.setLastH(f.currentH)
return t, h return t, h
} }
@ -77,7 +80,6 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
var t int64 var t int64
t, f.currentFH = f.Iterator.AtFloatHistogram(f.currentFH) t, f.currentFH = f.Iterator.AtFloatHistogram(f.currentFH)
if value.IsStaleNaN(f.currentFH.Sum) { if value.IsStaleNaN(f.currentFH.Sum) {
f.setLastFH(f.currentFH)
return t, &histogram.FloatHistogram{Sum: f.currentFH.Sum} return t, &histogram.FloatHistogram{Sum: f.currentFH.Sum}
} }
@ -91,9 +93,13 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
return t, fh return t, fh
} }
fh.CounterResetHint = f.getFloatResetHint(f.currentFH.CounterResetHint) returnValue := histogram.FloatHistogram{
fh.Count = f.currentFH.Count CounterResetHint: f.getFloatResetHint(f.currentFH.CounterResetHint),
fh.Sum = f.currentFH.Sum Count: f.currentFH.Count,
Sum: f.currentFH.Sum,
}
returnValue.CopyTo(fh)
f.setLastFH(f.currentFH) f.setLastFH(f.currentFH)
return t, fh return t, fh
} }
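Rather than mutating three fields of the caller-supplied histogram in place, the iterator now builds a stats-only value and copies it wholesale into the destination. With a reused destination this also clears any bucket data left over from a previous sample, which in-place assignment would have silently kept. A sketch of the pattern, relying on CopyTo's replace-everything semantics from the model/histogram package:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// A reused destination that still carries bucket data from a prior call.
	dst := &histogram.Histogram{
		Count:           10,
		Sum:             3.5,
		PositiveBuckets: []int64{1, 2, 3},
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
	}

	// Stats-only view of the current sample: count and sum, no buckets.
	stats := histogram.Histogram{Count: 42, Sum: 9.9}

	// CopyTo replaces all of dst, so the stale buckets go away as well.
	// Assigning dst.Count and dst.Sum individually would have kept them.
	stats.CopyTo(dst)

	fmt.Println(dst.Count, dst.Sum, len(dst.PositiveBuckets)) // 42 9.9 0
}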

View file

@ -14,62 +14,132 @@
package promql package promql
import ( import (
"fmt"
"math"
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/tsdbutil"
) )
func TestHistogramStatsDecoding(t *testing.T) { func TestHistogramStatsDecoding(t *testing.T) {
histograms := []*histogram.Histogram{ cases := []struct {
tsdbutil.GenerateTestHistogram(0), name string
tsdbutil.GenerateTestHistogram(1), histograms []*histogram.Histogram
tsdbutil.GenerateTestHistogram(2), expectedHints []histogram.CounterResetHint
tsdbutil.GenerateTestHistogram(2), }{
} {
histograms[0].CounterResetHint = histogram.NotCounterReset name: "unknown counter reset triggers detection",
histograms[1].CounterResetHint = histogram.UnknownCounterReset histograms: []*histogram.Histogram{
histograms[2].CounterResetHint = histogram.CounterReset tsdbutil.GenerateTestHistogramWithHint(0, histogram.NotCounterReset),
histograms[3].CounterResetHint = histogram.UnknownCounterReset tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
tsdbutil.GenerateTestHistogramWithHint(2, histogram.CounterReset),
expectedHints := []histogram.CounterResetHint{ tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset),
histogram.NotCounterReset, },
histogram.NotCounterReset, expectedHints: []histogram.CounterResetHint{
histogram.CounterReset, histogram.NotCounterReset,
histogram.NotCounterReset, histogram.NotCounterReset,
histogram.CounterReset,
histogram.NotCounterReset,
},
},
{
name: "stale sample before unknown reset hint",
histograms: []*histogram.Histogram{
tsdbutil.GenerateTestHistogramWithHint(0, histogram.NotCounterReset),
tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
{Sum: math.Float64frombits(value.StaleNaN)},
tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
},
expectedHints: []histogram.CounterResetHint{
histogram.NotCounterReset,
histogram.NotCounterReset,
histogram.UnknownCounterReset,
histogram.NotCounterReset,
},
},
{
name: "unknown counter reset at the beginning",
histograms: []*histogram.Histogram{
tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
},
expectedHints: []histogram.CounterResetHint{
histogram.NotCounterReset,
},
},
{
name: "detect real counter reset",
histograms: []*histogram.Histogram{
tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset),
tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
},
expectedHints: []histogram.CounterResetHint{
histogram.NotCounterReset,
histogram.CounterReset,
},
},
{
name: "detect real counter reset after stale NaN",
histograms: []*histogram.Histogram{
tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset),
{Sum: math.Float64frombits(value.StaleNaN)},
tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
},
expectedHints: []histogram.CounterResetHint{
histogram.NotCounterReset,
histogram.UnknownCounterReset,
histogram.CounterReset,
},
},
} }
t.Run("histogram_stats", func(t *testing.T) { for _, tc := range cases {
decodedStats := make([]*histogram.Histogram, 0) t.Run(tc.name, func(t *testing.T) {
statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil)) t.Run("histogram_stats", func(t *testing.T) {
for statsIterator.Next() != chunkenc.ValNone { decodedStats := make([]*histogram.Histogram, 0)
_, h := statsIterator.AtHistogram(nil) statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil))
decodedStats = append(decodedStats, h) for statsIterator.Next() != chunkenc.ValNone {
} _, h := statsIterator.AtHistogram(nil)
for i := 0; i < len(histograms); i++ { decodedStats = append(decodedStats, h)
require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint) }
require.Equal(t, histograms[i].Count, decodedStats[i].Count) for i := 0; i < len(tc.histograms); i++ {
require.Equal(t, histograms[i].Sum, decodedStats[i].Sum) require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint, fmt.Sprintf("mismatch in counter reset hint for histogram %d", i))
} h := tc.histograms[i]
}) if value.IsStaleNaN(h.Sum) {
t.Run("float_histogram_stats", func(t *testing.T) { require.True(t, value.IsStaleNaN(decodedStats[i].Sum))
decodedStats := make([]*histogram.FloatHistogram, 0) require.Equal(t, uint64(0), decodedStats[i].Count)
statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil)) } else {
for statsIterator.Next() != chunkenc.ValNone { require.Equal(t, tc.histograms[i].Count, decodedStats[i].Count)
_, h := statsIterator.AtFloatHistogram(nil) require.Equal(t, tc.histograms[i].Sum, decodedStats[i].Sum)
decodedStats = append(decodedStats, h) }
} }
for i := 0; i < len(histograms); i++ { })
fh := histograms[i].ToFloat(nil) t.Run("float_histogram_stats", func(t *testing.T) {
require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint) decodedStats := make([]*histogram.FloatHistogram, 0)
require.Equal(t, fh.Count, decodedStats[i].Count) statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil))
require.Equal(t, fh.Sum, decodedStats[i].Sum) for statsIterator.Next() != chunkenc.ValNone {
} _, h := statsIterator.AtFloatHistogram(nil)
}) decodedStats = append(decodedStats, h)
}
for i := 0; i < len(tc.histograms); i++ {
require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint)
fh := tc.histograms[i].ToFloat(nil)
if value.IsStaleNaN(fh.Sum) {
require.True(t, value.IsStaleNaN(decodedStats[i].Sum))
require.Equal(t, float64(0), decodedStats[i].Count)
} else {
require.Equal(t, fh.Count, decodedStats[i].Count)
require.Equal(t, fh.Sum, decodedStats[i].Sum)
}
}
})
})
}
} }
type histogramSeries struct { type histogramSeries struct {

View file

@ -43,7 +43,6 @@ import (
int int64 int int64
uint uint64 uint uint64
float float64 float float64
duration time.Duration
} }
@ -85,6 +84,7 @@ NEGATIVE_BUCKETS_DESC
ZERO_BUCKET_DESC ZERO_BUCKET_DESC
ZERO_BUCKET_WIDTH_DESC ZERO_BUCKET_WIDTH_DESC
CUSTOM_VALUES_DESC CUSTOM_VALUES_DESC
COUNTER_RESET_HINT_DESC
%token histogramDescEnd %token histogramDescEnd
// Operators. // Operators.
@ -150,6 +150,14 @@ START
END END
%token preprocessorEnd %token preprocessorEnd
// Counter reset hints.
%token counterResetHintsStart
%token <item>
UNKNOWN_COUNTER_RESET
COUNTER_RESET
NOT_COUNTER_RESET
GAUGE_TYPE
%token counterResetHintsEnd
// Start symbols for the generated parser. // Start symbols for the generated parser.
%token startSymbolsStart %token startSymbolsStart
@ -164,7 +172,7 @@ START_METRIC_SELECTOR
// Type definitions for grammar rules. // Type definitions for grammar rules.
%type <matchers> label_match_list %type <matchers> label_match_list
%type <matcher> label_matcher %type <matcher> label_matcher
%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier %type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier counter_reset_hint
%type <labels> label_set metric %type <labels> label_set metric
%type <lblList> label_set_list %type <lblList> label_set_list
%type <label> label_set_item %type <label> label_set_item
@ -176,8 +184,7 @@ START_METRIC_SELECTOR
%type <int> int %type <int> int
%type <uint> uint %type <uint> uint
%type <float> number series_value signed_number signed_or_unsigned_number %type <float> number series_value signed_number signed_or_unsigned_number
%type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector %type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector
%type <duration> duration maybe_duration
%start start %start start
@ -218,7 +225,7 @@ expr :
| binary_expr | binary_expr
| function_call | function_call
| matrix_selector | matrix_selector
| number_literal | number_duration_literal
| offset_expr | offset_expr
| paren_expr | paren_expr
| string_literal | string_literal
@ -415,18 +422,22 @@ paren_expr : LEFT_PAREN expr RIGHT_PAREN
* Offset modifiers. * Offset modifiers.
*/ */
offset_expr: expr OFFSET duration offset_expr: expr OFFSET number_duration_literal
{ {
yylex.(*parser).addOffset($1, $3) numLit, _ := $3.(*NumberLiteral)
$$ = $1 dur := time.Duration(numLit.Val * 1000) * time.Millisecond
yylex.(*parser).addOffset($1, dur)
$$ = $1
} }
| expr OFFSET SUB duration | expr OFFSET SUB number_duration_literal
{ {
yylex.(*parser).addOffset($1, -$4) numLit, _ := $4.(*NumberLiteral)
$$ = $1 dur := time.Duration(numLit.Val * 1000) * time.Millisecond
yylex.(*parser).addOffset($1, -dur)
$$ = $1
} }
| expr OFFSET error | expr OFFSET error
{ yylex.(*parser).unexpected("offset", "duration"); $$ = $1 } { yylex.(*parser).unexpected("offset", "number or duration"); $$ = $1 }
; ;
/* /*
* @ modifiers. * @ modifiers.
@ -452,7 +463,7 @@ at_modifier_preprocessors: START | END;
* Subquery and range selectors. * Subquery and range selectors.
*/ */
matrix_selector : expr LEFT_BRACKET duration RIGHT_BRACKET matrix_selector : expr LEFT_BRACKET number_duration_literal RIGHT_BRACKET
{ {
var errMsg string var errMsg string
vs, ok := $1.(*VectorSelector) vs, ok := $1.(*VectorSelector)
@ -469,32 +480,44 @@ matrix_selector : expr LEFT_BRACKET duration RIGHT_BRACKET
yylex.(*parser).addParseErrf(errRange, errMsg) yylex.(*parser).addParseErrf(errRange, errMsg)
} }
numLit, _ := $3.(*NumberLiteral)
$$ = &MatrixSelector{ $$ = &MatrixSelector{
VectorSelector: $1.(Expr), VectorSelector: $1.(Expr),
Range: $3, Range: time.Duration(numLit.Val * 1000) * time.Millisecond,
EndPos: yylex.(*parser).lastClosing, EndPos: yylex.(*parser).lastClosing,
} }
} }
; ;
subquery_expr : expr LEFT_BRACKET duration COLON maybe_duration RIGHT_BRACKET subquery_expr : expr LEFT_BRACKET number_duration_literal COLON number_duration_literal RIGHT_BRACKET
{ {
numLitRange, _ := $3.(*NumberLiteral)
numLitStep, _ := $5.(*NumberLiteral)
$$ = &SubqueryExpr{ $$ = &SubqueryExpr{
Expr: $1.(Expr), Expr: $1.(Expr),
Range: $3, Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond,
Step: $5, Step: time.Duration(numLitStep.Val * 1000) * time.Millisecond,
EndPos: $6.Pos + 1, EndPos: $6.Pos + 1,
} }
} }
| expr LEFT_BRACKET duration COLON duration error | expr LEFT_BRACKET number_duration_literal COLON RIGHT_BRACKET
{
numLitRange, _ := $3.(*NumberLiteral)
$$ = &SubqueryExpr{
Expr: $1.(Expr),
Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond,
Step: 0,
EndPos: $5.Pos + 1,
}
}
| expr LEFT_BRACKET number_duration_literal COLON number_duration_literal error
{ yylex.(*parser).unexpected("subquery selector", "\"]\""); $$ = $1 } { yylex.(*parser).unexpected("subquery selector", "\"]\""); $$ = $1 }
| expr LEFT_BRACKET duration COLON error | expr LEFT_BRACKET number_duration_literal COLON error
{ yylex.(*parser).unexpected("subquery selector", "duration or \"]\""); $$ = $1 } { yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\""); $$ = $1 }
| expr LEFT_BRACKET duration error | expr LEFT_BRACKET number_duration_literal error
{ yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\""); $$ = $1 } { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\""); $$ = $1 }
| expr LEFT_BRACKET error | expr LEFT_BRACKET error
{ yylex.(*parser).unexpected("subquery selector", "duration"); $$ = $1 } { yylex.(*parser).unexpected("subquery selector", "number or duration"); $$ = $1 }
; ;
/* /*
@ -825,6 +848,11 @@ histogram_desc_item
$$ = yylex.(*parser).newMap() $$ = yylex.(*parser).newMap()
$$["n_offset"] = $3 $$["n_offset"] = $3
} }
| COUNTER_RESET_HINT_DESC COLON counter_reset_hint
{
$$ = yylex.(*parser).newMap()
$$["counter_reset_hint"] = $3
}
; ;
bucket_set : LEFT_BRACKET bucket_set_list SPACE RIGHT_BRACKET bucket_set : LEFT_BRACKET bucket_set_list SPACE RIGHT_BRACKET
@ -848,6 +876,7 @@ bucket_set_list : bucket_set_list SPACE number
| bucket_set_list error | bucket_set_list error
; ;
counter_reset_hint : UNKNOWN_COUNTER_RESET | COUNTER_RESET | NOT_COUNTER_RESET | GAUGE_TYPE;
/* /*
* Keyword lists. * Keyword lists.
@ -866,16 +895,43 @@ match_op : EQL | NEQ | EQL_REGEX | NEQ_REGEX ;
* Literals. * Literals.
*/ */
number_literal : NUMBER number_duration_literal : NUMBER
{ {
$$ = &NumberLiteral{ $$ = &NumberLiteral{
Val: yylex.(*parser).number($1.Val), Val: yylex.(*parser).number($1.Val),
PosRange: $1.PositionRange(), PosRange: $1.PositionRange(),
}
} }
| DURATION
{
var err error
var dur time.Duration
dur, err = parseDuration($1.Val)
if err != nil {
yylex.(*parser).addParseErr($1.PositionRange(), err)
}
$$ = &NumberLiteral{
Val: dur.Seconds(),
PosRange: $1.PositionRange(),
}
} }
; ;
number : NUMBER { $$ = yylex.(*parser).number($1.Val) } ; number : NUMBER
{
$$ = yylex.(*parser).number($1.Val)
}
| DURATION
{
var err error
var dur time.Duration
dur, err = parseDuration($1.Val)
if err != nil {
yylex.(*parser).addParseErr($1.PositionRange(), err)
}
$$ = dur.Seconds()
}
;
signed_number : ADD number { $$ = $2 } signed_number : ADD number { $$ = $2 }
| SUB number { $$ = -$2 } | SUB number { $$ = -$2 }
@ -897,17 +953,6 @@ int : SUB uint { $$ = -int64($2) }
| uint { $$ = int64($1) } | uint { $$ = int64($1) }
; ;
duration : DURATION
{
var err error
$$, err = parseDuration($1.Val)
if err != nil {
yylex.(*parser).addParseErr($1.PositionRange(), err)
}
}
;
string_literal : STRING string_literal : STRING
{ {
$$ = &StringLiteral{ $$ = &StringLiteral{
@ -931,11 +976,6 @@ string_identifier : STRING
* Wrappers for optional arguments. * Wrappers for optional arguments.
*/ */
maybe_duration : /* empty */
{$$ = 0}
| duration
;
maybe_grouping_labels: /* empty */ { $$ = nil } maybe_grouping_labels: /* empty */ { $$ = nil }
| grouping_labels | grouping_labels
; ;
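The grammar now funnels both plain numbers and duration syntax through the single number_duration_literal rule, carrying the value as float seconds and converting to time.Duration at each use site. A sketch of that conversion; scaling by 1000 before the integer cast keeps millisecond precision for fractional inputs like 4.18:

package main

import (
	"fmt"
	"time"
)

// secondsToDuration mirrors the conversion in the grammar actions above:
// the literal's value is in (possibly fractional) seconds.
func secondsToDuration(seconds float64) time.Duration {
	return time.Duration(seconds*1000) * time.Millisecond
}

func main() {
	fmt.Println(secondsToDuration(300))   // 5m0s  -> foo[300] means foo[5m]
	fmt.Println(secondsToDuration(4.18))  // 4.18s -> foo[4.18] means foo[4s180ms]
	fmt.Println(secondsToDuration(0.003)) // 3ms
}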

File diff suppressed because it is too large

View file

@ -137,16 +137,24 @@ var key = map[string]ItemType{
} }
var histogramDesc = map[string]ItemType{ var histogramDesc = map[string]ItemType{
"sum": SUM_DESC, "sum": SUM_DESC,
"count": COUNT_DESC, "count": COUNT_DESC,
"schema": SCHEMA_DESC, "schema": SCHEMA_DESC,
"offset": OFFSET_DESC, "offset": OFFSET_DESC,
"n_offset": NEGATIVE_OFFSET_DESC, "n_offset": NEGATIVE_OFFSET_DESC,
"buckets": BUCKETS_DESC, "buckets": BUCKETS_DESC,
"n_buckets": NEGATIVE_BUCKETS_DESC, "n_buckets": NEGATIVE_BUCKETS_DESC,
"z_bucket": ZERO_BUCKET_DESC, "z_bucket": ZERO_BUCKET_DESC,
"z_bucket_w": ZERO_BUCKET_WIDTH_DESC, "z_bucket_w": ZERO_BUCKET_WIDTH_DESC,
"custom_values": CUSTOM_VALUES_DESC, "custom_values": CUSTOM_VALUES_DESC,
"counter_reset_hint": COUNTER_RESET_HINT_DESC,
}
var counterResetHints = map[string]ItemType{
"unknown": UNKNOWN_COUNTER_RESET,
"reset": COUNTER_RESET,
"not_reset": NOT_COUNTER_RESET,
"gauge": GAUGE_TYPE,
} }
// ItemTypeStr is the default string representations for common Items. It does not // ItemTypeStr is the default string representations for common Items. It does not
@ -478,7 +486,7 @@ func lexStatements(l *Lexer) stateFn {
skipSpaces(l) skipSpaces(l)
} }
l.bracketOpen = true l.bracketOpen = true
return lexDuration return lexNumberOrDuration
case r == ']': case r == ']':
if !l.bracketOpen { if !l.bracketOpen {
return l.errorf("unexpected right bracket %q", r) return l.errorf("unexpected right bracket %q", r)
@ -585,6 +593,11 @@ Loop:
return lexHistogram return lexHistogram
} }
} }
if desc, ok := counterResetHints[strings.ToLower(word)]; ok {
l.emit(desc)
return lexHistogram
}
l.errorf("bad histogram descriptor found: %q", word) l.errorf("bad histogram descriptor found: %q", word)
break Loop break Loop
} }
@ -846,18 +859,6 @@ func lexLineComment(l *Lexer) stateFn {
return lexStatements return lexStatements
} }
func lexDuration(l *Lexer) stateFn {
if l.scanNumber() {
return l.errorf("missing unit character in duration")
}
if !acceptRemainingDuration(l) {
return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos])
}
l.backup()
l.emit(DURATION)
return lexStatements
}
// lexNumber scans a number: decimal, hex, oct or float. // lexNumber scans a number: decimal, hex, oct or float.
func lexNumber(l *Lexer) stateFn { func lexNumber(l *Lexer) stateFn {
if !l.scanNumber() { if !l.scanNumber() {
@ -909,6 +910,7 @@ func acceptRemainingDuration(l *Lexer) bool {
// scanNumber scans numbers of different formats. The scanned Item is // scanNumber scans numbers of different formats. The scanned Item is
// not necessarily a valid number. This case is caught by the parser. // not necessarily a valid number. This case is caught by the parser.
func (l *Lexer) scanNumber() bool { func (l *Lexer) scanNumber() bool {
initialPos := l.pos
// Modify the digit pattern if the number is hexadecimal. // Modify the digit pattern if the number is hexadecimal.
digitPattern := "0123456789" digitPattern := "0123456789"
// Disallow hexadecimal in series descriptions as the syntax is ambiguous. // Disallow hexadecimal in series descriptions as the syntax is ambiguous.
@ -980,7 +982,10 @@ func (l *Lexer) scanNumber() bool {
// Handle digits at the end since we already consumed before this loop. // Handle digits at the end since we already consumed before this loop.
l.acceptRun(digitPattern) l.acceptRun(digitPattern)
} }
// Empty string is not a valid number.
if l.pos == initialPos {
return false
}
// Next thing must not be alphanumeric unless it's the times token // Next thing must not be alphanumeric unless it's the times token
// for series repetitions. // for series repetitions.
if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) { if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) {
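With lexDuration gone, an opening bracket hands off to lexNumberOrDuration, and the new empty-input check in scanNumber turns foo[] into the generic "bad number or duration syntax" error instead of a duration-specific one. A short usage sketch against the public parser, with expected outputs inferred from the test changes below:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Bare numbers are now accepted wherever a duration used to be required
	// and are read as seconds.
	expr, err := parser.ParseExpr(`rate(http_requests_total[300])`)
	fmt.Println(expr, err) // rate(http_requests_total[5m]) <nil>

	// The same applies to offsets.
	expr, err = parser.ParseExpr(`http_requests_total offset 50`)
	fmt.Println(expr, err) // the offset is parsed as 50s

	// An empty range now fails in scanNumber with the generic message.
	_, err = parser.ParseExpr(`http_requests_total[]`)
	fmt.Println(err) // ... bad number or duration syntax: ""
}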

View file

@ -580,6 +580,28 @@ func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.
} }
} }
val, ok = (*desc)["counter_reset_hint"]
if ok {
resetHint, ok := val.(Item)
if ok {
switch resetHint.Typ {
case UNKNOWN_COUNTER_RESET:
output.CounterResetHint = histogram.UnknownCounterReset
case COUNTER_RESET:
output.CounterResetHint = histogram.CounterReset
case NOT_COUNTER_RESET:
output.CounterResetHint = histogram.NotCounterReset
case GAUGE_TYPE:
output.CounterResetHint = histogram.GaugeType
default:
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing counter_reset_hint: unknown value %v", resetHint.Typ)
}
} else {
p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing counter_reset_hint: %v", val)
}
}
buckets, spans := p.buildHistogramBucketsAndSpans(desc, "buckets", "offset") buckets, spans := p.buildHistogramBucketsAndSpans(desc, "buckets", "offset")
output.PositiveBuckets = buckets output.PositiveBuckets = buckets
output.PositiveSpans = spans output.PositiveSpans = spans
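counter_reset_hint joins the histogram descriptors accepted in series notation, mapping the four lexer tokens onto the model's hint values. A small usage sketch through the exported entry point; the printed value is the numeric hint, where gauge corresponds to histogram.GaugeType:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Valid hint values are unknown, reset, not_reset and gauge.
	_, vals, err := parser.ParseSeriesDesc(`{} {{count:3 sum:1.5 counter_reset_hint:gauge}}`)
	if err != nil {
		panic(err)
	}
	fmt.Println(vals[0].Histogram.CounterResetHint) // numeric value of histogram.GaugeType
}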

View file

@ -2133,6 +2133,115 @@ var testExpr = []struct {
EndPos: 25, EndPos: 25,
}, },
}, },
{
input: `test{a="b"}[5m] OFFSET 3600`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "test",
OriginalOffset: 1 * time.Hour,
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, "a", "b"),
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 11,
},
},
Range: 5 * time.Minute,
EndPos: 27,
},
},
{
input: `foo[3ms] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 3 * time.Millisecond,
EndPos: 16,
},
},
{
input: `foo[4s180ms] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 180*time.Millisecond,
EndPos: 20,
},
},
{
input: `foo[4.18] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 180*time.Millisecond,
EndPos: 17,
},
},
{
input: `foo[4s18ms] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 18*time.Millisecond,
EndPos: 19,
},
},
{
input: `foo[4.018] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 18*time.Millisecond,
EndPos: 18,
},
},
{ {
input: `test{a="b"}[5y] @ 1603774699`, input: `test{a="b"}[5y] @ 1603774699`,
expected: &MatrixSelector{ expected: &MatrixSelector{
@ -2152,15 +2261,50 @@ var testExpr = []struct {
EndPos: 28, EndPos: 28,
}, },
}, },
{
input: "test[5]",
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "test",
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 4,
},
},
Range: 5 * time.Second,
EndPos: 7,
},
},
{
input: `some_metric[5m] @ 1m`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "some_metric",
Timestamp: makeInt64Pointer(60000),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some_metric"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 11,
},
},
Range: 5 * time.Minute,
EndPos: 20,
},
},
{ {
input: `foo[5mm]`, input: `foo[5mm]`,
fail: true, fail: true,
errMsg: "bad duration syntax: \"5mm\"", errMsg: "bad number or duration syntax: \"5mm\"",
}, },
{ {
input: `foo[5m1]`, input: `foo[5m1]`,
fail: true, fail: true,
errMsg: "bad duration syntax: \"5m1\"", errMsg: "bad number or duration syntax: \"5m1\"",
}, },
{ {
input: `foo[5m:1m1]`, input: `foo[5m:1m1]`,
@ -2194,17 +2338,12 @@ var testExpr = []struct {
{ {
input: `foo[]`, input: `foo[]`,
fail: true, fail: true,
errMsg: "missing unit character in duration", errMsg: "bad number or duration syntax: \"\"",
}, },
{ {
input: `foo[1]`, input: `foo[-1]`,
fail: true, fail: true,
errMsg: "missing unit character in duration", errMsg: "bad number or duration syntax: \"\"",
},
{
input: `some_metric[5m] OFFSET 1`,
fail: true,
errMsg: "unexpected number \"1\" in offset, expected duration",
}, },
{ {
input: `some_metric[5m] OFFSET 1mm`, input: `some_metric[5m] OFFSET 1mm`,
@ -2214,18 +2353,13 @@ var testExpr = []struct {
{ {
input: `some_metric[5m] OFFSET`, input: `some_metric[5m] OFFSET`,
fail: true, fail: true,
errMsg: "unexpected end of input in offset, expected duration", errMsg: "unexpected end of input in offset, expected number or duration",
}, },
{ {
input: `some_metric OFFSET 1m[5m]`, input: `some_metric OFFSET 1m[5m]`,
fail: true, fail: true,
errMsg: "1:22: parse error: no offset modifiers allowed before range", errMsg: "1:22: parse error: no offset modifiers allowed before range",
}, },
{
input: `some_metric[5m] @ 1m`,
fail: true,
errMsg: "1:19: parse error: unexpected duration \"1m\" in @, expected timestamp",
},
{ {
input: `some_metric[5m] @`, input: `some_metric[5m] @`,
fail: true, fail: true,
@ -2910,6 +3044,11 @@ var testExpr = []struct {
errMsg: "illegal character U+002E '.' in escape sequence", errMsg: "illegal character U+002E '.' in escape sequence",
}, },
// Subquery. // Subquery.
{
input: `foo{bar="baz"}[`,
fail: true,
errMsg: `1:16: parse error: bad number or duration syntax: ""`,
},
{ {
input: `foo{bar="baz"}[10m:6s]`, input: `foo{bar="baz"}[10m:6s]`,
expected: &SubqueryExpr{ expected: &SubqueryExpr{
@ -3899,32 +4038,34 @@ func TestParseHistogramSeries(t *testing.T) {
}, },
{ {
name: "all properties used", name: "all properties used",
input: `{} {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}`, input: `{} {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}}`,
expected: []histogram.FloatHistogram{{ expected: []histogram.FloatHistogram{{
Schema: 1, Schema: 1,
Sum: -0.3, Sum: -0.3,
Count: 3.1, Count: 3.1,
ZeroCount: 7.1, ZeroCount: 7.1,
ZeroThreshold: 0.05, ZeroThreshold: 0.05,
PositiveBuckets: []float64{5.1, 10, 7}, PositiveBuckets: []float64{5.1, 10, 7},
PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}}, PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}},
NegativeBuckets: []float64{4.1, 5}, NegativeBuckets: []float64{4.1, 5},
NegativeSpans: []histogram.Span{{Offset: -5, Length: 2}}, NegativeSpans: []histogram.Span{{Offset: -5, Length: 2}},
CounterResetHint: histogram.GaugeType,
}}, }},
}, },
{ {
name: "all properties used - with spaces", name: "all properties used - with spaces",
input: `{} {{schema:1 sum:0.3 count:3 z_bucket:7 z_bucket_w:5 buckets:[5 10 7 ] offset:-3 n_buckets:[4 5] n_offset:5 }}`, input: `{} {{schema:1 sum:0.3 count:3 z_bucket:7 z_bucket_w:5 buckets:[5 10 7 ] offset:-3 n_buckets:[4 5] n_offset:5 counter_reset_hint:gauge }}`,
expected: []histogram.FloatHistogram{{ expected: []histogram.FloatHistogram{{
Schema: 1, Schema: 1,
Sum: 0.3, Sum: 0.3,
Count: 3, Count: 3,
ZeroCount: 7, ZeroCount: 7,
ZeroThreshold: 5, ZeroThreshold: 5,
PositiveBuckets: []float64{5, 10, 7}, PositiveBuckets: []float64{5, 10, 7},
PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}}, PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}},
NegativeBuckets: []float64{4, 5}, NegativeBuckets: []float64{4, 5},
NegativeSpans: []histogram.Span{{Offset: 5, Length: 2}}, NegativeSpans: []histogram.Span{{Offset: 5, Length: 2}},
CounterResetHint: histogram.GaugeType,
}}, }},
}, },
{ {
@ -4111,6 +4252,39 @@ func TestParseHistogramSeries(t *testing.T) {
input: `{} {{ schema:1}}`, input: `{} {{ schema:1}}`,
expectedError: `1:7: parse error: unexpected "<Item 57372>" "schema" in series values`, expectedError: `1:7: parse error: unexpected "<Item 57372>" "schema" in series values`,
}, },
{
name: "invalid counter reset hint value",
input: `{} {{counter_reset_hint:foo}}`,
expectedError: `1:25: parse error: bad histogram descriptor found: "foo"`,
},
{
name: "'unknown' counter reset hint value",
input: `{} {{counter_reset_hint:unknown}}`,
expected: []histogram.FloatHistogram{{
CounterResetHint: histogram.UnknownCounterReset,
}},
},
{
name: "'reset' counter reset hint value",
input: `{} {{counter_reset_hint:reset}}`,
expected: []histogram.FloatHistogram{{
CounterResetHint: histogram.CounterReset,
}},
},
{
name: "'not_reset' counter reset hint value",
input: `{} {{counter_reset_hint:not_reset}}`,
expected: []histogram.FloatHistogram{{
CounterResetHint: histogram.NotCounterReset,
}},
},
{
name: "'gauge' counter reset hint value",
input: `{} {{counter_reset_hint:gauge}}`,
expected: []histogram.FloatHistogram{{
CounterResetHint: histogram.GaugeType,
}},
},
} { } {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
_, vals, err := ParseSeriesDesc(test.input) _, vals, err := ParseSeriesDesc(test.input)

View file

@ -55,6 +55,11 @@ const (
DefaultMaxSamplesPerQuery = 10000 DefaultMaxSamplesPerQuery = 10000
) )
type TBRun interface {
testing.TB
Run(string, func(*testing.T)) bool
}
var testStartTime = time.Unix(0, 0).UTC() var testStartTime = time.Unix(0, 0).UTC()
// LoadedStorage returns storage with generated data using the provided load statements. // LoadedStorage returns storage with generated data using the provided load statements.
@ -89,7 +94,7 @@ func NewTestEngine(enablePerStepStats bool, lookbackDelta time.Duration, maxSamp
} }
// RunBuiltinTests runs an acceptance test suite against the provided engine. // RunBuiltinTests runs an acceptance test suite against the provided engine.
func RunBuiltinTests(t *testing.T, engine promql.QueryEngine) { func RunBuiltinTests(t TBRun, engine promql.QueryEngine) {
t.Cleanup(func() { parser.EnableExperimentalFunctions = false }) t.Cleanup(func() { parser.EnableExperimentalFunctions = false })
parser.EnableExperimentalFunctions = true parser.EnableExperimentalFunctions = true

View file

@ -10,22 +10,54 @@ eval instant at 10s metric @ 100
metric{job="1"} 10 metric{job="1"} 10
metric{job="2"} 20 metric{job="2"} 20
eval instant at 10s metric @ 100s
metric{job="1"} 10
metric{job="2"} 20
eval instant at 10s metric @ 1m40s
metric{job="1"} 10
metric{job="2"} 20
eval instant at 10s metric @ 100 offset 50s eval instant at 10s metric @ 100 offset 50s
metric{job="1"} 5 metric{job="1"} 5
metric{job="2"} 10 metric{job="2"} 10
eval instant at 10s metric @ 100 offset 50
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric offset 50s @ 100 eval instant at 10s metric offset 50s @ 100
metric{job="1"} 5 metric{job="1"} 5
metric{job="2"} 10 metric{job="2"} 10
eval instant at 10s metric offset 50 @ 100
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric @ 0 offset -50s eval instant at 10s metric @ 0 offset -50s
metric{job="1"} 5 metric{job="1"} 5
metric{job="2"} 10 metric{job="2"} 10
eval instant at 10s metric @ 0 offset -50
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric offset -50s @ 0 eval instant at 10s metric offset -50s @ 0
metric{job="1"} 5 metric{job="1"} 5
metric{job="2"} 10 metric{job="2"} 10
eval instant at 10s metric offset -50 @ 0
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric @ 0 offset -50s
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric @ 0 offset -50
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s -metric @ 100 eval instant at 10s -metric @ 100
{job="1"} -10 {job="1"} -10
{job="2"} -20 {job="2"} -20
@ -48,6 +80,12 @@ eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100 offset 50s)
eval instant at 25s sum_over_time(metric{job="1"}[100s] offset 50s @ 100) eval instant at 25s sum_over_time(metric{job="1"}[100s] offset 50s @ 100)
{job="1"} 15 {job="1"} 15
eval instant at 25s sum_over_time(metric{job="1"}[100] @ 100 offset 50)
{job="1"} 15
eval instant at 25s sum_over_time(metric{job="1"}[100] offset 50s @ 100)
{job="1"} 15
# Different timestamps. # Different timestamps.
eval instant at 25s metric{job="1"} @ 50 + metric{job="1"} @ 100 eval instant at 25s metric{job="1"} @ 50 + metric{job="1"} @ 100
{job="1"} 15 {job="1"} 15
@ -58,6 +96,9 @@ eval instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metri
eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100) + label_replace(sum_over_time(metric{job="2"}[100s] @ 100), "job", "1", "", "") eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100) + label_replace(sum_over_time(metric{job="2"}[100s] @ 100), "job", "1", "", "")
{job="1"} 165 {job="1"} 165
eval instant at 25s sum_over_time(metric{job="1"}[100] @ 100) + label_replace(sum_over_time(metric{job="2"}[100] @ 100), "job", "1", "", "")
{job="1"} 165
# Subqueries. # Subqueries.
# 10*(1+2+...+9) + 10. # 10*(1+2+...+9) + 10.
@ -72,6 +113,10 @@ eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] @ 100 offset 20s)
eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] offset 20s @ 100) eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] offset 20s @ 100)
{job="1"} 288 {job="1"} 288
# 10*(1+2+...+7) + 8.
eval instant at 25s sum_over_time(metric{job="1"}[100:1] offset 20 @ 100)
{job="1"} 288
# Subquery with different timestamps. # Subquery with different timestamps.
# Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries. # Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries.

View file

@ -10,6 +10,11 @@ eval instant at 50m resets(http_requests[5m])
{path="/bar"} 0 {path="/bar"} 0
{path="/biz"} 0 {path="/biz"} 0
eval instant at 50m resets(http_requests[300])
{path="/foo"} 0
{path="/bar"} 0
{path="/biz"} 0
eval instant at 50m resets(http_requests[20m]) eval instant at 50m resets(http_requests[20m])
{path="/foo"} 1 {path="/foo"} 1
{path="/bar"} 0 {path="/bar"} 0
@ -239,10 +244,16 @@ eval instant at 50m deriv(testcounter_reset_middle[100m])
eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600) eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600)
{} 76.81818181818181 {} 76.81818181818181
eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h)
{} 76.81818181818181
# intercept at t = 3000+3600 = 6600 # intercept at t = 3000+3600 = 6600
eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
{} 76.81818181818181 {} 76.81818181818181
eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 1h)
{} 76.81818181818181
# intercept at t = 600+3600 = 4200 # intercept at t = 600+3600 = 4200
eval instant at 10m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) eval instant at 10m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
{} 51.36363636363637 {} 51.36363636363637
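Because the number rule now also accepts durations, scalar function arguments can be written either way; the two new predict_linear evaluations above assert exactly this. A quick equivalence check through the parser (the equality is an expectation based on those tests, not a guaranteed rendering):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	a, _ := parser.ParseExpr(`predict_linear(up[50m], 3600)`)
	b, _ := parser.ParseExpr(`predict_linear(up[50m], 1h)`)
	// 1h is parsed as the scalar 3600 seconds, so both render identically.
	fmt.Println(a.String() == b.String())
}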

View file

@@ -73,22 +73,32 @@ eval instant at 50m histogram_count(testhistogram3)
{start="positive"} 110
{start="negative"} 20

+# Classic way of accessing the count still works.
+eval instant at 50m testhistogram3_count
+testhistogram3_count{start="positive"} 110
+testhistogram3_count{start="negative"} 20

# Test histogram_sum.
eval instant at 50m histogram_sum(testhistogram3)
{start="positive"} 330
{start="negative"} 80

-# Test histogram_avg.
+# Classic way of accessing the sum still works.
+eval instant at 50m testhistogram3_sum
+testhistogram3_sum{start="positive"} 330
+testhistogram3_sum{start="negative"} 80
+
+# Test histogram_avg. This has no classic equivalent.
eval instant at 50m histogram_avg(testhistogram3)
{start="positive"} 3
{start="negative"} 4

-# Test histogram_stddev.
+# Test histogram_stddev. This has no classic equivalent.
eval instant at 50m histogram_stddev(testhistogram3)
{start="positive"} 2.8189265757336734
{start="negative"} 4.182715937754936

-# Test histogram_stdvar.
+# Test histogram_stdvar. This has no classic equivalent.
eval instant at 50m histogram_stdvar(testhistogram3)
{start="positive"} 7.946347039377573
{start="negative"} 17.495112615949154

@@ -103,137 +113,282 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m]))
{start="positive"} 0.6363636363636364
{start="negative"} 0

-# Test histogram_quantile.
+# In the classic histogram, we can access the corresponding bucket (if
+# it exists) and divide by the count to get the same result.
+eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count
+{start="positive"} 0.6363636363636364
+
+eval instant at 50m rate(testhistogram3_bucket{le=".2"}[5m]) / ignoring(le) rate(testhistogram3_count[5m])
+{start="positive"} 0.6363636363636364
+# Test histogram_quantile, native and classic.
+eval instant at 50m histogram_quantile(0, testhistogram3)
+{start="positive"} 0
+{start="negative"} -0.25

eval instant at 50m histogram_quantile(0, testhistogram3_bucket)
{start="positive"} 0
{start="negative"} -0.25

+eval instant at 50m histogram_quantile(0.25, testhistogram3)
+{start="positive"} 0.055
+{start="negative"} -0.225

eval instant at 50m histogram_quantile(0.25, testhistogram3_bucket)
{start="positive"} 0.055
{start="negative"} -0.225

+eval instant at 50m histogram_quantile(0.5, testhistogram3)
+{start="positive"} 0.125
+{start="negative"} -0.2

eval instant at 50m histogram_quantile(0.5, testhistogram3_bucket)
{start="positive"} 0.125
{start="negative"} -0.2

+eval instant at 50m histogram_quantile(0.75, testhistogram3)
+{start="positive"} 0.45
+{start="negative"} -0.15

eval instant at 50m histogram_quantile(0.75, testhistogram3_bucket)
{start="positive"} 0.45
{start="negative"} -0.15

+eval instant at 50m histogram_quantile(1, testhistogram3)
+{start="positive"} 1
+{start="negative"} -0.1

eval instant at 50m histogram_quantile(1, testhistogram3_bucket)
{start="positive"} 1
{start="negative"} -0.1

# Quantile too low.
+eval_warn instant at 50m histogram_quantile(-0.1, testhistogram)
+{start="positive"} -Inf
+{start="negative"} -Inf

eval_warn instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
{start="positive"} -Inf
{start="negative"} -Inf

# Quantile too high.
+eval_warn instant at 50m histogram_quantile(1.01, testhistogram)
+{start="positive"} +Inf
+{start="negative"} +Inf

eval_warn instant at 50m histogram_quantile(1.01, testhistogram_bucket)
{start="positive"} +Inf
{start="negative"} +Inf

# Quantile invalid.
+eval_warn instant at 50m histogram_quantile(NaN, testhistogram)
+{start="positive"} NaN
+{start="negative"} NaN

eval_warn instant at 50m histogram_quantile(NaN, testhistogram_bucket)
{start="positive"} NaN
{start="negative"} NaN

# Quantile value in lowest bucket.
+eval instant at 50m histogram_quantile(0, testhistogram)
+{start="positive"} 0
+{start="negative"} -0.2

eval instant at 50m histogram_quantile(0, testhistogram_bucket)
{start="positive"} 0
{start="negative"} -0.2

# Quantile value in highest bucket.
+eval instant at 50m histogram_quantile(1, testhistogram)
+{start="positive"} 1
+{start="negative"} 0.3

eval instant at 50m histogram_quantile(1, testhistogram_bucket)
{start="positive"} 1
{start="negative"} 0.3

# Finally some useful quantiles.
+eval instant at 50m histogram_quantile(0.2, testhistogram)
+{start="positive"} 0.048
+{start="negative"} -0.2

eval instant at 50m histogram_quantile(0.2, testhistogram_bucket)
{start="positive"} 0.048
{start="negative"} -0.2

+eval instant at 50m histogram_quantile(0.5, testhistogram)
+{start="positive"} 0.15
+{start="negative"} -0.15

eval instant at 50m histogram_quantile(0.5, testhistogram_bucket)
{start="positive"} 0.15
{start="negative"} -0.15

+eval instant at 50m histogram_quantile(0.8, testhistogram)
+{start="positive"} 0.72
+{start="negative"} 0.3

eval instant at 50m histogram_quantile(0.8, testhistogram_bucket)
{start="positive"} 0.72
{start="negative"} 0.3

# More realistic with rates.
+eval instant at 50m histogram_quantile(0.2, rate(testhistogram[5m]))
+{start="positive"} 0.048
+{start="negative"} -0.2

eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m]))
{start="positive"} 0.048
{start="negative"} -0.2

+eval instant at 50m histogram_quantile(0.5, rate(testhistogram[5m]))
+{start="positive"} 0.15
+{start="negative"} -0.15

eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m]))
{start="positive"} 0.15
{start="negative"} -0.15

+eval instant at 50m histogram_quantile(0.8, rate(testhistogram[5m]))
+{start="positive"} 0.72
+{start="negative"} 0.3

eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m]))
{start="positive"} 0.72
{start="negative"} 0.3

# Want results exactly in the middle of the bucket.
+eval instant at 7m histogram_quantile(1./6., testhistogram2)
+{} 1

eval instant at 7m histogram_quantile(1./6., testhistogram2_bucket)
{} 1

+eval instant at 7m histogram_quantile(0.5, testhistogram2)
+{} 3

eval instant at 7m histogram_quantile(0.5, testhistogram2_bucket)
{} 3

+eval instant at 7m histogram_quantile(5./6., testhistogram2)
+{} 5

eval instant at 7m histogram_quantile(5./6., testhistogram2_bucket)
{} 5

+eval instant at 47m histogram_quantile(1./6., rate(testhistogram2[15m]))
+{} 1

eval instant at 47m histogram_quantile(1./6., rate(testhistogram2_bucket[15m]))
{} 1

+eval instant at 47m histogram_quantile(0.5, rate(testhistogram2[15m]))
+{} 3

eval instant at 47m histogram_quantile(0.5, rate(testhistogram2_bucket[15m]))
{} 3

+eval instant at 47m histogram_quantile(5./6., rate(testhistogram2[15m]))
+{} 5

eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m]))
{} 5
-# Aggregated histogram: Everything in one.
+# Aggregated histogram: Everything in one. Note how native histograms
+# don't require aggregation by le.
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])))
+{} 0.075

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le))
{} 0.075

+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])))
+{} 0.1277777777777778

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le))
{} 0.1277777777777778

# Aggregated histogram: Everything in one. Now with avg, which does not change anything.
+eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[5m])))
+{} 0.075

eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le))
{} 0.075

+eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[5m])))
+{} 0.12777777777777778

eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le))
{} 0.12777777777777778

# Aggregated histogram: By instance.
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (instance))
+{instance="ins1"} 0.075
+{instance="ins2"} 0.075

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
{instance="ins1"} 0.075
{instance="ins2"} 0.075

+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (instance))
+{instance="ins1"} 0.1333333333
+{instance="ins2"} 0.125

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
{instance="ins1"} 0.1333333333
{instance="ins2"} 0.125

# Aggregated histogram: By job.
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job))
+{job="job1"} 0.1
+{job="job2"} 0.0642857142857143

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
{job="job1"} 0.1
{job="job2"} 0.0642857142857143

+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job))
+{job="job1"} 0.14
+{job="job2"} 0.1125

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
{job="job1"} 0.14
{job="job2"} 0.1125

# Aggregated histogram: By job and instance.
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job, instance))
+{instance="ins1", job="job1"} 0.11
+{instance="ins2", job="job1"} 0.09
+{instance="ins1", job="job2"} 0.06
+{instance="ins2", job="job2"} 0.0675

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675

+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job, instance))
+{instance="ins1", job="job1"} 0.15
+{instance="ins2", job="job1"} 0.1333333333333333
+{instance="ins1", job="job2"} 0.1
+{instance="ins2", job="job2"} 0.1166666666666667

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.1333333333333333

@@ -241,18 +396,32 @@ eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bu
{instance="ins2", job="job2"} 0.1166666666666667

# The unaggregated histogram for comparison. Same result as the previous one.
+eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[5m]))
+{instance="ins1", job="job1"} 0.11
+{instance="ins2", job="job1"} 0.09
+{instance="ins1", job="job2"} 0.06
+{instance="ins2", job="job2"} 0.0675

eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m]))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675

+eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[5m]))
+{instance="ins1", job="job1"} 0.15
+{instance="ins2", job="job1"} 0.13333333333333333
+{instance="ins1", job="job2"} 0.1
+{instance="ins2", job="job2"} 0.11666666666666667

eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.13333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.11666666666666667

+# All NHCBs summed into one.
eval instant at 50m sum(request_duration_seconds)
{} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}}

@@ -303,11 +472,13 @@ load_with_nhcb 5m

eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m]))
{instance="ins1", job="job1"} NaN

-# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set
+# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set.
# https://github.com/prometheus/prometheus/issues/9910
load_with_nhcb 5m
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10

-eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket$"})
+eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"})
+eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"})
View file
@@ -32,6 +32,9 @@ eval instant at 20s count_over_time(metric[1s])

eval instant at 20s count_over_time(metric[10s])
{} 1

+eval instant at 20s count_over_time(metric[10])
+{} 1

clear
View file
@@ -76,6 +76,21 @@ eval instant at 1010s sum_over_time(metric1[30s:10s] offset 3s)

eval instant at 1010s sum_over_time((metric1)[30s:10s] offset 3s)
{} 297

+eval instant at 1010s sum_over_time(metric1[30:10] offset 3)
+{} 297
+
+eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s)
+{} 297
+
+eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s)
+{} 297
+
+eval instant at 1010s sum_over_time((metric1)[30:10] offset 3s)
+{} 297
+
+eval instant at 1010s sum_over_time((metric1)[30:10] offset 3)
+{} 297

# Nested subqueries
eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s])
{} 0.4
View file
@@ -621,14 +621,12 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
            }
        }

-        // If the rule has no dependencies, it can run concurrently because no other rules in this group depend on its output.
-        // Try run concurrently if there are slots available.
-        if ctrl := g.concurrencyController; isRuleEligibleForConcurrentExecution(rule) && ctrl.Allow() {
+        if ctrl := g.concurrencyController; ctrl.Allow(ctx, g, rule) {
            wg.Add(1)
            go eval(i, rule, func() {
                wg.Done()
-                ctrl.Done()
+                ctrl.Done(ctx)
            })
        } else {
            eval(i, rule, nil)

@@ -1094,7 +1092,3 @@ func buildDependencyMap(rules []Rule) dependencyMap {
    return dependencies
}

-func isRuleEligibleForConcurrentExecution(rule Rule) bool {
-    return rule.NoDependentRules() && rule.NoDependencyRules()
-}
View file
@@ -457,67 +457,47 @@ func (c ruleDependencyController) AnalyseRules(rules []Rule) {
// Its purpose is to bound the amount of concurrency in rule evaluations to avoid overwhelming the Prometheus
// server with additional query load. Concurrency is controlled globally, not on a per-group basis.
type RuleConcurrencyController interface {
-    // Allow determines whether any concurrent evaluation slots are available.
-    // If Allow() returns true, then Done() must be called to release the acquired slot.
-    Allow() bool
+    // Allow determines if the given rule is allowed to be evaluated concurrently.
+    // If Allow() returns true, then Done() must be called to release the acquired slot and corresponding cleanup is done.
+    // It is important that both *Group and Rule are not retained and only be used for the duration of the call.
+    Allow(ctx context.Context, group *Group, rule Rule) bool

    // Done releases a concurrent evaluation slot.
-    Done()
+    Done(ctx context.Context)
}

// concurrentRuleEvalController holds a weighted semaphore which controls the concurrent evaluation of rules.
type concurrentRuleEvalController struct {
    sema *semaphore.Weighted
-    depMapsMu sync.Mutex
-    depMaps map[*Group]dependencyMap
}

func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyController {
    return &concurrentRuleEvalController{
        sema: semaphore.NewWeighted(maxConcurrency),
-        depMaps: map[*Group]dependencyMap{},
    }
}

-func (c *concurrentRuleEvalController) RuleEligible(g *Group, r Rule) bool {
-    c.depMapsMu.Lock()
-    defer c.depMapsMu.Unlock()
-
-    depMap, found := c.depMaps[g]
-    if !found {
-        depMap = buildDependencyMap(g.rules)
-        c.depMaps[g] = depMap
-    }
-    return depMap.isIndependent(r)
-}
-
-func (c *concurrentRuleEvalController) Allow() bool {
-    return c.sema.TryAcquire(1)
-}
+func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, rule Rule) bool {
+    // To allow a rule to be executed concurrently, we need 3 conditions:
+    // 1. The rule must not have any rules that depend on it.
+    // 2. The rule itself must not depend on any other rules.
+    // 3. If 1 & 2 are true, then and only then we should try to acquire the concurrency slot.
+    if rule.NoDependentRules() && rule.NoDependencyRules() {
+        return c.sema.TryAcquire(1)
+    }
+    return false
+}

-func (c *concurrentRuleEvalController) Done() {
+func (c *concurrentRuleEvalController) Done(_ context.Context) {
    c.sema.Release(1)
}

-func (c *concurrentRuleEvalController) Invalidate() {
-    c.depMapsMu.Lock()
-    defer c.depMapsMu.Unlock()
-
-    // Clear out the memoized dependency maps because some or all groups may have been updated.
-    c.depMaps = map[*Group]dependencyMap{}
-}
-
// sequentialRuleEvalController is a RuleConcurrencyController that runs every rule sequentially.
type sequentialRuleEvalController struct{}

-func (c sequentialRuleEvalController) RuleEligible(_ *Group, _ Rule) bool {
-    return false
-}
-
-func (c sequentialRuleEvalController) Allow() bool {
+func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) bool {
    return false
}

-func (c sequentialRuleEvalController) Done() {}
-
-func (c sequentialRuleEvalController) Invalidate() {}
+func (c sequentialRuleEvalController) Done(_ context.Context) {}
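For third-party code that plugs in its own controller, the shape of the narrowed interface can be sketched as follows (an illustrative implementation written outside the rules package, not part of this change; it mirrors the semaphore-based controller above and would typically be wired in through the manager options):

    package example

    import (
        "context"

        "golang.org/x/sync/semaphore"

        "github.com/prometheus/prometheus/rules"
    )

    // capController grants concurrency only to independent rules, bounded
    // by a weighted semaphore, matching the built-in controller's logic.
    type capController struct {
        sema *semaphore.Weighted
    }

    var _ rules.RuleConcurrencyController = (*capController)(nil)

    func (c *capController) Allow(_ context.Context, _ *rules.Group, rule rules.Rule) bool {
        // Neither the Group nor the Rule may be retained past this call.
        if rule.NoDependentRules() && rule.NoDependencyRules() {
            return c.sema.TryAcquire(1)
        }
        return false
    }

    func (c *capController) Done(_ context.Context) { c.sema.Release(1) }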
View file
@@ -32,7 +32,6 @@ import (
    "github.com/prometheus/common/model"
    "github.com/stretchr/testify/require"
    "go.uber.org/atomic"
-    "go.uber.org/goleak"
    "gopkg.in/yaml.v2"

    "github.com/prometheus/prometheus/model/labels"

@@ -50,7 +49,7 @@ import (
)

func TestMain(m *testing.M) {
-    goleak.VerifyTestMain(m)
+    prom_testutil.TolerantVerifyLeak(m)
}

func TestAlertingRule(t *testing.T) {
View file
@@ -24,7 +24,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
      - name: Install Go
        uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
        with:
View file
@@ -238,11 +238,11 @@ func (errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels
    return storage.ErrSeriesSet(errSelect)
}

-func (errQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (errQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    return nil, nil, errors.New("label values error")
}

-func (errQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (errQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    return nil, nil, errors.New("label names error")
}
View file
@@ -122,11 +122,11 @@ type MockQuerier struct {
    SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
}

-func (q *MockQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *MockQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    return nil, nil, nil
}

-func (q *MockQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *MockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    return nil, nil, nil
}

@@ -161,12 +161,12 @@ type LabelQuerier interface {
    // It is not safe to use the strings beyond the lifetime of the querier.
    // If matchers are specified the returned result set is reduced
    // to label values of metrics matching the matchers.
-    LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
+    LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)

    // LabelNames returns all the unique label names present in the block in sorted order.
    // If matchers are specified the returned result set is reduced
    // to label names of metrics matching the matchers.
-    LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
+    LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)

    // Close releases the resources of the Querier.
    Close() error

@@ -190,6 +190,9 @@ type SelectHints struct {
    Start int64 // Start time in milliseconds for this select.
    End int64   // End time in milliseconds for this select.

+    // Maximum number of results returned. Use a value of 0 to disable.
+    Limit int
+
    Step int64  // Query step size in milliseconds.
    Func string // String representation of surrounding function or aggregation.

@@ -217,6 +220,13 @@ type SelectHints struct {
    DisableTrimming bool
}

+// LabelHints specifies hints passed for label reads.
+// This is used only as an option for implementation to use.
+type LabelHints struct {
+    // Maximum number of results returned. Use a value of 0 to disable.
+    Limit int
+}
+
// TODO(bwplotka): Move to promql/engine_test.go?
// QueryableFunc is an adapter to allow the use of ordinary functions as
// Queryables. It follows the idea of http.HandlerFunc.
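Callers pass the new hints as an extra argument; nil keeps the previous behavior, while a non-zero Limit lets an implementation cap the result. A small sketch against the updated interface (the function name and limit are illustrative):

    package example

    import (
        "context"
        "fmt"

        "github.com/prometheus/prometheus/storage"
    )

    // printSomeLabelNames asks any LabelQuerier for at most 100 label names
    // via the new LabelHints; implementations are free to ignore the hint.
    func printSomeLabelNames(ctx context.Context, q storage.LabelQuerier) error {
        names, _, err := q.LabelNames(ctx, &storage.LabelHints{Limit: 100})
        if err != nil {
            return err
        }
        fmt.Println(names)
        return nil
    }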
View file
@@ -136,6 +136,11 @@ func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHist
    return b.it.AtFloatHistogram(nil)
}

+// AtT returns the timestamp of the current element of the iterator.
+func (b *MemoizedSeriesIterator) AtT() int64 {
+    return b.it.AtT()
+}
+
// Err returns the last encountered error.
func (b *MemoizedSeriesIterator) Err() error {
    return b.it.Err()
View file
@@ -29,13 +29,15 @@ func TestMemoizedSeriesIterator(t *testing.T) {
    sampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram) {
        if efh == nil {
            ts, v := it.At()
-            require.Equal(t, ets, ts, "timestamp mismatch")
-            require.Equal(t, ev, v, "value mismatch")
+            require.Equal(t, ets, ts, "At() timestamp mismatch")
+            require.Equal(t, ev, v, "At() value mismatch")
        } else {
            ts, fh := it.AtFloatHistogram()
-            require.Equal(t, ets, ts, "timestamp mismatch")
-            require.Equal(t, efh, fh, "histogram mismatch")
+            require.Equal(t, ets, ts, "AtFloatHistogram() timestamp mismatch")
+            require.Equal(t, efh, fh, "AtFloatHistogram() histogram mismatch")
        }
+        require.Equal(t, ets, it.AtT(), "AtT() timestamp mismatch")
    }
    prevSampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram, eok bool) {
        ts, v, fh, ok := it.PeekPrev()
View file
@@ -45,25 +45,24 @@ type mergeGenericQuerier struct {
//
// In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used.
func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier {
+    primaries = filterQueriers(primaries)
+    secondaries = filterQueriers(secondaries)
+
    switch {
-    case len(primaries)+len(secondaries) == 0:
+    case len(primaries) == 0 && len(secondaries) == 0:
        return noopQuerier{}
    case len(primaries) == 1 && len(secondaries) == 0:
        return primaries[0]
    case len(primaries) == 0 && len(secondaries) == 1:
-        return secondaries[0]
+        return &querierAdapter{newSecondaryQuerierFrom(secondaries[0])}
    }

    queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
    for _, q := range primaries {
-        if _, ok := q.(noopQuerier); !ok && q != nil {
-            queriers = append(queriers, newGenericQuerierFrom(q))
-        }
+        queriers = append(queriers, newGenericQuerierFrom(q))
    }
    for _, q := range secondaries {
-        if _, ok := q.(noopQuerier); !ok && q != nil {
-            queriers = append(queriers, newSecondaryQuerierFrom(q))
-        }
+        queriers = append(queriers, newSecondaryQuerierFrom(q))
    }

    concurrentSelect := false

@@ -77,31 +76,40 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer
    }}
}

+func filterQueriers(qs []Querier) []Querier {
+    ret := make([]Querier, 0, len(qs))
+    for _, q := range qs {
+        if _, ok := q.(noopQuerier); !ok && q != nil {
+            ret = append(ret, q)
+        }
+    }
+    return ret
+}
+
// NewMergeChunkQuerier returns a new Chunk Querier that merges results of given primary and secondary chunk queriers.
// See NewFanout commentary to learn more about primary vs secondary differences.
//
// In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used.
// TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670
func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier {
+    primaries = filterChunkQueriers(primaries)
+    secondaries = filterChunkQueriers(secondaries)
+
    switch {
    case len(primaries) == 0 && len(secondaries) == 0:
        return noopChunkQuerier{}
    case len(primaries) == 1 && len(secondaries) == 0:
        return primaries[0]
    case len(primaries) == 0 && len(secondaries) == 1:
-        return secondaries[0]
+        return &chunkQuerierAdapter{newSecondaryQuerierFromChunk(secondaries[0])}
    }

    queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
    for _, q := range primaries {
-        if _, ok := q.(noopChunkQuerier); !ok && q != nil {
-            queriers = append(queriers, newGenericQuerierFromChunk(q))
-        }
+        queriers = append(queriers, newGenericQuerierFromChunk(q))
    }
-    for _, querier := range secondaries {
-        if _, ok := querier.(noopChunkQuerier); !ok && querier != nil {
-            queriers = append(queriers, newSecondaryQuerierFromChunk(querier))
-        }
+    for _, q := range secondaries {
+        queriers = append(queriers, newSecondaryQuerierFromChunk(q))
    }

    concurrentSelect := false

@@ -115,6 +123,16 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica
    }}
}

+func filterChunkQueriers(qs []ChunkQuerier) []ChunkQuerier {
+    ret := make([]ChunkQuerier, 0, len(qs))
+    for _, q := range qs {
+        if _, ok := q.(noopChunkQuerier); !ok && q != nil {
+            ret = append(ret, q)
+        }
+    }
+    return ret
+}
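With the filtering hoisted into these helpers, nil or noop entries can now be handed straight to the constructors and are dropped before the single-querier fast paths are chosen. A usage sketch (the queriers and the merge-function choice are illustrative):

    package example

    import (
        "github.com/prometheus/prometheus/storage"
    )

    // mergedQuerier merges a local querier with a possibly-nil remote one;
    // nil and noop queriers are filtered out inside NewMergeQuerier.
    func mergedQuerier(local, remote storage.Querier) storage.Querier {
        return storage.NewMergeQuerier(
            []storage.Querier{local},
            []storage.Querier{remote}, // may be nil; it is dropped, not dereferenced
            storage.ChainedSeriesMerge,
        )
    }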
// Select returns a set of series that matches the given label matchers.
func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
    seriesSets := make([]genericSeriesSet, 0, len(q.queriers))

@@ -169,8 +187,8 @@ func (l labelGenericQueriers) SplitByHalf() (labelGenericQueriers, labelGenericQ
// LabelValues returns all potential values for a label name.
// If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers.
-func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
-    res, ws, err := q.lvals(ctx, q.queriers, name, matchers...)
+func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+    res, ws, err := q.lvals(ctx, q.queriers, name, hints, matchers...)
    if err != nil {
        return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err)
    }

@@ -178,22 +196,22 @@ func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, matc
}

// lvals performs merge sort for LabelValues from multiple queriers.
-func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    if lq.Len() == 0 {
        return nil, nil, nil
    }
    if lq.Len() == 1 {
-        return lq.Get(0).LabelValues(ctx, n, matchers...)
+        return lq.Get(0).LabelValues(ctx, n, hints, matchers...)
    }
    a, b := lq.SplitByHalf()

    var ws annotations.Annotations
-    s1, w, err := q.lvals(ctx, a, n, matchers...)
+    s1, w, err := q.lvals(ctx, a, n, hints, matchers...)
    ws.Merge(w)
    if err != nil {
        return nil, ws, err
    }
-    s2, ws, err := q.lvals(ctx, b, n, matchers...)
+    s2, ws, err := q.lvals(ctx, b, n, hints, matchers...)
    ws.Merge(w)
    if err != nil {
        return nil, ws, err

@@ -229,13 +247,13 @@ func mergeStrings(a, b []string) []string {
}

// LabelNames returns all the unique label names present in all queriers in sorted order.
-func (q *mergeGenericQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *mergeGenericQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
    var (
        labelNamesMap = make(map[string]struct{})
        warnings annotations.Annotations
    )
    for _, querier := range q.queriers {
-        names, wrn, err := querier.LabelNames(ctx, matchers...)
+        names, wrn, err := querier.LabelNames(ctx, hints, matchers...)
        if wrn != nil {
            // TODO(bwplotka): We could potentially wrap warnings.
            warnings.Merge(wrn)
View file
} }
type mockQuerier struct { type mockQuerier struct {
LabelQuerier mtx sync.Mutex
toReturn []Series toReturn []Series // Response for Select.
closed bool
labelNamesCalls int
labelNamesRequested []labelNameRequest
sortedSeriesRequested []bool
resp []string // Response for LabelNames and LabelValues; turned into Select response if toReturn is not supplied.
warnings annotations.Annotations
err error
}
type labelNameRequest struct {
name string
matchers []*labels.Matcher
} }
type seriesByLabel []Series type seriesByLabel []Series
@ -924,13 +938,47 @@ func (a seriesByLabel) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a seriesByLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 } func (a seriesByLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 }
func (m *mockQuerier) Select(_ context.Context, sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) SeriesSet { func (m *mockQuerier) Select(_ context.Context, sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) SeriesSet {
cpy := make([]Series, len(m.toReturn)) m.mtx.Lock()
copy(cpy, m.toReturn) defer m.mtx.Unlock()
m.sortedSeriesRequested = append(m.sortedSeriesRequested, sortSeries)
var ret []Series
if len(m.toReturn) > 0 {
ret = make([]Series, len(m.toReturn))
copy(ret, m.toReturn)
} else if len(m.resp) > 0 {
ret = make([]Series, 0, len(m.resp))
for _, l := range m.resp {
ret = append(ret, NewListSeries(labels.FromStrings("test", l), nil))
}
}
if sortSeries { if sortSeries {
sort.Sort(seriesByLabel(cpy)) sort.Sort(seriesByLabel(ret))
} }
return NewMockSeriesSet(cpy...) return &mockSeriesSet{idx: -1, series: ret, warnings: m.warnings, err: m.err}
}
func (m *mockQuerier) LabelValues(_ context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
m.mtx.Lock()
m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{
name: name,
matchers: matchers,
})
m.mtx.Unlock()
return m.resp, m.warnings, m.err
}
func (m *mockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
m.mtx.Lock()
m.labelNamesCalls++
m.mtx.Unlock()
return m.resp, m.warnings, m.err
}
func (m *mockQuerier) Close() error {
m.closed = true
return nil
} }
type mockChunkQuerier struct {

@@ -960,6 +1008,9 @@ func (m *mockChunkQuerier) Select(_ context.Context, sortSeries bool, _ *SelectH
type mockSeriesSet struct {
    idx int
    series []Series
+
+    warnings annotations.Annotations
+    err error
}

func NewMockSeriesSet(series ...Series) SeriesSet {

@@ -970,15 +1021,18 @@ func NewMockSeriesSet(series ...Series) SeriesSet {
}

func (m *mockSeriesSet) Next() bool {
+    if m.err != nil {
+        return false
+    }
    m.idx++
    return m.idx < len(m.series)
}

func (m *mockSeriesSet) At() Series { return m.series[m.idx] }

-func (m *mockSeriesSet) Err() error { return nil }
+func (m *mockSeriesSet) Err() error { return m.err }

-func (m *mockSeriesSet) Warnings() annotations.Annotations { return nil }
+func (m *mockSeriesSet) Warnings() annotations.Annotations { return m.warnings }

type mockChunkSeriesSet struct {
    idx int
@@ -1336,105 +1390,44 @@ func BenchmarkMergeSeriesSet(b *testing.B) {
    }
}

-type mockGenericQuerier struct {
-    mtx sync.Mutex
-
-    closed bool
-    labelNamesCalls int
-    labelNamesRequested []labelNameRequest
-    sortedSeriesRequested []bool
-
-    resp []string
-    warnings annotations.Annotations
-    err error
-}
-
-type labelNameRequest struct {
-    name string
-    matchers []*labels.Matcher
-}
-
-func (m *mockGenericQuerier) Select(_ context.Context, b bool, _ *SelectHints, _ ...*labels.Matcher) genericSeriesSet {
-    m.mtx.Lock()
-    m.sortedSeriesRequested = append(m.sortedSeriesRequested, b)
-    m.mtx.Unlock()
-    return &mockGenericSeriesSet{resp: m.resp, warnings: m.warnings, err: m.err}
-}
-
-func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
-    m.mtx.Lock()
-    m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{
-        name: name,
-        matchers: matchers,
-    })
-    m.mtx.Unlock()
-    return m.resp, m.warnings, m.err
-}
-
-func (m *mockGenericQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
-    m.mtx.Lock()
-    m.labelNamesCalls++
-    m.mtx.Unlock()
-    return m.resp, m.warnings, m.err
-}
-
-func (m *mockGenericQuerier) Close() error {
-    m.closed = true
-    return nil
-}
-
-type mockGenericSeriesSet struct {
-    resp []string
-    warnings annotations.Annotations
-    err error
-
-    curr int
-}
-
-func (m *mockGenericSeriesSet) Next() bool {
-    if m.err != nil {
-        return false
-    }
-    if m.curr >= len(m.resp) {
-        return false
-    }
-    m.curr++
-    return true
-}
-
-func (m *mockGenericSeriesSet) Err() error { return m.err }
-
-func (m *mockGenericSeriesSet) Warnings() annotations.Annotations { return m.warnings }
-
-func (m *mockGenericSeriesSet) At() Labels {
-    return mockLabels(m.resp[m.curr-1])
-}
-
-type mockLabels string
-
-func (l mockLabels) Labels() labels.Labels {
-    return labels.FromStrings("test", string(l))
-}
-
-func unwrapMockGenericQuerier(t *testing.T, qr genericQuerier) *mockGenericQuerier {
-    m, ok := qr.(*mockGenericQuerier)
-    if !ok {
-        s, ok := qr.(*secondaryQuerier)
-        require.True(t, ok, "expected secondaryQuerier got something else")
-        m, ok = s.genericQuerier.(*mockGenericQuerier)
-        require.True(t, ok, "expected mockGenericQuerier got something else")
-    }
-    return m
-}
-
-func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
+func visitMockQueriers(t *testing.T, qr Querier, f func(t *testing.T, q *mockQuerier)) int {
+    count := 0
+    switch x := qr.(type) {
+    case *mockQuerier:
+        count++
+        f(t, x)
+    case *querierAdapter:
+        count += visitMockQueriersInGenericQuerier(t, x.genericQuerier, f)
+    }
+    return count
+}
+
+func visitMockQueriersInGenericQuerier(t *testing.T, g genericQuerier, f func(t *testing.T, q *mockQuerier)) int {
+    count := 0
+    switch x := g.(type) {
+    case *mergeGenericQuerier:
+        for _, q := range x.queriers {
+            count += visitMockQueriersInGenericQuerier(t, q, f)
+        }
+    case *genericQuerierAdapter:
+        // Visitor for chunkQuerier not implemented.
+        count += visitMockQueriers(t, x.q, f)
+    case *secondaryQuerier:
+        count += visitMockQueriersInGenericQuerier(t, x.genericQuerier, f)
+    }
+    return count
+}
+
+func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) {
    var (
        errStorage = errors.New("storage error")
        warnStorage = errors.New("storage warning")
        ctx = context.Background()
    )
    for _, tcase := range []struct {
        name string
-        queriers []genericQuerier
+        primaries []Querier
+        secondaries []Querier

        expectedSelectsSeries []labels.Labels
        expectedLabels []string

@@ -1443,10 +1436,8 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
        expectedErrs [4]error
    }{
        {
-            // NewMergeQuerier will not create a mergeGenericQuerier
-            // with just one querier inside, but we can test it anyway.
-            name: "one successful primary querier",
-            queriers: []genericQuerier{&mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}},
+            name: "one successful primary querier",
+            primaries: []Querier{&mockQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}},
            expectedSelectsSeries: []labels.Labels{
                labels.FromStrings("test", "a"),
                labels.FromStrings("test", "b"),

@@ -1455,9 +1446,9 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
        },
        {
            name: "multiple successful primary queriers",
-            queriers: []genericQuerier{
-                &mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil},
-                &mockGenericQuerier{resp: []string{"b", "c"}, warnings: nil, err: nil},
+            primaries: []Querier{
+                &mockQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil},
+                &mockQuerier{resp: []string{"b", "c"}, warnings: nil, err: nil},
            },
            expectedSelectsSeries: []labels.Labels{
                labels.FromStrings("test", "a"),

@@ -1468,15 +1459,17 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
        },
        {
            name: "one failed primary querier",
-            queriers: []genericQuerier{&mockGenericQuerier{warnings: nil, err: errStorage}},
+            primaries: []Querier{&mockQuerier{warnings: nil, err: errStorage}},
            expectedErrs: [4]error{errStorage, errStorage, errStorage, errStorage},
        },
        {
            name: "one successful primary querier with successful secondaries",
-            queriers: []genericQuerier{
-                &mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil},
-                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}},
-                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}},
+            primaries: []Querier{
+                &mockQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil},
+            },
+            secondaries: []Querier{
+                &mockQuerier{resp: []string{"b"}, warnings: nil, err: nil},
+                &mockQuerier{resp: []string{"c"}, warnings: nil, err: nil},
            },
            expectedSelectsSeries: []labels.Labels{
                labels.FromStrings("test", "a"),

@@ -1487,10 +1480,12 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
        },
        {
            name: "one successful primary querier with empty response and successful secondaries",
-            queriers: []genericQuerier{
-                &mockGenericQuerier{resp: []string{}, warnings: nil, err: nil},
-                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}},
-                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}},
+            primaries: []Querier{
+                &mockQuerier{resp: []string{}, warnings: nil, err: nil},
+            },
+            secondaries: []Querier{
+                &mockQuerier{resp: []string{"b"}, warnings: nil, err: nil},
+                &mockQuerier{resp: []string{"c"}, warnings: nil, err: nil},
            },
            expectedSelectsSeries: []labels.Labels{
                labels.FromStrings("test", "b"),

@@ -1500,19 +1495,42 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
        },
        {
            name: "one failed primary querier with successful secondaries",
-            queriers: []genericQuerier{
-                &mockGenericQuerier{warnings: nil, err: errStorage},
-                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}},
-                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}},
+            primaries: []Querier{
+                &mockQuerier{warnings: nil, err: errStorage},
+            },
+            secondaries: []Querier{
+                &mockQuerier{resp: []string{"b"}, warnings: nil, err: nil},
+                &mockQuerier{resp: []string{"c"}, warnings: nil, err: nil},
            },
            expectedErrs: [4]error{errStorage, errStorage, errStorage, errStorage},
        },
+        {
+            name: "nil primary querier with failed secondary",
+            primaries: nil,
+            secondaries: []Querier{
+                &mockQuerier{resp: []string{"b"}, warnings: nil, err: errStorage},
+            },
+            expectedLabels: []string{},
+            expectedWarnings: annotations.New().Add(errStorage),
+        },
+        {
+            name: "nil primary querier with two failed secondaries",
+            primaries: nil,
+            secondaries: []Querier{
+                &mockQuerier{resp: []string{"b"}, warnings: nil, err: errStorage},
+                &mockQuerier{resp: []string{"c"}, warnings: nil, err: errStorage},
+            },
+            expectedLabels: []string{},
+            expectedWarnings: annotations.New().Add(errStorage),
+        },
        {
            name: "one successful primary querier with failed secondaries",
-            queriers: []genericQuerier{
-                &mockGenericQuerier{resp: []string{"a"}, warnings: nil, err: nil},
-                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: errStorage}},
-                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: errStorage}},
+            primaries: []Querier{
+                &mockQuerier{resp: []string{"a"}, warnings: nil, err: nil},
+            },
+            secondaries: []Querier{
+                &mockQuerier{resp: []string{"b"}, warnings: nil, err: errStorage},
+                &mockQuerier{resp: []string{"c"}, warnings: nil, err: errStorage},
            },
            expectedSelectsSeries: []labels.Labels{
                labels.FromStrings("test", "a"),

@@ -1522,9 +1540,11 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
        },
        {
            name: "successful queriers with warnings",
-            queriers: []genericQuerier{
-                &mockGenericQuerier{resp: []string{"a"}, warnings: annotations.New().Add(warnStorage), err: nil},
-                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: annotations.New().Add(warnStorage), err: nil}},
+            primaries: []Querier{
+                &mockQuerier{resp: []string{"a"}, warnings: annotations.New().Add(warnStorage), err: nil},
+            },
+            secondaries: []Querier{
+                &mockQuerier{resp: []string{"b"}, warnings: annotations.New().Add(warnStorage), err: nil},
            },
            expectedSelectsSeries: []labels.Labels{
                labels.FromStrings("test", "a"),

@@ -1535,10 +1555,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
        },
    } {
        t.Run(tcase.name, func(t *testing.T) {
-            q := &mergeGenericQuerier{
-                queriers: tcase.queriers,
-                mergeFn: func(l ...Labels) Labels { return l[0] },
-            }
+            q := NewMergeQuerier(tcase.primaries, tcase.secondaries, func(s ...Series) Series { return s[0] })

            t.Run("Select", func(t *testing.T) {
                res := q.Select(context.Background(), false, nil)
@@ -1551,65 +1568,70 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
                require.ErrorIs(t, res.Err(), tcase.expectedErrs[0], "expected error doesn't match")
                require.Equal(t, tcase.expectedSelectsSeries, lbls)

-                for _, qr := range q.queriers {
-                    m := unwrapMockGenericQuerier(t, qr)
-                    // mergeGenericQuerier forces all Selects to be sorted.
-                    require.Equal(t, []bool{true}, m.sortedSeriesRequested)
-                }
+                n := visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) {
+                    // Single queries should be unsorted; merged queries sorted.
+                    exp := len(tcase.primaries)+len(tcase.secondaries) > 1
+                    require.Equal(t, []bool{exp}, m.sortedSeriesRequested)
+                })
+                // Check we visited all queriers.
+                require.Equal(t, len(tcase.primaries)+len(tcase.secondaries), n)
            })
            t.Run("LabelNames", func(t *testing.T) {
-                res, w, err := q.LabelNames(ctx)
+                res, w, err := q.LabelNames(ctx, nil)
                require.Subset(t, tcase.expectedWarnings, w)
                require.ErrorIs(t, err, tcase.expectedErrs[1], "expected error doesn't match")
-                require.Equal(t, tcase.expectedLabels, res)
+                requireEqualSlice(t, tcase.expectedLabels, res)

                if err != nil {
                    return
                }
-                for _, qr := range q.queriers {
-                    m := unwrapMockGenericQuerier(t, qr)
+                visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) {
                    require.Equal(t, 1, m.labelNamesCalls)
-                }
+                })
            })
            t.Run("LabelValues", func(t *testing.T) {
-                res, w, err := q.LabelValues(ctx, "test")
+                res, w, err := q.LabelValues(ctx, "test", nil)
                require.Subset(t, tcase.expectedWarnings, w)
                require.ErrorIs(t, err, tcase.expectedErrs[2], "expected error doesn't match")
-                require.Equal(t, tcase.expectedLabels, res)
+                requireEqualSlice(t, tcase.expectedLabels, res)

                if err != nil {
                    return
                }
-                for _, qr := range q.queriers {
-                    m := unwrapMockGenericQuerier(t, qr)
+                visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) {
                    require.Equal(t, []labelNameRequest{{name: "test"}}, m.labelNamesRequested)
-                }
+                })
            })
            t.Run("LabelValuesWithMatchers", func(t *testing.T) {
                matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue")
-                res, w, err := q.LabelValues(ctx, "test2", matcher)
+                res, w, err := q.LabelValues(ctx, "test2", nil, matcher)
                require.Subset(t, tcase.expectedWarnings, w)
                require.ErrorIs(t, err, tcase.expectedErrs[3], "expected error doesn't match")
-                require.Equal(t, tcase.expectedLabels, res)
+                requireEqualSlice(t, tcase.expectedLabels, res)

                if err != nil {
                    return
                }
-                for _, qr := range q.queriers {
-                    m := unwrapMockGenericQuerier(t, qr)
+                visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) {
                    require.Equal(t, []labelNameRequest{
                        {name: "test"},
                        {name: "test2", matchers: []*labels.Matcher{matcher}},
                    }, m.labelNamesRequested)
-                }
+                })
            })
        })
    }
}
// requireEqualSlice compares two slices, ignoring the difference between nil and empty.
func requireEqualSlice[T any](t require.TestingT, a, b []T, msgAndArgs ...interface{}) {
if len(a) == 0 {
require.Empty(t, b, msgAndArgs...)
} else {
require.Equal(t, a, b, msgAndArgs...)
}
}
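For illustration, a tiny hedged example of the helper's contract (the test name is hypothetical): require.Equal would fail on nil vs. empty, requireEqualSlice does not.
func TestRequireEqualSliceSketch(t *testing.T) {
	// Passes: a nil expected slice and an empty actual slice are treated as equal.
	requireEqualSlice(t, []string(nil), []string{})
	// Passes: ordinary equality still applies for non-empty slices.
	requireEqualSlice(t, []string{"a"}, []string{"a"})
}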
type errIterator struct { type errIterator struct {
err error err error
} }

View file

@ -31,11 +31,11 @@ func (noopQuerier) Select(context.Context, bool, *SelectHints, ...*labels.Matche
return NoopSeriesSet() return NoopSeriesSet()
} }
func (noopQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (noopQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
func (noopQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (noopQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
@ -54,11 +54,11 @@ func (noopChunkQuerier) Select(context.Context, bool, *SelectHints, ...*labels.M
return NoopChunkedSeriesSet() return NoopChunkedSeriesSet()
} }
func (noopChunkQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (noopChunkQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
func (noopChunkQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (noopChunkQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
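For orientation, a hedged sketch of calling the widened interface from outside the package; the LabelHints type follows from the new parameter above, and its Limit field is an assumption of this sketch (nil hints remain valid, as the noop implementations show).
package main

import (
	"context"

	"github.com/prometheus/prometheus/storage"
)

// labelNamesExample queries label names, capping the result size via hints.
func labelNamesExample(ctx context.Context, q storage.Querier) ([]string, error) {
	// Limit is an assumed LabelHints field used for illustration; passing
	// nil instead of hints is also allowed.
	names, _, err := q.LabelNames(ctx, &storage.LabelHints{Limit: 100})
	return names, err
}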

View file

@ -14,7 +14,6 @@
package remote package remote
import ( import (
"bufio"
"bytes" "bytes"
"context" "context"
"fmt" "fmt"
@ -38,6 +37,7 @@ import (
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage/remote/azuread" "github.com/prometheus/prometheus/storage/remote/azuread"
"github.com/prometheus/prometheus/storage/remote/googleiam"
) )
const maxErrMsgLen = 1024 const maxErrMsgLen = 1024
@ -132,6 +132,7 @@ type ClientConfig struct {
HTTPClientConfig config_util.HTTPClientConfig HTTPClientConfig config_util.HTTPClientConfig
SigV4Config *sigv4.SigV4Config SigV4Config *sigv4.SigV4Config
AzureADConfig *azuread.AzureADConfig AzureADConfig *azuread.AzureADConfig
GoogleIAMConfig *googleiam.Config
Headers map[string]string Headers map[string]string
RetryOnRateLimit bool RetryOnRateLimit bool
WriteProtoMsg config.RemoteWriteProtoMsg WriteProtoMsg config.RemoteWriteProtoMsg
@ -193,6 +194,13 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
} }
} }
if conf.GoogleIAMConfig != nil {
t, err = googleiam.NewRoundTripper(conf.GoogleIAMConfig, t)
if err != nil {
return nil, err
}
}
writeProtoMsg := config.RemoteWriteProtoMsgV1 writeProtoMsg := config.RemoteWriteProtoMsgV1
if conf.WriteProtoMsg != "" { if conf.WriteProtoMsg != "" {
writeProtoMsg = conf.WriteProtoMsg writeProtoMsg = conf.WriteProtoMsg
@ -235,12 +243,12 @@ type RecoverableError struct {
// Store sends a batch of samples to the HTTP endpoint, the request is the proto marshalled // Store sends a batch of samples to the HTTP endpoint, the request is the proto marshalled
// and encoded bytes from codec.go. // and encoded bytes from codec.go.
func (c *Client) Store(ctx context.Context, req []byte, attempt int) error { func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteResponseStats, error) {
httpReq, err := http.NewRequest(http.MethodPost, c.urlString, bytes.NewReader(req)) httpReq, err := http.NewRequest(http.MethodPost, c.urlString, bytes.NewReader(req))
if err != nil { if err != nil {
// Errors from NewRequest are from unparsable URLs, so are not // Errors from NewRequest are from unparsable URLs, so are not
// recoverable. // recoverable.
return err return WriteResponseStats{}, err
} }
httpReq.Header.Add("Content-Encoding", string(c.writeCompression)) httpReq.Header.Add("Content-Encoding", string(c.writeCompression))
@ -267,28 +275,34 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
if err != nil { if err != nil {
// Errors from Client.Do are from (for example) network errors, so are // Errors from Client.Do are from (for example) network errors, so are
// recoverable. // recoverable.
return RecoverableError{err, defaultBackoff} return WriteResponseStats{}, RecoverableError{err, defaultBackoff}
} }
defer func() { defer func() {
io.Copy(io.Discard, httpResp.Body) io.Copy(io.Discard, httpResp.Body)
httpResp.Body.Close() httpResp.Body.Close()
}() }()
// TODO(bwplotka): Pass logger and emit debug on error?
// A parsing error means some response header values could not be parsed;
// we can continue handling the response.
rs, _ := ParseWriteResponseStats(httpResp)
//nolint:usestdlibvars //nolint:usestdlibvars
if httpResp.StatusCode/100 != 2 { if httpResp.StatusCode/100 == 2 {
scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen)) return rs, nil
line := ""
if scanner.Scan() {
line = scanner.Text()
}
err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line)
} }
// Handle errors, e.g. read a potential error message from the body.
// TODO(bwplotka): Pass logger and emit debug on error?
body, _ := io.ReadAll(io.LimitReader(httpResp.Body, maxErrMsgLen))
err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, body)
//nolint:usestdlibvars //nolint:usestdlibvars
if httpResp.StatusCode/100 == 5 || if httpResp.StatusCode/100 == 5 ||
(c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) { (c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) {
return RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))} return rs, RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))}
} }
return err return rs, err
} }
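For reference, a minimal sketch of the WriteResponseStats shape implied by its usages in this commit; the real type and ParseWriteResponseStats live in a stats file not shown in this diff, and the Add/Confirmed semantics below are assumptions of the sketch.
type WriteResponseStats struct {
	Samples    int  // Parsed from the PRW 2.0 written-stats response headers.
	Histograms int
	Exemplars  int
	Confirmed  bool // True only when those headers were actually present.
}

// Add returns the element-wise sum of two stats; treating Confirmed as
// sticky across attempts is an assumption of this sketch.
func (rs WriteResponseStats) Add(other WriteResponseStats) WriteResponseStats {
	rs.Confirmed = rs.Confirmed || other.Confirmed
	rs.Samples += other.Samples
	rs.Histograms += other.Histograms
	rs.Exemplars += other.Exemplars
	return rs
}

// NoDataWritten reports whether the stats indicate nothing was accepted.
func (rs WriteResponseStats) NoDataWritten() bool {
	return rs.Samples+rs.Histograms+rs.Exemplars == 0
}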
// retryAfterDuration returns the duration for the Retry-After header. In case of any errors, it // retryAfterDuration returns the duration for the Retry-After header. In case of any errors, it

View file

@ -73,7 +73,7 @@ func TestStoreHTTPErrorHandling(t *testing.T) {
c, err := NewWriteClient(hash, conf) c, err := NewWriteClient(hash, conf)
require.NoError(t, err) require.NoError(t, err)
err = c.Store(context.Background(), []byte{}, 0) _, err = c.Store(context.Background(), []byte{}, 0)
if test.err != nil { if test.err != nil {
require.EqualError(t, err, test.err.Error()) require.EqualError(t, err, test.err.Error())
} else { } else {
@ -133,7 +133,7 @@ func TestClientRetryAfter(t *testing.T) {
c := getClient(getClientConfig(serverURL, tc.retryOnRateLimit)) c := getClient(getClientConfig(serverURL, tc.retryOnRateLimit))
var recErr RecoverableError var recErr RecoverableError
err = c.Store(context.Background(), []byte{}, 0) _, err = c.Store(context.Background(), []byte{}, 0)
require.Equal(t, tc.expectedRecoverable, errors.As(err, &recErr), "Mismatch in expected recoverable error status.") require.Equal(t, tc.expectedRecoverable, errors.As(err, &recErr), "Mismatch in expected recoverable error status.")
if tc.expectedRecoverable { if tc.expectedRecoverable {
require.Equal(t, tc.expectedRetryAfter, recErr.retryAfter) require.Equal(t, tc.expectedRetryAfter, recErr.retryAfter)
@ -169,7 +169,7 @@ func TestRetryAfterDuration(t *testing.T) {
} }
} }
func TestClientHeaders(t *testing.T) { func TestClientCustomHeaders(t *testing.T) {
headersToSend := map[string]string{"Foo": "Bar", "Baz": "qux"} headersToSend := map[string]string{"Foo": "Bar", "Baz": "qux"}
var called bool var called bool
@ -203,7 +203,7 @@ func TestClientHeaders(t *testing.T) {
c, err := NewWriteClient("c", conf) c, err := NewWriteClient("c", conf)
require.NoError(t, err) require.NoError(t, err)
err = c.Store(context.Background(), []byte{}, 0) _, err = c.Store(context.Background(), []byte{}, 0)
require.NoError(t, err) require.NoError(t, err)
require.True(t, called, "The remote server wasn't called") require.True(t, called, "The remote server wasn't called")

View file

@ -36,48 +36,48 @@ import (
"github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/annotations"
) )
var testHistogram = histogram.Histogram{
Schema: 2,
ZeroThreshold: 1e-128,
ZeroCount: 0,
Count: 0,
Sum: 20,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{1},
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
NegativeBuckets: []int64{-1},
}
var writeRequestFixture = &prompb.WriteRequest{
Timeseries: []prompb.TimeSeries{
{
Labels: []prompb.Label{
{Name: "__name__", Value: "test_metric1"},
{Name: "b", Value: "c"},
{Name: "baz", Value: "qux"},
{Name: "d", Value: "e"},
{Name: "foo", Value: "bar"},
},
Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}},
Histograms: []prompb.Histogram{prompb.FromIntHistogram(0, &testHistogram), prompb.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
},
{
Labels: []prompb.Label{
{Name: "__name__", Value: "test_metric1"},
{Name: "b", Value: "c"},
{Name: "baz", Value: "qux"},
{Name: "d", Value: "e"},
{Name: "foo", Value: "bar"},
},
Samples: []prompb.Sample{{Value: 2, Timestamp: 1}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}},
Histograms: []prompb.Histogram{prompb.FromIntHistogram(2, &testHistogram), prompb.FromFloatHistogram(3, testHistogram.ToFloat(nil))},
},
},
}
var ( var (
testHistogram = histogram.Histogram{
Schema: 2,
ZeroThreshold: 1e-128,
ZeroCount: 0,
Count: 0,
Sum: 20,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{1},
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
NegativeBuckets: []int64{-1},
}
writeRequestFixture = &prompb.WriteRequest{
Timeseries: []prompb.TimeSeries{
{
Labels: []prompb.Label{
{Name: "__name__", Value: "test_metric1"},
{Name: "b", Value: "c"},
{Name: "baz", Value: "qux"},
{Name: "d", Value: "e"},
{Name: "foo", Value: "bar"},
},
Samples: []prompb.Sample{{Value: 1, Timestamp: 1}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 1}},
Histograms: []prompb.Histogram{prompb.FromIntHistogram(1, &testHistogram), prompb.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
},
{
Labels: []prompb.Label{
{Name: "__name__", Value: "test_metric1"},
{Name: "b", Value: "c"},
{Name: "baz", Value: "qux"},
{Name: "d", Value: "e"},
{Name: "foo", Value: "bar"},
},
Samples: []prompb.Sample{{Value: 2, Timestamp: 2}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 2}},
Histograms: []prompb.Histogram{prompb.FromIntHistogram(3, &testHistogram), prompb.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
},
},
}
writeV2RequestSeries1Metadata = metadata.Metadata{ writeV2RequestSeries1Metadata = metadata.Metadata{
Type: model.MetricTypeGauge, Type: model.MetricTypeGauge,
Help: "Test gauge for test purposes", Help: "Test gauge for test purposes",
@ -88,43 +88,78 @@ var (
Help: "Test counter for test purposes", Help: "Test counter for test purposes",
} }
// writeV2RequestFixture represents the same request as writeRequestFixture, but using the v2 representation. // writeV2RequestFixture represents the same request as writeRequestFixture,
writeV2RequestFixture = func() *writev2.Request { // but using the v2 representation, and additionally includes writeV2RequestSeries1Metadata and writeV2RequestSeries2Metadata.
st := writev2.NewSymbolTable() // NOTE: Use TestWriteV2RequestFixture and copy the diff to regenerate if needed.
b := labels.NewScratchBuilder(0) writeV2RequestFixture = &writev2.Request{
labelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].ToLabels(&b, nil), nil) Symbols: []string{"", "__name__", "test_metric1", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"},
exemplar1LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil) Timeseries: []writev2.TimeSeries{
exemplar2LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil) {
return &writev2.Request{ LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Symbolized writeRequestFixture.Timeseries[0].Labels
Timeseries: []writev2.TimeSeries{ Metadata: writev2.Metadata{
{ Type: writev2.Metadata_METRIC_TYPE_GAUGE, // writeV2RequestSeries1Metadata.Type.
LabelsRefs: labelRefs,
Metadata: writev2.Metadata{ HelpRef: 15, // Symbolized writeV2RequestSeries1Metadata.Help.
Type: writev2.Metadata_METRIC_TYPE_GAUGE, // Same as writeV2RequestSeries1Metadata.Type, but in writev2. UnitRef: 16, // Symbolized writeV2RequestSeries1Metadata.Unit.
HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help),
UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit),
},
Samples: []writev2.Sample{{Value: 1, Timestamp: 0}},
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 0}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(0, &testHistogram), writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
},
{
LabelsRefs: labelRefs,
Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_COUNTER, // Same as writeV2RequestSeries2Metadata.Type, but in writev2.
HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help),
// No unit.
},
Samples: []writev2.Sample{{Value: 2, Timestamp: 1}},
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 1}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(2, &testHistogram), writev2.FromFloatHistogram(3, testHistogram.ToFloat(nil))},
}, },
Samples: []writev2.Sample{{Value: 1, Timestamp: 1}},
Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 1}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
}, },
Symbols: st.Symbols(), {
} LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Same series as first.
}() Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_COUNTER, // writeV2RequestSeries2Metadata.Type.
HelpRef: 17, // Symbolized writeV2RequestSeries2Metadata.Help.
// No unit.
},
Samples: []writev2.Sample{{Value: 2, Timestamp: 2}},
Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{13, 14}, Value: 2, Timestamp: 2}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
},
},
}
) )
func TestWriteV2RequestFixture(t *testing.T) {
// Dynamically generate writeV2RequestFixture, reusing v1 fixture elements.
st := writev2.NewSymbolTable()
b := labels.NewScratchBuilder(0)
labelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].ToLabels(&b, nil), nil)
exemplar1LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
exemplar2LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[1].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
expected := &writev2.Request{
Timeseries: []writev2.TimeSeries{
{
LabelsRefs: labelRefs,
Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_GAUGE,
HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help),
UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit),
},
Samples: []writev2.Sample{{Value: 1, Timestamp: 1}},
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 1}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
},
{
LabelsRefs: labelRefs,
Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_COUNTER,
HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help),
// No unit.
},
Samples: []writev2.Sample{{Value: 2, Timestamp: 2}},
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 2}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
},
},
Symbols: st.Symbols(),
}
// Check that it matches the static writeV2RequestFixture.
require.Equal(t, expected, writeV2RequestFixture)
}
func TestValidateLabelsAndMetricName(t *testing.T) { func TestValidateLabelsAndMetricName(t *testing.T) {
tests := []struct { tests := []struct {
input []prompb.Label input []prompb.Label

View file

@ -0,0 +1,54 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package googleiam provides an http.RoundTripper that attaches a Google Cloud access token
// to remote write requests.
package googleiam
import (
"context"
"fmt"
"net/http"
"golang.org/x/oauth2/google"
"google.golang.org/api/option"
apihttp "google.golang.org/api/transport/http"
)
type Config struct {
CredentialsFile string `yaml:"credentials_file,omitempty"`
}
// NewRoundTripper creates a round tripper that adds Google Cloud Monitoring authorization to calls
// using either a credentials file or the default credentials.
func NewRoundTripper(cfg *Config, next http.RoundTripper) (http.RoundTripper, error) {
if next == nil {
next = http.DefaultTransport
}
const scopes = "https://www.googleapis.com/auth/monitoring.write"
ctx := context.Background()
opts := []option.ClientOption{
option.WithScopes(scopes),
}
if cfg.CredentialsFile != "" {
opts = append(opts, option.WithCredentialsFile(cfg.CredentialsFile))
} else {
creds, err := google.FindDefaultCredentials(ctx, scopes)
if err != nil {
return nil, fmt.Errorf("error finding default Google credentials: %w", err)
}
opts = append(opts, option.WithCredentials(creds))
}
return apihttp.NewTransport(ctx, next, opts...)
}
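A hedged usage sketch for this package: wiring the round tripper into a plain HTTP client. The credentials path is hypothetical, and passing nil as next falls back to http.DefaultTransport, as the code above shows.
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/prometheus/storage/remote/googleiam"
)

func main() {
	rt, err := googleiam.NewRoundTripper(&googleiam.Config{
		// Hypothetical path; leave empty to use the default credentials.
		CredentialsFile: "/etc/prometheus/gcm-sa.json",
	}, nil)
	if err != nil {
		log.Fatalf("building Google IAM round tripper: %v", err)
	}
	client := &http.Client{Transport: rt}
	_ = client // Requests sent with this client now carry a Google Cloud access token.
}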

View file

@ -39,9 +39,3 @@ func (m *maxTimestamp) Get() float64 {
defer m.mtx.Unlock() defer m.mtx.Unlock()
return m.value return m.value
} }
func (m *maxTimestamp) Collect(c chan<- prometheus.Metric) {
if m.Get() > 0 {
m.Gauge.Collect(c)
}
}

View file

@ -65,14 +65,14 @@ type bucketBoundsData struct {
bound float64 bound float64
} }
// byBucketBoundsData enables the usage of sort.Sort() with a slice of bucket bounds // byBucketBoundsData enables the usage of sort.Sort() with a slice of bucket bounds.
type byBucketBoundsData []bucketBoundsData type byBucketBoundsData []bucketBoundsData
func (m byBucketBoundsData) Len() int { return len(m) } func (m byBucketBoundsData) Len() int { return len(m) }
func (m byBucketBoundsData) Less(i, j int) bool { return m[i].bound < m[j].bound } func (m byBucketBoundsData) Less(i, j int) bool { return m[i].bound < m[j].bound }
func (m byBucketBoundsData) Swap(i, j int) { m[i], m[j] = m[j], m[i] } func (m byBucketBoundsData) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
// ByLabelName enables the usage of sort.Sort() with a slice of labels // ByLabelName enables the usage of sort.Sort() with a slice of labels.
type ByLabelName []prompb.Label type ByLabelName []prompb.Label
func (a ByLabelName) Len() int { return len(a) } func (a ByLabelName) Len() int { return len(a) }
@ -115,14 +115,23 @@ var seps = []byte{'\xff'}
// createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values. // createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values.
// Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and // Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and
// if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized. // if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized.
func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string, // If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels.
func createAttributes(resource pcommon.Resource, attributes pcommon.Map, settings Settings,
ignoreAttrs []string, logOnOverwrite bool, extras ...string) []prompb.Label { ignoreAttrs []string, logOnOverwrite bool, extras ...string) []prompb.Label {
resourceAttrs := resource.Attributes() resourceAttrs := resource.Attributes()
serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName) serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName)
instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID) instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID)
promotedAttrs := make([]prompb.Label, 0, len(settings.PromoteResourceAttributes))
for _, name := range settings.PromoteResourceAttributes {
if value, exists := resourceAttrs.Get(name); exists {
promotedAttrs = append(promotedAttrs, prompb.Label{Name: name, Value: value.AsString()})
}
}
sort.Stable(ByLabelName(promotedAttrs))
// Calculate the maximum possible number of labels we could return so we can preallocate l // Calculate the maximum possible number of labels we could return so we can preallocate l
maxLabelCount := attributes.Len() + len(externalLabels) + len(extras)/2 maxLabelCount := attributes.Len() + len(settings.ExternalLabels) + len(promotedAttrs) + len(extras)/2
if haveServiceName { if haveServiceName {
maxLabelCount++ maxLabelCount++
@ -132,9 +141,6 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
maxLabelCount++ maxLabelCount++
} }
// map ensures no duplicate label name
l := make(map[string]string, maxLabelCount)
// Ensure attributes are sorted by key for consistent merging of keys which // Ensure attributes are sorted by key for consistent merging of keys which
// collide when sanitized. // collide when sanitized.
labels := make([]prompb.Label, 0, maxLabelCount) labels := make([]prompb.Label, 0, maxLabelCount)
@ -148,6 +154,8 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
}) })
sort.Stable(ByLabelName(labels)) sort.Stable(ByLabelName(labels))
// map ensures no duplicate label names.
l := make(map[string]string, maxLabelCount)
for _, label := range labels { for _, label := range labels {
var finalKey = prometheustranslator.NormalizeLabel(label.Name) var finalKey = prometheustranslator.NormalizeLabel(label.Name)
if existingValue, alreadyExists := l[finalKey]; alreadyExists { if existingValue, alreadyExists := l[finalKey]; alreadyExists {
@ -157,6 +165,13 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
} }
} }
for _, lbl := range promotedAttrs {
normalized := prometheustranslator.NormalizeLabel(lbl.Name)
if _, exists := l[normalized]; !exists {
l[normalized] = lbl.Value
}
}
// Map service.name + service.namespace to job // Map service.name + service.namespace to job
if haveServiceName { if haveServiceName {
val := serviceName.AsString() val := serviceName.AsString()
@ -169,7 +184,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
if haveInstanceID { if haveInstanceID {
l[model.InstanceLabel] = instance.AsString() l[model.InstanceLabel] = instance.AsString()
} }
for key, value := range externalLabels { for key, value := range settings.ExternalLabels {
// External labels have already been sanitized // External labels have already been sanitized
if _, alreadyExists := l[key]; alreadyExists { if _, alreadyExists := l[key]; alreadyExists {
// Skip external labels if they are overridden by metric attributes // Skip external labels if they are overridden by metric attributes
@ -232,7 +247,7 @@ func (c *PrometheusConverter) addHistogramDataPoints(dataPoints pmetric.Histogra
for x := 0; x < dataPoints.Len(); x++ { for x := 0; x < dataPoints.Len(); x++ {
pt := dataPoints.At(x) pt := dataPoints.At(x)
timestamp := convertTimeStamp(pt.Timestamp()) timestamp := convertTimeStamp(pt.Timestamp())
baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false) baseLabels := createAttributes(resource, pt.Attributes(), settings, nil, false)
// If the sum is unset, it indicates the _sum metric point should be // If the sum is unset, it indicates the _sum metric point should be
// omitted // omitted
@ -408,7 +423,7 @@ func (c *PrometheusConverter) addSummaryDataPoints(dataPoints pmetric.SummaryDat
for x := 0; x < dataPoints.Len(); x++ { for x := 0; x < dataPoints.Len(); x++ {
pt := dataPoints.At(x) pt := dataPoints.At(x)
timestamp := convertTimeStamp(pt.Timestamp()) timestamp := convertTimeStamp(pt.Timestamp())
baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false) baseLabels := createAttributes(resource, pt.Attributes(), settings, nil, false)
// treat sum as a sample in an individual TimeSeries // treat sum as a sample in an individual TimeSeries
sum := &prompb.Sample{ sum := &prompb.Sample{
@ -554,7 +569,8 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta
name = settings.Namespace + "_" + name name = settings.Namespace + "_" + name
} }
labels := createAttributes(resource, attributes, settings.ExternalLabels, identifyingAttrs, false, model.MetricNameLabel, name) settings.PromoteResourceAttributes = nil
labels := createAttributes(resource, attributes, settings, identifyingAttrs, false, model.MetricNameLabel, name)
haveIdentifier := false haveIdentifier := false
for _, l := range labels { for _, l := range labels {
if l.Name == model.JobLabel || l.Name == model.InstanceLabel { if l.Name == model.JobLabel || l.Name == model.InstanceLabel {

View file

@ -0,0 +1,161 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheusremotewrite
import (
"testing"
"github.com/stretchr/testify/assert"
"go.opentelemetry.io/collector/pdata/pcommon"
"github.com/prometheus/prometheus/prompb"
)
func TestCreateAttributes(t *testing.T) {
resourceAttrs := map[string]string{
"service.name": "service name",
"service.instance.id": "service ID",
"existent-attr": "resource value",
// This one is for testing conflict with metric attribute.
"metric-attr": "resource value",
// This one is for testing conflict with auto-generated job attribute.
"job": "resource value",
// This one is for testing conflict with auto-generated instance attribute.
"instance": "resource value",
}
resource := pcommon.NewResource()
for k, v := range resourceAttrs {
resource.Attributes().PutStr(k, v)
}
attrs := pcommon.NewMap()
attrs.PutStr("__name__", "test_metric")
attrs.PutStr("metric-attr", "metric value")
testCases := []struct {
name string
promoteResourceAttributes []string
expectedLabels []prompb.Label
}{
{
name: "Successful conversion without resource attribute promotion",
promoteResourceAttributes: nil,
expectedLabels: []prompb.Label{
{
Name: "__name__",
Value: "test_metric",
},
{
Name: "instance",
Value: "service ID",
},
{
Name: "job",
Value: "service name",
},
{
Name: "metric_attr",
Value: "metric value",
},
},
},
{
name: "Successful conversion with resource attribute promotion",
promoteResourceAttributes: []string{"non-existent-attr", "existent-attr"},
expectedLabels: []prompb.Label{
{
Name: "__name__",
Value: "test_metric",
},
{
Name: "instance",
Value: "service ID",
},
{
Name: "job",
Value: "service name",
},
{
Name: "metric_attr",
Value: "metric value",
},
{
Name: "existent_attr",
Value: "resource value",
},
},
},
{
name: "Successful conversion with resource attribute promotion, conflicting resource attributes are ignored",
promoteResourceAttributes: []string{"non-existent-attr", "existent-attr", "metric-attr", "job", "instance"},
expectedLabels: []prompb.Label{
{
Name: "__name__",
Value: "test_metric",
},
{
Name: "instance",
Value: "service ID",
},
{
Name: "job",
Value: "service name",
},
{
Name: "existent_attr",
Value: "resource value",
},
{
Name: "metric_attr",
Value: "metric value",
},
},
},
{
name: "Successful conversion with resource attribute promotion, attributes are only promoted once",
promoteResourceAttributes: []string{"existent-attr", "existent-attr"},
expectedLabels: []prompb.Label{
{
Name: "__name__",
Value: "test_metric",
},
{
Name: "instance",
Value: "service ID",
},
{
Name: "job",
Value: "service name",
},
{
Name: "existent_attr",
Value: "resource value",
},
{
Name: "metric_attr",
Value: "metric value",
},
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
settings := Settings{
PromoteResourceAttributes: tc.promoteResourceAttributes,
}
lbls := createAttributes(resource, attrs, settings, nil, false)
assert.ElementsMatch(t, lbls, tc.expectedLabels)
})
}
}

View file

@ -45,7 +45,7 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetr
lbls := createAttributes( lbls := createAttributes(
resource, resource,
pt.Attributes(), pt.Attributes(),
settings.ExternalLabels, settings,
nil, nil,
true, true,
model.MetricNameLabel, model.MetricNameLabel,

View file

@ -30,12 +30,13 @@ import (
) )
type Settings struct { type Settings struct {
Namespace string Namespace string
ExternalLabels map[string]string ExternalLabels map[string]string
DisableTargetInfo bool DisableTargetInfo bool
ExportCreatedMetric bool ExportCreatedMetric bool
AddMetricSuffixes bool AddMetricSuffixes bool
SendMetadata bool SendMetadata bool
PromoteResourceAttributes []string
} }
// PrometheusConverter converts from OTel write format to Prometheus remote write format. // PrometheusConverter converts from OTel write format to Prometheus remote write format.

View file

@ -34,7 +34,7 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(dataPoints pmetric.Number
labels := createAttributes( labels := createAttributes(
resource, resource,
pt.Attributes(), pt.Attributes(),
settings.ExternalLabels, settings,
nil, nil,
true, true,
model.MetricNameLabel, model.MetricNameLabel,
@ -64,7 +64,7 @@ func (c *PrometheusConverter) addSumNumberDataPoints(dataPoints pmetric.NumberDa
lbls := createAttributes( lbls := createAttributes(
resource, resource,
pt.Attributes(), pt.Attributes(),
settings.ExternalLabels, settings,
nil, nil,
true, true,
model.MetricNameLabel, model.MetricNameLabel,

View file

@ -232,7 +232,7 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
Namespace: namespace, Namespace: namespace,
Subsystem: subsystem, Subsystem: subsystem,
Name: "queue_highest_sent_timestamp_seconds", Name: "queue_highest_sent_timestamp_seconds",
Help: "Timestamp from a WAL sample, the highest timestamp successfully sent by this queue, in seconds since epoch.", Help: "Timestamp from a WAL sample, the highest timestamp successfully sent by this queue, in seconds since epoch. Initialized to 0 when no data has been sent yet.",
ConstLabels: constLabels, ConstLabels: constLabels,
}), }),
} }
@ -391,7 +391,7 @@ func (m *queueManagerMetrics) unregister() {
// external timeseries database. // external timeseries database.
type WriteClient interface { type WriteClient interface {
// Store stores the given samples in the remote storage. // Store stores the given samples in the remote storage.
Store(ctx context.Context, req []byte, retryAttempt int) error Store(ctx context.Context, req []byte, retryAttempt int) (WriteResponseStats, error)
// Name uniquely identifies the remote storage. // Name uniquely identifies the remote storage.
Name() string Name() string
// Endpoint is the remote read or write endpoint for the storage client. // Endpoint is the remote read or write endpoint for the storage client.
@ -597,14 +597,15 @@ func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []p
} }
begin := time.Now() begin := time.Now()
err := t.storeClient.Store(ctx, req, try) // Ignoring WriteResponseStats, because there is nothing for metadata, since it's
// embedded in v2 calls now, and we do v1 here.
_, err := t.storeClient.Store(ctx, req, try)
t.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) t.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
if err != nil { if err != nil {
span.RecordError(err) span.RecordError(err)
return err return err
} }
return nil return nil
} }
@ -1108,9 +1109,9 @@ func (t *QueueManager) shouldReshard(desiredShards int) bool {
if desiredShards == t.numShards { if desiredShards == t.numShards {
return false return false
} }
// We shouldn't reshard if Prometheus hasn't been able to send to the // We shouldn't reshard if Prometheus hasn't been able to send
// remote endpoint successfully within some period of time. // since the last time it checked if it should reshard.
minSendTimestamp := time.Now().Add(-2 * time.Duration(t.cfg.BatchSendDeadline)).Unix() minSendTimestamp := time.Now().Add(-1 * shardUpdateDuration).Unix()
lsts := t.lastSendTimestamp.Load() lsts := t.lastSendTimestamp.Load()
if lsts < minSendTimestamp { if lsts < minSendTimestamp {
level.Warn(t.logger).Log("msg", "Skipping resharding, last successful send was beyond threshold", "lastSendTimestamp", lsts, "minSendTimestamp", minSendTimestamp) level.Warn(t.logger).Log("msg", "Skipping resharding, last successful send was beyond threshold", "lastSendTimestamp", lsts, "minSendTimestamp", minSendTimestamp)
@ -1468,6 +1469,8 @@ func (q *queue) FlushAndShutdown(done <-chan struct{}) {
for q.tryEnqueueingBatch(done) { for q.tryEnqueueingBatch(done) {
time.Sleep(time.Second) time.Sleep(time.Second)
} }
q.batchMtx.Lock()
defer q.batchMtx.Unlock()
q.batch = nil q.batch = nil
close(q.batchQueue) close(q.batchQueue)
} }
@ -1659,8 +1662,8 @@ func populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries, sen
func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error { func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error {
begin := time.Now() begin := time.Now()
err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf, enc) rs, err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf, enc)
s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, 0, time.Since(begin)) s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, 0, rs, time.Since(begin))
return err return err
} }
@ -1668,17 +1671,29 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
// See https://github.com/prometheus/prometheus/issues/14409 // See https://github.com/prometheus/prometheus/issues/14409
func (s *shards) sendV2Samples(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error { func (s *shards) sendV2Samples(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error {
begin := time.Now() begin := time.Now()
err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf, enc) rs, err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf, enc)
s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, metadataCount, time.Since(begin)) s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, metadataCount, rs, time.Since(begin))
return err return err
} }
func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exemplarCount, histogramCount, metadataCount int, duration time.Duration) { func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exemplarCount, histogramCount, metadataCount int, rs WriteResponseStats, duration time.Duration) {
// Partial errors may happen -- account for that.
sampleDiff := sampleCount - rs.Samples
if sampleDiff > 0 {
s.qm.metrics.failedSamplesTotal.Add(float64(sampleDiff))
}
histogramDiff := histogramCount - rs.Histograms
if histogramDiff > 0 {
s.qm.metrics.failedHistogramsTotal.Add(float64(histogramDiff))
}
exemplarDiff := exemplarCount - rs.Exemplars
if exemplarDiff > 0 {
s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarDiff))
}
if err != nil { if err != nil {
level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "histogramCount", histogramCount, "err", err) level.Error(s.qm.logger).Log("msg", "non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err)
s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount)) } else if sampleDiff+exemplarDiff+histogramDiff > 0 {
s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarCount)) level.Error(s.qm.logger).Log("msg", "we got a 2xx status code from the Receiver yet statistics indicate some data was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff)
s.qm.metrics.failedHistogramsTotal.Add(float64(histogramCount))
} }
// These counters are used to calculate the dynamic sharding, and as such // These counters are used to calculate the dynamic sharding, and as such
@ -1686,6 +1701,7 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl
s.qm.dataOut.incr(int64(sampleCount + exemplarCount + histogramCount + metadataCount)) s.qm.dataOut.incr(int64(sampleCount + exemplarCount + histogramCount + metadataCount))
s.qm.dataOutDuration.incr(int64(duration)) s.qm.dataOutDuration.incr(int64(duration))
s.qm.lastSendTimestamp.Store(time.Now().Unix()) s.qm.lastSendTimestamp.Store(time.Now().Unix())
// Pending samples/exemplars/histograms also should be subtracted, as an error means // Pending samples/exemplars/histograms also should be subtracted, as an error means
// they will not be retried. // they will not be retried.
s.qm.metrics.pendingSamples.Sub(float64(sampleCount)) s.qm.metrics.pendingSamples.Sub(float64(sampleCount))
@ -1697,19 +1713,29 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl
} }
// sendSamples to the remote storage with backoff for recoverable errors. // sendSamples to the remote storage with backoff for recoverable errors.
func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error { func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) (WriteResponseStats, error) {
// Build the WriteRequest with no metadata. // Build the WriteRequest with no metadata.
req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, buf, nil, enc) req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, buf, nil, enc)
s.qm.buildRequestLimitTimestamp.Store(lowest) s.qm.buildRequestLimitTimestamp.Store(lowest)
if err != nil { if err != nil {
// Failing to build the write request is non-recoverable, since it will // Failing to build the write request is non-recoverable, since it will
// only error if marshaling the proto to bytes fails. // only error if marshaling the proto to bytes fails.
return err return WriteResponseStats{}, err
} }
reqSize := len(req) reqSize := len(req)
*buf = req *buf = req
// Since we retry writes via attemptStore and sendWriteRequestWithBackoff we need
// to track the total amount of accepted data across the various attempts.
accumulatedStats := WriteResponseStats{}
var accumulatedStatsMu sync.Mutex
addStats := func(rs WriteResponseStats) {
accumulatedStatsMu.Lock()
accumulatedStats = accumulatedStats.Add(rs)
accumulatedStatsMu.Unlock()
}
// An anonymous function allows us to defer the completion of our per-try spans // An anonymous function allows us to defer the completion of our per-try spans
// without causing a memory leak, and it has the nice effect of not propagating any // without causing a memory leak, and it has the nice effect of not propagating any
// parameters for sendSamplesWithBackoff/3. // parameters for sendSamplesWithBackoff/3.
@ -1757,15 +1783,19 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount)) s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount))
s.qm.metrics.histogramsTotal.Add(float64(histogramCount)) s.qm.metrics.histogramsTotal.Add(float64(histogramCount))
s.qm.metrics.metadataTotal.Add(float64(metadataCount)) s.qm.metrics.metadataTotal.Add(float64(metadataCount))
err := s.qm.client().Store(ctx, *buf, try) // Technically for v1, we will likely have empty response stats, but for
// newer Receivers this might not be the case, so use it on a best-effort basis.
rs, err := s.qm.client().Store(ctx, *buf, try)
s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
// TODO(bwplotka): Revisit this once we have Receivers doing retriable partial errors;
// so far we don't have those, so it's OK to potentially skew statistics.
addStats(rs)
if err != nil { if err == nil {
span.RecordError(err) return nil
return err
} }
span.RecordError(err)
return nil return err
} }
onRetry := func() { onRetry := func() {
@ -1778,29 +1808,48 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
if errors.Is(err, context.Canceled) { if errors.Is(err, context.Canceled) {
// When there is resharding, we cancel the context for this queue, which means the data is not sent. // When there is resharding, we cancel the context for this queue, which means the data is not sent.
// So we exit early to not update the metrics. // So we exit early to not update the metrics.
return err return accumulatedStats, err
} }
s.qm.metrics.sentBytesTotal.Add(float64(reqSize)) s.qm.metrics.sentBytesTotal.Add(float64(reqSize))
s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000)) s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000))
return err if err == nil && !accumulatedStats.Confirmed {
// No 2.0 response headers, and we sent a v1 message, so it's likely a 1.0 Receiver.
// Assume success, don't rely on headers.
return WriteResponseStats{
Samples: sampleCount,
Histograms: histogramCount,
Exemplars: exemplarCount,
}, nil
}
return accumulatedStats, err
} }
// sendV2Samples to the remote storage with backoff for recoverable errors. // sendV2Samples to the remote storage with backoff for recoverable errors.
func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error { func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) (WriteResponseStats, error) {
// Build the WriteRequest with no metadata. // Build the WriteRequest with no metadata.
req, highest, lowest, err := buildV2WriteRequest(s.qm.logger, samples, labels, pBuf, buf, nil, enc) req, highest, lowest, err := buildV2WriteRequest(s.qm.logger, samples, labels, pBuf, buf, nil, enc)
s.qm.buildRequestLimitTimestamp.Store(lowest) s.qm.buildRequestLimitTimestamp.Store(lowest)
if err != nil { if err != nil {
// Failing to build the write request is non-recoverable, since it will // Failing to build the write request is non-recoverable, since it will
// only error if marshaling the proto to bytes fails. // only error if marshaling the proto to bytes fails.
return err return WriteResponseStats{}, err
} }
reqSize := len(req) reqSize := len(req)
*buf = req *buf = req
// Since we retry writes via attemptStore and sendWriteRequestWithBackoff we need
// to track the total amount of accepted data across the various attempts.
accumulatedStats := WriteResponseStats{}
var accumulatedStatsMu sync.Mutex
addStats := func(rs WriteResponseStats) {
accumulatedStatsMu.Lock()
accumulatedStats = accumulatedStats.Add(rs)
accumulatedStatsMu.Unlock()
}
// An anonymous function allows us to defer the completion of our per-try spans // An anonymous function allows us to defer the completion of our per-try spans
// without causing a memory leak, and it has the nice effect of not propagating any // without causing a memory leak, and it has the nice effect of not propagating any
// parameters for sendSamplesWithBackoff/3. // parameters for sendSamplesWithBackoff/3.
@ -1848,15 +1897,28 @@ func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2
s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount)) s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount))
s.qm.metrics.histogramsTotal.Add(float64(histogramCount)) s.qm.metrics.histogramsTotal.Add(float64(histogramCount))
s.qm.metrics.metadataTotal.Add(float64(metadataCount)) s.qm.metrics.metadataTotal.Add(float64(metadataCount))
err := s.qm.client().Store(ctx, *buf, try) rs, err := s.qm.client().Store(ctx, *buf, try)
s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
// TODO(bwplotka): Revisit this once we have Receivers doing retriable partial errors;
// so far we don't have those, so it's OK to potentially skew statistics.
addStats(rs)
if err != nil { if err == nil {
span.RecordError(err) // Check the case mentioned in PRW 2.0
return err // https://prometheus.io/docs/specs/remote_write_spec_2_0/#required-written-response-headers.
if sampleCount+histogramCount+exemplarCount > 0 && rs.NoDataWritten() {
err = fmt.Errorf("sent v2 request with %v samples, %v histograms and %v exemplars; got 2xx, but PRW 2.0 response header statistics indicate %v samples, %v histograms and %v exemplars were accepted;"+
" assumining failure e.g. the target only supports PRW 1.0 prometheus.WriteRequest, but does not check the Content-Type header correctly",
sampleCount, histogramCount, exemplarCount,
rs.Samples, rs.Histograms, rs.Exemplars,
)
span.RecordError(err)
return err
}
return nil
} }
span.RecordError(err)
return nil return err
} }
onRetry := func() { onRetry := func() {
@ -1869,13 +1931,12 @@ func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2
if errors.Is(err, context.Canceled) { if errors.Is(err, context.Canceled) {
// When there is resharding, we cancel the context for this queue, which means the data is not sent. // When there is resharding, we cancel the context for this queue, which means the data is not sent.
// So we exit early to not update the metrics. // So we exit early to not update the metrics.
return err return accumulatedStats, err
} }
s.qm.metrics.sentBytesTotal.Add(float64(reqSize)) s.qm.metrics.sentBytesTotal.Add(float64(reqSize))
s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000)) s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000))
return accumulatedStats, err
return err
} }
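To make the header check above concrete, here is a hedged receiver-side sketch: per the PRW 2.0 spec section linked in the comment, a 2.0 receiver reports per-type written counts in response headers so senders can detect partial or silent data loss despite a 2xx status. Header names follow the spec; the decode/append step is elided.
package main

import (
	"net/http"
	"strconv"
)

// handleRemoteWrite illustrates the response of a PRW 2.0 receiver.
func handleRemoteWrite(w http.ResponseWriter, _ *http.Request) {
	samples, histograms, exemplars := 0, 0, 0
	// ... decode the writev2.Request, append the data, count what was written ...
	w.Header().Set("X-Prometheus-Remote-Write-Samples-Written", strconv.Itoa(samples))
	w.Header().Set("X-Prometheus-Remote-Write-Histograms-Written", strconv.Itoa(histograms))
	w.Header().Set("X-Prometheus-Remote-Write-Exemplars-Written", strconv.Itoa(exemplars))
	w.WriteHeader(http.StatusNoContent)
}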
func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries, pendingData []writev2.TimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int, int) { func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries, pendingData []writev2.TimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int, int) {

View file

@ -60,7 +60,7 @@ func newHighestTimestampMetric() *maxTimestamp {
Namespace: namespace, Namespace: namespace,
Subsystem: subsystem, Subsystem: subsystem,
Name: "highest_timestamp_in_seconds", Name: "highest_timestamp_in_seconds",
Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.", Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet",
}), }),
} }
} }
@ -118,10 +118,10 @@ func TestBasicContentNegotiation(t *testing.T) {
expectFail: true, expectFail: true,
}, },
{ {
name: "v2 talks to v1 that tries to unmarshal v2 payload with v1 proto", name: "v2 talks to (broken) v1 that tries to unmarshal v2 payload with v1 proto",
senderProtoMsg: config.RemoteWriteProtoMsgV2, receiverProtoMsg: config.RemoteWriteProtoMsgV1, senderProtoMsg: config.RemoteWriteProtoMsgV2, receiverProtoMsg: config.RemoteWriteProtoMsgV1,
injectErrs: []error{nil}, injectErrs: []error{nil},
expectFail: true, // invalid request, no timeseries expectFail: true, // We detect this thanks to https://github.com/prometheus/prometheus/issues/14359
}, },
// Opposite, v1 talking to v2 only server. // Opposite, v1 talking to v2 only server.
{ {
@ -130,12 +130,6 @@ func TestBasicContentNegotiation(t *testing.T) {
injectErrs: []error{errors.New("pretend unrecoverable err")}, injectErrs: []error{errors.New("pretend unrecoverable err")},
expectFail: true, expectFail: true,
}, },
{
name: "v1 talks to (broken) v2 that tries to unmarshal v1 payload with v2 proto",
senderProtoMsg: config.RemoteWriteProtoMsgV1, receiverProtoMsg: config.RemoteWriteProtoMsgV2,
injectErrs: []error{nil},
expectFail: true, // invalid request, no timeseries
},
} { } {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
@ -182,7 +176,6 @@ func TestBasicContentNegotiation(t *testing.T) {
if !tc.expectFail { if !tc.expectFail {
// No error expected, so wait for data. // No error expected, so wait for data.
c.waitForExpectedData(t, 5*time.Second) c.waitForExpectedData(t, 5*time.Second)
require.Equal(t, 1, c.writesReceived)
require.Equal(t, 0.0, client_testutil.ToFloat64(qm.metrics.failedSamplesTotal)) require.Equal(t, 0.0, client_testutil.ToFloat64(qm.metrics.failedSamplesTotal))
} else { } else {
// Wait for failure to be recorded in metrics. // Wait for failure to be recorded in metrics.
@ -190,11 +183,10 @@ func TestBasicContentNegotiation(t *testing.T) {
defer cancel() defer cancel()
require.NoError(t, runutil.Retry(500*time.Millisecond, ctx.Done(), func() error { require.NoError(t, runutil.Retry(500*time.Millisecond, ctx.Done(), func() error {
if client_testutil.ToFloat64(qm.metrics.failedSamplesTotal) != 1.0 { if client_testutil.ToFloat64(qm.metrics.failedSamplesTotal) != 1.0 {
return errors.New("expected one sample failed in qm metrics") return fmt.Errorf("expected one sample failed in qm metrics; got %v", client_testutil.ToFloat64(qm.metrics.failedSamplesTotal))
} }
return nil return nil
})) }))
require.Equal(t, 0, c.writesReceived)
} }
// samplesTotal means attempts. // samplesTotal means attempts.
@ -711,32 +703,35 @@ func TestShouldReshard(t *testing.T) {
startingShards int startingShards int
samplesIn, samplesOut, lastSendTimestamp int64 samplesIn, samplesOut, lastSendTimestamp int64
expectedToReshard bool expectedToReshard bool
sendDeadline model.Duration
} }
cases := []testcase{ cases := []testcase{
{ {
// Resharding shouldn't take place if the last successful send was > batch send deadline*2 seconds ago. // Resharding shouldn't take place if we haven't successfully sent
// within the last shardUpdateDuration, even if the send deadline is very low.
startingShards: 10, startingShards: 10,
samplesIn: 1000, samplesIn: 1000,
samplesOut: 10, samplesOut: 10,
lastSendTimestamp: time.Now().Unix() - int64(3*time.Duration(config.DefaultQueueConfig.BatchSendDeadline)/time.Second), lastSendTimestamp: time.Now().Unix() - int64(shardUpdateDuration),
expectedToReshard: false, expectedToReshard: false,
sendDeadline: model.Duration(100 * time.Millisecond),
}, },
{ {
startingShards: 5, startingShards: 10,
samplesIn: 1000, samplesIn: 1000,
samplesOut: 10, samplesOut: 10,
lastSendTimestamp: time.Now().Unix(), lastSendTimestamp: time.Now().Unix(),
expectedToReshard: true, expectedToReshard: true,
sendDeadline: config.DefaultQueueConfig.BatchSendDeadline,
}, },
} }
for _, c := range cases { for _, c := range cases {
_, m := newTestClientAndQueueManager(t, defaultFlushDeadline, config.RemoteWriteProtoMsgV1) _, m := newTestClientAndQueueManager(t, time.Duration(c.sendDeadline), config.RemoteWriteProtoMsgV1)
m.numShards = c.startingShards m.numShards = c.startingShards
m.dataIn.incr(c.samplesIn) m.dataIn.incr(c.samplesIn)
m.dataOut.incr(c.samplesOut) m.dataOut.incr(c.samplesOut)
m.lastSendTimestamp.Store(c.lastSendTimestamp) m.lastSendTimestamp.Store(c.lastSendTimestamp)
m.Start() m.Start()
desiredShards := m.calculateDesiredShards() desiredShards := m.calculateDesiredShards()
@ -764,10 +759,10 @@ func TestDisableReshardOnRetry(t *testing.T) {
metrics = newQueueManagerMetrics(nil, "", "") metrics = newQueueManagerMetrics(nil, "", "")
client = &MockWriteClient{ client = &MockWriteClient{
StoreFunc: func(ctx context.Context, b []byte, i int) error { StoreFunc: func(ctx context.Context, b []byte, i int) (WriteResponseStats, error) {
onStoreCalled() onStoreCalled()
return RecoverableError{ return WriteResponseStats{}, RecoverableError{
error: fmt.Errorf("fake error"), error: fmt.Errorf("fake error"),
retryAfter: model.Duration(retryAfter), retryAfter: model.Duration(retryAfter),
} }
@ -1113,14 +1108,14 @@ func (c *TestWriteClient) SetReturnError(err error) {
c.returnError = err c.returnError = err
} }
func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error { func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) (WriteResponseStats, error) {
c.mtx.Lock() c.mtx.Lock()
defer c.mtx.Unlock() defer c.mtx.Unlock()
if c.storeWait > 0 { if c.storeWait > 0 {
time.Sleep(c.storeWait) time.Sleep(c.storeWait)
} }
if c.returnError != nil { if c.returnError != nil {
return c.returnError return WriteResponseStats{}, c.returnError
} }
// nil buffers are ok for snappy, ignore cast error. // nil buffers are ok for snappy, ignore cast error.
if c.buf != nil { if c.buf != nil {
@ -1130,14 +1125,14 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
reqBuf, err := snappy.Decode(c.buf, req) reqBuf, err := snappy.Decode(c.buf, req)
c.buf = reqBuf c.buf = reqBuf
if err != nil { if err != nil {
return err return WriteResponseStats{}, err
} }
// Check if we've been told to inject err for this call. // Check if we've been told to inject err for this call.
if len(c.injectedErrs) > 0 { if len(c.injectedErrs) > 0 {
c.currErr++ c.currErr++
if err = c.injectedErrs[c.currErr]; err != nil { if err = c.injectedErrs[c.currErr]; err != nil {
return err return WriteResponseStats{}, err
} }
} }
@ -1156,13 +1151,10 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
} }
} }
if err != nil { if err != nil {
return err return WriteResponseStats{}, err
}
if len(reqProto.Timeseries) == 0 && len(reqProto.Metadata) == 0 {
return errors.New("invalid request, no timeseries")
} }
rs := WriteResponseStats{}
b := labels.NewScratchBuilder(0) b := labels.NewScratchBuilder(0)
for _, ts := range reqProto.Timeseries { for _, ts := range reqProto.Timeseries {
labels := ts.ToLabels(&b, nil) labels := ts.ToLabels(&b, nil)
@ -1170,10 +1162,12 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
if len(ts.Samples) > 0 { if len(ts.Samples) > 0 {
c.receivedSamples[tsID] = append(c.receivedSamples[tsID], ts.Samples...) c.receivedSamples[tsID] = append(c.receivedSamples[tsID], ts.Samples...)
} }
rs.Samples += len(ts.Samples)
if len(ts.Exemplars) > 0 { if len(ts.Exemplars) > 0 {
c.receivedExemplars[tsID] = append(c.receivedExemplars[tsID], ts.Exemplars...) c.receivedExemplars[tsID] = append(c.receivedExemplars[tsID], ts.Exemplars...)
} }
rs.Exemplars += len(ts.Exemplars)
for _, h := range ts.Histograms { for _, h := range ts.Histograms {
if h.IsFloatHistogram() { if h.IsFloatHistogram() {
@ -1182,13 +1176,14 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
c.receivedHistograms[tsID] = append(c.receivedHistograms[tsID], h) c.receivedHistograms[tsID] = append(c.receivedHistograms[tsID], h)
} }
} }
rs.Histograms += len(ts.Histograms)
} }
for _, m := range reqProto.Metadata { for _, m := range reqProto.Metadata {
c.receivedMetadata[m.MetricFamilyName] = append(c.receivedMetadata[m.MetricFamilyName], m) c.receivedMetadata[m.MetricFamilyName] = append(c.receivedMetadata[m.MetricFamilyName], m)
} }
c.writesReceived++ c.writesReceived++
return nil return rs, nil
} }
func (c *TestWriteClient) Name() string { func (c *TestWriteClient) Name() string {
@ -1256,10 +1251,10 @@ func NewTestBlockedWriteClient() *TestBlockingWriteClient {
return &TestBlockingWriteClient{} return &TestBlockingWriteClient{}
} }
func (c *TestBlockingWriteClient) Store(ctx context.Context, _ []byte, _ int) error { func (c *TestBlockingWriteClient) Store(ctx context.Context, _ []byte, _ int) (WriteResponseStats, error) {
c.numCalls.Inc() c.numCalls.Inc()
<-ctx.Done() <-ctx.Done()
return nil return WriteResponseStats{}, nil
} }
func (c *TestBlockingWriteClient) NumCalls() uint64 { func (c *TestBlockingWriteClient) NumCalls() uint64 {
@ -1278,19 +1273,19 @@ func (c *TestBlockingWriteClient) Endpoint() string {
type NopWriteClient struct{} type NopWriteClient struct{}
func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} } func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} }
func (c *NopWriteClient) Store(context.Context, []byte, int) error { func (c *NopWriteClient) Store(context.Context, []byte, int) (WriteResponseStats, error) {
return nil return WriteResponseStats{}, nil
} }
func (c *NopWriteClient) Name() string { return "nopwriteclient" } func (c *NopWriteClient) Name() string { return "nopwriteclient" }
func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" } func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" }
type MockWriteClient struct { type MockWriteClient struct {
StoreFunc func(context.Context, []byte, int) error StoreFunc func(context.Context, []byte, int) (WriteResponseStats, error)
NameFunc func() string NameFunc func() string
EndpointFunc func() string EndpointFunc func() string
} }
func (c *MockWriteClient) Store(ctx context.Context, bb []byte, n int) error { func (c *MockWriteClient) Store(ctx context.Context, bb []byte, n int) (WriteResponseStats, error) {
return c.StoreFunc(ctx, bb, n) return c.StoreFunc(ctx, bb, n)
} }
func (c *MockWriteClient) Name() string { return c.NameFunc() } func (c *MockWriteClient) Name() string { return c.NameFunc() }
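Taken together, the mock updates above imply the new client contract. A reconstruction of the interface as these test doubles now satisfy it (sketched from the diff; the authoritative definition lives in the remote package):

package remote

import "context"

// WriteClient as reconstructed from the signatures above: Store now returns
// WriteResponseStats next to the error, so callers can account for written
// samples, histograms and exemplars even when a request partially fails.
type WriteClient interface {
	// Store transmits the encoded, compressed request; the int is the retry attempt.
	Store(ctx context.Context, req []byte, attempt int) (WriteResponseStats, error)
	Name() string
	Endpoint() string
}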

View file

@ -210,13 +210,13 @@ func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, []s
} }
// LabelValues implements storage.Querier and is a noop. // LabelValues implements storage.Querier and is a noop.
func (q *querier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (q *querier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
return nil, nil, errors.New("not implemented") return nil, nil, errors.New("not implemented")
} }
// LabelNames implements storage.Querier and is a noop. // LabelNames implements storage.Querier and is a noop.
func (q *querier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (q *querier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
return nil, nil, errors.New("not implemented") return nil, nil, errors.New("not implemented")
} }

View file

@ -202,16 +202,34 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re
return err return err
} }
chunks := h.getChunkSeriesSet(ctx, query, filteredMatchers) querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs)
if err := chunks.Err(); err != nil { if err != nil {
return err return err
} }
defer func() {
if err := querier.Close(); err != nil {
level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error())
}
}()
var hints *storage.SelectHints
if query.Hints != nil {
hints = &storage.SelectHints{
Start: query.Hints.StartMs,
End: query.Hints.EndMs,
Step: query.Hints.StepMs,
Func: query.Hints.Func,
Grouping: query.Hints.Grouping,
Range: query.Hints.RangeMs,
By: query.Hints.By,
}
}
ws, err := StreamChunkedReadResponses( ws, err := StreamChunkedReadResponses(
NewChunkedWriter(w, f), NewChunkedWriter(w, f),
int64(i), int64(i),
// The streaming API has to provide the series sorted. // The streaming API has to provide the series sorted.
chunks, querier.Select(ctx, true, hints, filteredMatchers...),
sortedExternalLabels, sortedExternalLabels,
h.remoteReadMaxBytesInFrame, h.remoteReadMaxBytesInFrame,
h.marshalPool, h.marshalPool,
@ -236,35 +254,6 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re
} }
} }
// getChunkSeriesSet executes a query to retrieve a ChunkSeriesSet,
// encapsulating the operation in its own function to ensure timely release of
// the querier resources.
func (h *readHandler) getChunkSeriesSet(ctx context.Context, query *prompb.Query, filteredMatchers []*labels.Matcher) storage.ChunkSeriesSet {
querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs)
if err != nil {
return storage.ErrChunkSeriesSet(err)
}
defer func() {
if err := querier.Close(); err != nil {
level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error())
}
}()
var hints *storage.SelectHints
if query.Hints != nil {
hints = &storage.SelectHints{
Start: query.Hints.StartMs,
End: query.Hints.EndMs,
Step: query.Hints.StepMs,
Func: query.Hints.Func,
Grouping: query.Hints.Grouping,
Range: query.Hints.RangeMs,
By: query.Hints.By,
}
}
return querier.Select(ctx, true, hints, filteredMatchers...)
}
// filterExtLabelsFromMatchers change equality matchers which match external labels // filterExtLabelsFromMatchers change equality matchers which match external labels
// to a matcher that looks for an empty label, // to a matcher that looks for an empty label,
// as that label should not be present in the storage. // as that label should not be present in the storage.
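A plausible reading of why getChunkSeriesSet was inlined above: its deferred querier.Close() ran as soon as the helper returned, i.e. before the caller had streamed the returned series set. A minimal illustration of that anti-pattern (hypothetical code, not from this repository):

package sketch

import (
	"context"

	"github.com/prometheus/prometheus/storage"
)

// brokenSelect shows the hazard the refactor removes: the deferred Close fires
// when this helper returns, so the caller iterates a series set whose
// underlying querier has already been closed.
func brokenSelect(q storage.ChunkQuerier) storage.ChunkSeriesSet {
	defer q.Close() // Too early: the returned set has not been consumed yet.
	return q.Select(context.Background(), true, nil)
}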

107 storage/remote/stats.go Normal file
View file

@ -0,0 +1,107 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"errors"
"net/http"
"strconv"
)
const (
rw20WrittenSamplesHeader = "X-Prometheus-Remote-Write-Samples-Written"
rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Histograms-Written"
rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Exemplars-Written"
)
// WriteResponseStats represents the response write statistics specified in https://github.com/prometheus/docs/pull/2486
type WriteResponseStats struct {
// Samples represents the X-Prometheus-Remote-Write-Samples-Written response header.
Samples int
// Histograms represents the X-Prometheus-Remote-Write-Histograms-Written response header.
Histograms int
// Exemplars represents the X-Prometheus-Remote-Write-Exemplars-Written response header.
Exemplars int
// Confirmed means we can trust those statistics from the point of view
// of the PRW 2.0 spec. When parsed from headers, it means we got at least one
// response header from the Receiver to confirm those numbers, meaning it must
// be at least a 2.0 Receiver. See ParseWriteResponseStats for details.
Confirmed bool
}
// NoDataWritten returns true if statistics indicate no data was written.
func (s WriteResponseStats) NoDataWritten() bool {
return (s.Samples + s.Histograms + s.Exemplars) == 0
}
// AllSamples returns both float and histogram sample numbers.
func (s WriteResponseStats) AllSamples() int {
return s.Samples + s.Histograms
}
// Add returns the sum of this WriteResponseStats plus the given WriteResponseStats.
func (s WriteResponseStats) Add(rs WriteResponseStats) WriteResponseStats {
s.Confirmed = rs.Confirmed
s.Samples += rs.Samples
s.Histograms += rs.Histograms
s.Exemplars += rs.Exemplars
return s
}
// SetHeaders sets response headers in a given response writer.
// Make sure to use it before http.ResponseWriter.WriteHeader and .Write.
func (s WriteResponseStats) SetHeaders(w http.ResponseWriter) {
h := w.Header()
h.Set(rw20WrittenSamplesHeader, strconv.Itoa(s.Samples))
h.Set(rw20WrittenHistogramsHeader, strconv.Itoa(s.Histograms))
h.Set(rw20WrittenExemplarsHeader, strconv.Itoa(s.Exemplars))
}
// ParseWriteResponseStats returns WriteResponseStats parsed from the response headers.
//
// As per the 2.0 spec, a missing header means 0. However, abrupt HTTP errors, 1.0 Receivers
// or buggy 2.0 Receivers might result in no response headers being specified, and that
// does NOT necessarily mean nothing was written. To represent that, we set
// s.Confirmed = true only when we see at least one response header.
//
// An error is returned when any of the headers fails to parse as an integer.
func ParseWriteResponseStats(r *http.Response) (s WriteResponseStats, err error) {
var (
errs []error
h = r.Header
)
if v := h.Get(rw20WrittenSamplesHeader); v != "" { // Empty means zero.
s.Confirmed = true
if s.Samples, err = strconv.Atoi(v); err != nil {
s.Samples = 0
errs = append(errs, err)
}
}
if v := h.Get(rw20WrittenHistogramsHeader); v != "" { // Empty means zero.
s.Confirmed = true
if s.Histograms, err = strconv.Atoi(v); err != nil {
s.Histograms = 0
errs = append(errs, err)
}
}
if v := h.Get(rw20WrittenExemplarsHeader); v != "" { // Empty means zero.
s.Confirmed = true
if s.Exemplars, err = strconv.Atoi(v); err != nil {
s.Exemplars = 0
errs = append(errs, err)
}
}
return s, errors.Join(errs...)
}
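A round-trip sketch of the new helpers (a hypothetical test compiled next to the file above, not part of this commit): the Receiver sets the headers before writing the status code, the sender parses them back, and Confirmed is true because at least one header was present.

package remote

import (
	"net/http/httptest"
	"testing"
)

func TestWriteResponseStatsRoundTripSketch(t *testing.T) {
	rs := WriteResponseStats{Samples: 2, Histograms: 4, Exemplars: 2}

	rec := httptest.NewRecorder()
	rs.SetHeaders(rec) // Per the doc comment above: before WriteHeader/Write.
	rec.WriteHeader(204)

	got, err := ParseWriteResponseStats(rec.Result())
	if err != nil {
		t.Fatal(err)
	}
	if !got.Confirmed || got.AllSamples() != 6 || got.NoDataWritten() {
		t.Fatalf("unexpected stats: %+v", got)
	}
}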

View file

@ -100,7 +100,7 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, f
Namespace: namespace, Namespace: namespace,
Subsystem: subsystem, Subsystem: subsystem,
Name: "highest_timestamp_in_seconds", Name: "highest_timestamp_in_seconds",
Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.", Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet.",
}), }),
}, },
} }
@ -176,6 +176,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
HTTPClientConfig: rwConf.HTTPClientConfig, HTTPClientConfig: rwConf.HTTPClientConfig,
SigV4Config: rwConf.SigV4Config, SigV4Config: rwConf.SigV4Config,
AzureADConfig: rwConf.AzureADConfig, AzureADConfig: rwConf.AzureADConfig,
GoogleIAMConfig: rwConf.GoogleIAMConfig,
Headers: rwConf.Headers, Headers: rwConf.Headers,
RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit, RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit,
}) })

View file

@ -27,6 +27,7 @@ import (
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/golang/snappy" "github.com/golang/snappy"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/exemplar"
@ -43,7 +44,8 @@ type writeHandler struct {
logger log.Logger logger log.Logger
appendable storage.Appendable appendable storage.Appendable
samplesWithInvalidLabelsTotal prometheus.Counter samplesWithInvalidLabelsTotal prometheus.Counter
samplesAppendedWithoutMetadata prometheus.Counter
acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{} acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{}
} }
@ -52,6 +54,9 @@ const maxAheadTime = 10 * time.Minute
// NewWriteHandler creates a http.Handler that accepts remote write requests with // NewWriteHandler creates a http.Handler that accepts remote write requests with
// the given message in acceptedProtoMsgs and writes them to the provided appendable. // the given message in acceptedProtoMsgs and writes them to the provided appendable.
//
// NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible
// as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write.
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler { func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler {
protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{} protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{}
for _, acc := range acceptedProtoMsgs { for _, acc := range acceptedProtoMsgs {
@ -61,15 +66,18 @@ func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable st
logger: logger, logger: logger,
appendable: appendable, appendable: appendable,
acceptedProtoMsgs: protoMsgs, acceptedProtoMsgs: protoMsgs,
samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{ samplesWithInvalidLabelsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Namespace: "prometheus", Namespace: "prometheus",
Subsystem: "api", Subsystem: "api",
Name: "remote_write_invalid_labels_samples_total", Name: "remote_write_invalid_labels_samples_total",
Help: "The total number of remote write samples which contains invalid labels.", Help: "The total number of received remote write samples and histogram samples which were rejected due to invalid labels.",
}),
samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "api",
Name: "remote_write_without_metadata_appended_samples_total",
Help: "The total number of received remote write samples (and histogram samples) which were ingested without corresponding metadata.",
}), }),
}
if reg != nil {
reg.MustRegister(h.samplesWithInvalidLabelsTotal)
} }
return h return h
} }
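The dropped `if reg != nil { reg.MustRegister(...) }` block is safe to remove because promauto tolerates a nil registerer. A standalone illustration of that client_golang behavior (the metric name is made up for the demo):

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	// With a nil Registerer, promauto.With skips registration entirely; the
	// counter still works, it is simply not exported anywhere.
	c := promauto.With(nil).NewCounter(prometheus.CounterOpts{
		Name: "demo_unregistered_total",
		Help: "Hypothetical counter demonstrating promauto.With(nil).",
	})
	c.Inc()
}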
@ -108,15 +116,15 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
contentType = appProtoContentType contentType = appProtoContentType
} }
msg, err := h.parseProtoMsg(contentType) msgType, err := h.parseProtoMsg(contentType)
if err != nil { if err != nil {
level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err) level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
http.Error(w, err.Error(), http.StatusUnsupportedMediaType) http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
return return
} }
if _, ok := h.acceptedProtoMsgs[msg]; !ok { if _, ok := h.acceptedProtoMsgs[msgType]; !ok {
err := fmt.Errorf("%v protobuf message is not accepted by this server; accepted %v", msg, func() (ret []string) { err := fmt.Errorf("%v protobuf message is not accepted by this server; accepted %v", msgType, func() (ret []string) {
for k := range h.acceptedProtoMsgs { for k := range h.acceptedProtoMsgs {
ret = append(ret, string(k)) ret = append(ret, string(k))
} }
@ -154,100 +162,111 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
} }
// Now we have a decompressed buffer we can unmarshal it. // Now we have a decompressed buffer we can unmarshal it.
switch msg {
case config.RemoteWriteProtoMsgV1: if msgType == config.RemoteWriteProtoMsgV1 {
// PRW 1.0 flow has different proto message and no partial write handling.
var req prompb.WriteRequest var req prompb.WriteRequest
if err := proto.Unmarshal(decompressed, &req); err != nil { if err := proto.Unmarshal(decompressed, &req); err != nil {
// TODO(bwplotka): Add more context to responded error? // TODO(bwplotka): Add more context to responded error?
level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msg, "err", err.Error()) level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest) http.Error(w, err.Error(), http.StatusBadRequest)
return return
} }
err = h.write(r.Context(), &req) if err = h.write(r.Context(), &req); err != nil {
case config.RemoteWriteProtoMsgV2: switch {
var req writev2.Request case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample):
if err := proto.Unmarshal(decompressed, &req); err != nil { // Indicate that an out-of-order sample is a bad request to prevent retries.
// TODO(bwplotka): Add more context to responded error? http.Error(w, err.Error(), http.StatusBadRequest)
level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msg, "err", err.Error()) return
http.Error(w, err.Error(), http.StatusBadRequest) default:
return level.Error(h.logger).Log("msg", "Error while remote writing the v1 request", "err", err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
} }
err = h.writeV2(r.Context(), &req) w.WriteHeader(http.StatusNoContent)
return
} }
switch { // Remote Write 2.x proto message handling.
case err == nil: var req writev2.Request
case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample): if err := proto.Unmarshal(decompressed, &req); err != nil {
// Indicated an out of order sample is a bad request to prevent retries. // TODO(bwplotka): Add more context to responded error?
level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest) http.Error(w, err.Error(), http.StatusBadRequest)
return return
default: }
level.Error(h.logger).Log("msg", "Error appending remote write", "err", err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError) respStats, errHTTPCode, err := h.writeV2(r.Context(), &req)
// Set required X-Prometheus-Remote-Write-*-Written response headers, in all cases.
respStats.SetHeaders(w)
if err != nil {
if errHTTPCode/5 == 100 { // 5xx
level.Error(h.logger).Log("msg", "Error while remote writing the v2 request", "err", err.Error())
}
http.Error(w, err.Error(), errHTTPCode)
return return
} }
w.WriteHeader(http.StatusNoContent) w.WriteHeader(http.StatusNoContent)
} }
// checkAppendExemplarError modifies the AppendExemplar's returned error based on the error cause.
func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, outOfOrderErrs *int) error {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
switch {
case errors.Is(unwrappedErr, storage.ErrNotFound):
return storage.ErrNotFound
case errors.Is(unwrappedErr, storage.ErrOutOfOrderExemplar):
*outOfOrderErrs++
level.Debug(h.logger).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e))
return nil
default:
return err
}
}
func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) { func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) {
outOfOrderExemplarErrs := 0 outOfOrderExemplarErrs := 0
samplesWithInvalidLabels := 0 samplesWithInvalidLabels := 0
samplesAppended := 0
timeLimitApp := &timeLimitAppender{ app := &timeLimitAppender{
Appender: h.appendable.Appender(ctx), Appender: h.appendable.Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
} }
defer func() { defer func() {
if err != nil { if err != nil {
_ = timeLimitApp.Rollback() _ = app.Rollback()
return return
} }
err = timeLimitApp.Commit() err = app.Commit()
if err == nil {
h.samplesAppendedWithoutMetadata.Add(float64(samplesAppended))
}
}() }()
b := labels.NewScratchBuilder(0) b := labels.NewScratchBuilder(0)
for _, ts := range req.Timeseries { for _, ts := range req.Timeseries {
ls := ts.ToLabels(&b, nil) ls := ts.ToLabels(&b, nil)
if !ls.IsValid() { if !ls.Has(labels.MetricName) || !ls.IsValid() {
level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String()) level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String())
samplesWithInvalidLabels++ samplesWithInvalidLabels++
// TODO(bwplotka): Even as per 1.0 spec, this should be a 400 error, while other samples are
// potentially written. Perhaps unify with fixed writeV2 implementation a bit.
continue continue
} }
err := h.appendSamples(timeLimitApp, ts.Samples, ls) if err := h.appendV1Samples(app, ts.Samples, ls); err != nil {
if err != nil {
return err return err
} }
samplesAppended += len(ts.Samples)
for _, ep := range ts.Exemplars { for _, ep := range ts.Exemplars {
e := ep.ToExemplar(&b, nil) e := ep.ToExemplar(&b, nil)
h.appendExemplar(timeLimitApp, e, ls, &outOfOrderExemplarErrs) if _, err := app.AppendExemplar(0, ls, e); err != nil {
switch {
case errors.Is(err, storage.ErrOutOfOrderExemplar):
outOfOrderExemplarErrs++
level.Debug(h.logger).Log("msg", "Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
default:
// Since exemplar storage is still experimental, we don't fail the request on ingestion errors
level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err)
}
}
} }
err = h.appendHistograms(timeLimitApp, ts.Histograms, ls) if err = h.appendV1Histograms(app, ts.Histograms, ls); err != nil {
if err != nil {
return err return err
} }
samplesAppended += len(ts.Histograms)
} }
if outOfOrderExemplarErrs > 0 { if outOfOrderExemplarErrs > 0 {
@ -256,170 +275,220 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
if samplesWithInvalidLabels > 0 { if samplesWithInvalidLabels > 0 {
h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels))
} }
return nil return nil
} }
func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (err error) { func (h *writeHandler) appendV1Samples(app storage.Appender, ss []prompb.Sample, labels labels.Labels) error {
outOfOrderExemplarErrs := 0 var ref storage.SeriesRef
var err error
for _, s := range ss {
ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
if err != nil {
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
}
return err
}
}
return nil
}
timeLimitApp := &timeLimitAppender{ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Histogram, labels labels.Labels) error {
var err error
for _, hp := range hh {
if hp.IsFloatHistogram() {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
} else {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
}
if err != nil {
// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp, there is
// a note indicating its inclusion in the future.
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
}
return err
}
}
return nil
}
// writeV2 is similar to write, but it works with v2 proto message,
// allows partial 4xx writes and gathers statistics.
//
// writeV2 returns the statistics.
// In error cases, writeV2 still returns the statistics, plus the error that
// should be propagated to the remote write sender and the HTTP code to use for the status.
//
// NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors.
// Once we hit a 5xx type of error, we immediately stop and roll back all appends.
func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ WriteResponseStats, errHTTPCode int, _ error) {
app := &timeLimitAppender{
Appender: h.appendable.Appender(ctx), Appender: h.appendable.Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
} }
defer func() { s := WriteResponseStats{}
if err != nil { samplesWithoutMetadata, errHTTPCode, err := h.appendV2(app, req, &s)
_ = timeLimitApp.Rollback() if err != nil {
return if errHTTPCode/5 == 100 {
// On 5xx, we always roll back, because we expect
// the sender to retry and TSDB is not idempotent.
if rerr := app.Rollback(); rerr != nil {
level.Error(h.logger).Log("msg", "writev2 rollback failed on retry-able error", "err", rerr)
}
return WriteResponseStats{}, errHTTPCode, err
} }
err = timeLimitApp.Commit()
}()
b := labels.NewScratchBuilder(0) // Non-retriable (e.g. bad request) error case. Data may have been partially written.
commitErr := app.Commit()
if commitErr != nil {
// Bad request errors do not matter here, as we now have an internal (retryable) error.
return WriteResponseStats{}, http.StatusInternalServerError, commitErr
}
// A bad request error happened, but the rest of the data (if any) was written.
h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata))
return s, errHTTPCode, err
}
// All good, just commit.
if err := app.Commit(); err != nil {
return WriteResponseStats{}, http.StatusInternalServerError, err
}
h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata))
return s, 0, nil
}
func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *WriteResponseStats) (samplesWithoutMetadata, errHTTPCode int, err error) {
var (
badRequestErrs []error
outOfOrderExemplarErrs, samplesWithInvalidLabels int
b = labels.NewScratchBuilder(0)
)
for _, ts := range req.Timeseries { for _, ts := range req.Timeseries {
ls := ts.ToLabels(&b, req.Symbols) ls := ts.ToLabels(&b, req.Symbols)
// Validate series labels early.
err := h.appendSamplesV2(timeLimitApp, ts.Samples, ls) // NOTE(bwplotka): While the spec allows UTF-8, the Prometheus Receiver may impose
if err != nil { // specific limits and follow the https://prometheus.io/docs/specs/remote_write_spec_2_0/#invalid-samples case.
return err if !ls.Has(labels.MetricName) || !ls.IsValid() {
badRequestErrs = append(badRequestErrs, fmt.Errorf("invalid metric name or labels, got %v", ls.String()))
samplesWithInvalidLabels += len(ts.Samples) + len(ts.Histograms)
continue
} }
allSamplesSoFar := rs.AllSamples()
var ref storage.SeriesRef
// Samples.
for _, s := range ts.Samples {
ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue())
if err == nil {
rs.Samples++
continue
}
// Handle append error.
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
errors.Is(err, storage.ErrDuplicateSampleForTimestamp) ||
errors.Is(err, storage.ErrTooOldSample) {
// TODO(bwplotka): Is this log too spammy?
level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp)
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
continue
}
return 0, http.StatusInternalServerError, err
}
// Native Histograms.
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, nil, hp.ToFloatHistogram())
} else {
ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, hp.ToIntHistogram(), nil)
}
if err == nil {
rs.Histograms++
continue
}
// Handle append error.
// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp, there is
// a note indicating its inclusion in the future.
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
// TODO(bwplotka): Is this log too spammy?
level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp)
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
continue
}
return 0, http.StatusInternalServerError, err
}
// Exemplars.
for _, ep := range ts.Exemplars { for _, ep := range ts.Exemplars {
e := ep.ToExemplar(&b, req.Symbols) e := ep.ToExemplar(&b, req.Symbols)
h.appendExemplar(timeLimitApp, e, ls, &outOfOrderExemplarErrs) ref, err = app.AppendExemplar(ref, ls, e)
} if err == nil {
rs.Exemplars++
err = h.appendHistogramsV2(timeLimitApp, ts.Histograms, ls) continue
if err != nil { }
return err // Handle append error.
if errors.Is(err, storage.ErrOutOfOrderExemplar) {
outOfOrderExemplarErrs++ // Maintain old metrics, but technically not needed, given we fail here.
level.Error(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
continue
}
// TODO(bwplotka): Add strict mode which would trigger rollback of everything if needed.
// For now we keep the previously released flow (error rather than debug level logging) of dropping them without rollback and 5xx.
level.Error(h.logger).Log("msg", "failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
} }
m := ts.ToMetadata(req.Symbols) m := ts.ToMetadata(req.Symbols)
if _, err = timeLimitApp.UpdateMetadata(0, ls, m); err != nil { if _, err = app.UpdateMetadata(ref, ls, m); err != nil {
level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err) level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err)
// Metadata is attached to each series; since Prometheus does not reject samples without metadata,
// we don't report a remote write error either. We increment a metric instead.
samplesWithoutMetadata += rs.AllSamples() - allSamplesSoFar
} }
} }
if outOfOrderExemplarErrs > 0 { if outOfOrderExemplarErrs > 0 {
_ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
} }
h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels))
return nil if len(badRequestErrs) == 0 {
} return samplesWithoutMetadata, 0, nil
func (h *writeHandler) appendExemplar(app storage.Appender, e exemplar.Exemplar, labels labels.Labels, outOfOrderExemplarErrs *int) {
_, err := app.AppendExemplar(0, labels, e)
err = h.checkAppendExemplarError(err, e, outOfOrderExemplarErrs)
if err != nil {
// Since exemplar storage is still experimental, we don't fail the request on ingestion errors
level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", err)
} }
} // TODO(bwplotka): Better concat formatting? Perhaps add size limit?
return samplesWithoutMetadata, http.StatusBadRequest, errors.Join(badRequestErrs...)
func (h *writeHandler) appendSamples(app storage.Appender, ss []prompb.Sample, labels labels.Labels) error {
var ref storage.SeriesRef
var err error
for _, s := range ss {
ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
}
return err
}
}
return nil
}
func (h *writeHandler) appendSamplesV2(app storage.Appender, ss []writev2.Sample, labels labels.Labels) error {
var ref storage.SeriesRef
var err error
for _, s := range ss {
ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
}
return err
}
}
return nil
}
func (h *writeHandler) appendHistograms(app storage.Appender, hh []prompb.Histogram, labels labels.Labels) error {
var err error
for _, hp := range hh {
if hp.IsFloatHistogram() {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
} else {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
}
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
// a note indicating its inclusion in the future.
if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
}
return err
}
}
return nil
}
func (h *writeHandler) appendHistogramsV2(app storage.Appender, hh []writev2.Histogram, labels labels.Labels) error {
var err error
for _, hp := range hh {
if hp.IsFloatHistogram() {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
} else {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
}
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
// a note indicating its inclusion in the future.
if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
}
return err
}
}
return nil
} }
// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
// writes them to the provided appendable. // writes them to the provided appendable.
func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable) http.Handler { func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable, configFunc func() config.Config) http.Handler {
rwHandler := &writeHandler{ rwHandler := &writeHandler{
logger: logger, logger: logger,
appendable: appendable, appendable: appendable,
} }
return &otlpWriteHandler{ return &otlpWriteHandler{
logger: logger, logger: logger,
rwHandler: rwHandler, rwHandler: rwHandler,
configFunc: configFunc,
} }
} }
type otlpWriteHandler struct { type otlpWriteHandler struct {
logger log.Logger logger log.Logger
rwHandler *writeHandler rwHandler *writeHandler
configFunc func() config.Config
} }
func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@ -430,9 +499,12 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return return
} }
otlpCfg := h.configFunc().OTLPConfig
converter := otlptranslator.NewPrometheusConverter() converter := otlptranslator.NewPrometheusConverter()
if err := converter.FromMetrics(req.Metrics(), otlptranslator.Settings{ if err := converter.FromMetrics(req.Metrics(), otlptranslator.Settings{
AddMetricSuffixes: true, AddMetricSuffixes: true,
PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes,
}); err != nil { }); err != nil {
level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err) level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err)
} }

View file

@ -16,6 +16,7 @@ package remote
import ( import (
"bytes" "bytes"
"context" "context"
"errors"
"fmt" "fmt"
"io" "io"
"math" "math"
@ -27,6 +28,7 @@ import (
"time" "time"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/gogo/protobuf/proto"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -290,64 +292,224 @@ func TestRemoteWriteHandler_V1Message(t *testing.T) {
} }
} }
func expectHeaderValue(t testing.TB, expected int, got string) {
t.Helper()
require.NotEmpty(t, got)
i, err := strconv.Atoi(got)
require.NoError(t, err)
require.Equal(t, expected, i)
}
func TestRemoteWriteHandler_V2Message(t *testing.T) { func TestRemoteWriteHandler_V2Message(t *testing.T) {
payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") // V2 supports partial writes for non-retriable errors, so test them.
require.NoError(t, err) for _, tc := range []struct {
desc string
input []writev2.TimeSeries
expectedCode int
expectedRespBody string
req, err := http.NewRequest("", "", bytes.NewReader(payload)) commitErr error
require.NoError(t, err) appendSampleErr error
appendHistogramErr error
appendExemplarErr error
updateMetadataErr error
}{
{
desc: "All timeseries accepted",
input: writeV2RequestFixture.Timeseries,
expectedCode: http.StatusNoContent,
},
{
desc: "Partial write; first series with invalid labels (no metric name)",
input: append(
// Series with test_metric1="test_metric1" labels.
[]writev2.TimeSeries{{LabelsRefs: []uint32{2, 2}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}},
writeV2RequestFixture.Timeseries...),
expectedCode: http.StatusBadRequest,
expectedRespBody: "invalid metric name or labels, got {test_metric1=\"test_metric1\"}\n",
},
{
desc: "Partial write; first series with invalid labels (empty metric name)",
input: append(
// Series with __name__="" labels.
[]writev2.TimeSeries{{LabelsRefs: []uint32{1, 0}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}},
writeV2RequestFixture.Timeseries...),
expectedCode: http.StatusBadRequest,
expectedRespBody: "invalid metric name or labels, got {__name__=\"\"}\n",
},
{
desc: "Partial write; first series with one OOO sample",
input: func() []writev2.TimeSeries {
f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
f.Timeseries[0].Samples = append(f.Timeseries[0].Samples, writev2.Sample{Value: 2, Timestamp: 0})
return f.Timeseries
}(),
expectedCode: http.StatusBadRequest,
expectedRespBody: "out of order sample for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
},
{
desc: "Partial write; first series with one dup sample",
input: func() []writev2.TimeSeries {
f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
f.Timeseries[0].Samples = append(f.Timeseries[0].Samples, f.Timeseries[0].Samples[0])
return f.Timeseries
}(),
expectedCode: http.StatusBadRequest,
expectedRespBody: "duplicate sample for timestamp for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
},
{
desc: "Partial write; first series with one OOO histogram sample",
input: func() []writev2.TimeSeries {
f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
f.Timeseries[0].Histograms = append(f.Timeseries[0].Histograms, writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil)))
return f.Timeseries
}(),
expectedCode: http.StatusBadRequest,
expectedRespBody: "out of order sample for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
},
{
desc: "Partial write; first series with one dup histogram sample",
input: func() []writev2.TimeSeries {
f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
f.Timeseries[0].Histograms = append(f.Timeseries[0].Histograms, f.Timeseries[0].Histograms[1])
return f.Timeseries
}(),
expectedCode: http.StatusBadRequest,
expectedRespBody: "duplicate sample for timestamp for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
},
// Non-retriable errors from various parts.
{
desc: "Internal sample append error; rollback triggered",
input: writeV2RequestFixture.Timeseries,
appendSampleErr: errors.New("some sample internal append error"),
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2]) expectedCode: http.StatusInternalServerError,
req.Header.Set("Content-Encoding", string(SnappyBlockCompression)) expectedRespBody: "some sample internal append error\n",
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) },
{
desc: "Internal histogram sample append error; rollback triggered",
input: writeV2RequestFixture.Timeseries,
appendHistogramErr: errors.New("some histogram sample internal append error"),
appendable := &mockAppendable{} expectedCode: http.StatusInternalServerError,
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) expectedRespBody: "some histogram sample internal append error\n",
},
{
desc: "Partial write; skipped exemplar; exemplar storage errs are noop",
input: writeV2RequestFixture.Timeseries,
appendExemplarErr: errors.New("some exemplar internal append error"),
recorder := httptest.NewRecorder() expectedCode: http.StatusNoContent,
handler.ServeHTTP(recorder, req) },
{
desc: "Partial write; skipped metadata; metadata storage errs are noop",
input: writeV2RequestFixture.Timeseries,
updateMetadataErr: errors.New("some metadata update error"),
resp := recorder.Result() expectedCode: http.StatusNoContent,
require.Equal(t, http.StatusNoContent, resp.StatusCode) },
{
desc: "Internal commit error; rollback triggered",
input: writeV2RequestFixture.Timeseries,
commitErr: errors.New("storage error"),
b := labels.NewScratchBuilder(0) expectedCode: http.StatusInternalServerError,
i := 0 expectedRespBody: "storage error\n",
j := 0 },
k := 0 } {
for _, ts := range writeV2RequestFixture.Timeseries { t.Run(tc.desc, func(t *testing.T) {
ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols) payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), tc.input, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
require.NoError(t, err)
for _, s := range ts.Samples { req, err := http.NewRequest("", "", bytes.NewReader(payload))
requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i]) require.NoError(t, err)
switch i { req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
case 0: req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
requireEqual(t, mockMetadata{ls, writeV2RequestSeries1Metadata}, appendable.metadata[i]) req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
case 1:
requireEqual(t, mockMetadata{ls, writeV2RequestSeries2Metadata}, appendable.metadata[i]) appendable := &mockAppendable{
default: commitErr: tc.commitErr,
t.Fatal("more series/samples then expected") appendSampleErr: tc.appendSampleErr,
appendHistogramErr: tc.appendHistogramErr,
appendExemplarErr: tc.appendExemplarErr,
updateMetadataErr: tc.updateMetadataErr,
} }
i++ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
}
for _, e := range ts.Exemplars { recorder := httptest.NewRecorder()
exemplarLabels := e.ToExemplar(&b, writeV2RequestFixture.Symbols).Labels handler.ServeHTTP(recorder, req)
requireEqual(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
j++ resp := recorder.Result()
} require.Equal(t, tc.expectedCode, resp.StatusCode)
for _, hp := range ts.Histograms { respBody, err := io.ReadAll(resp.Body)
if hp.IsFloatHistogram() { require.NoError(t, err)
fh := hp.ToFloatHistogram() require.Equal(t, tc.expectedRespBody, string(respBody))
requireEqual(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k])
if tc.expectedCode == http.StatusInternalServerError {
// We don't expect any writes when the error code is retry-able (5xx).
expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenSamplesHeader))
expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenHistogramsHeader))
expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader))
require.Empty(t, len(appendable.samples))
require.Empty(t, len(appendable.histograms))
require.Empty(t, len(appendable.exemplars))
require.Empty(t, len(appendable.metadata))
return
}
// Double check mandatory 2.0 stats.
// writeV2RequestFixture has 2 series with 1 sample, 2 histograms, 1 exemplar each.
expectHeaderValue(t, 2, resp.Header.Get(rw20WrittenSamplesHeader))
expectHeaderValue(t, 4, resp.Header.Get(rw20WrittenHistogramsHeader))
if tc.appendExemplarErr != nil {
expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader))
} else { } else {
h := hp.ToIntHistogram() expectHeaderValue(t, 2, resp.Header.Get(rw20WrittenExemplarsHeader))
requireEqual(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k])
} }
k++
} // Double check what was actually appended.
var (
b = labels.NewScratchBuilder(0)
i, j, k, m int
)
for _, ts := range writeV2RequestFixture.Timeseries {
ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols)
for _, s := range ts.Samples {
requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i])
i++
}
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
fh := hp.ToFloatHistogram()
requireEqual(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k])
} else {
h := hp.ToIntHistogram()
requireEqual(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k])
}
k++
}
if tc.appendExemplarErr == nil {
for _, e := range ts.Exemplars {
exemplarLabels := e.ToExemplar(&b, writeV2RequestFixture.Symbols).Labels
requireEqual(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
j++
}
}
if tc.updateMetadataErr == nil {
expectedMeta := ts.ToMetadata(writeV2RequestFixture.Symbols)
requireEqual(t, mockMetadata{ls, expectedMeta}, appendable.metadata[m])
m++
}
}
})
} }
} }
// NOTE: V2 Message is tested in TestRemoteWriteHandler_V2Message.
func TestOutOfOrderSample_V1Message(t *testing.T) { func TestOutOfOrderSample_V1Message(t *testing.T) {
for _, tc := range []struct { for _, tc := range []struct {
Name string Name string
@ -372,7 +534,7 @@ func TestOutOfOrderSample_V1Message(t *testing.T) {
req, err := http.NewRequest("", "", bytes.NewReader(payload)) req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err) require.NoError(t, err)
appendable := &mockAppendable{latestSample: 100} appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
@ -384,49 +546,10 @@ func TestOutOfOrderSample_V1Message(t *testing.T) {
} }
} }
func TestOutOfOrderSample_V2Message(t *testing.T) {
for _, tc := range []struct {
Name string
Timestamp int64
}{
{
Name: "historic",
Timestamp: 0,
},
{
Name: "future",
Timestamp: math.MaxInt64,
},
} {
t.Run(tc.Name, func(t *testing.T) {
payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
LabelsRefs: []uint32{1, 2},
Samples: []writev2.Sample{{Value: 1, Timestamp: tc.Timestamp}},
}}, []string{"", "__name__", "metric1"}, nil, nil, nil, "snappy")
require.NoError(t, err)
req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
appendable := &mockAppendable{latestSample: 100}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
resp := recorder.Result()
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
})
}
}
// This test case currently aims to verify that the WriteHandler endpoint // This test case currently aims to verify that the WriteHandler endpoint
// doesn't fail on exemplar ingestion errors since the exemplar storage is // doesn't fail on exemplar ingestion errors since the exemplar storage is
// still experimental. // still experimental.
// NOTE: V2 Message is tested in TestRemoteWriteHandler_V2Message.
func TestOutOfOrderExemplar_V1Message(t *testing.T) { func TestOutOfOrderExemplar_V1Message(t *testing.T) {
tests := []struct { tests := []struct {
Name string Name string
@ -453,7 +576,7 @@ func TestOutOfOrderExemplar_V1Message(t *testing.T) {
req, err := http.NewRequest("", "", bytes.NewReader(payload)) req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err) require.NoError(t, err)
appendable := &mockAppendable{latestExemplar: 100} appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
@ -466,49 +589,7 @@ func TestOutOfOrderExemplar_V1Message(t *testing.T) {
} }
} }
func TestOutOfOrderExemplar_V2Message(t *testing.T) { // NOTE: V2 Message is tested in TestRemoteWriteHandler_V2Message.
tests := []struct {
Name string
Timestamp int64
}{
{
Name: "historic",
Timestamp: 0,
},
{
Name: "future",
Timestamp: math.MaxInt64,
},
}
for _, tc := range tests {
t.Run(tc.Name, func(t *testing.T) {
payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
LabelsRefs: []uint32{1, 2},
Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{3, 4}, Value: 1, Timestamp: tc.Timestamp}},
}}, []string{"", "__name__", "metric1", "foo", "bar"}, nil, nil, nil, "snappy")
require.NoError(t, err)
req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
appendable := &mockAppendable{latestExemplar: 100}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
resp := recorder.Result()
// TODO: update to require.Equal(t, http.StatusConflict, resp.StatusCode) once exemplar storage is not experimental.
require.Equal(t, http.StatusNoContent, resp.StatusCode)
})
}
}
func TestOutOfOrderHistogram_V1Message(t *testing.T) { func TestOutOfOrderHistogram_V1Message(t *testing.T) {
for _, tc := range []struct { for _, tc := range []struct {
Name string Name string
@ -533,7 +614,7 @@ func TestOutOfOrderHistogram_V1Message(t *testing.T) {
req, err := http.NewRequest("", "", bytes.NewReader(payload)) req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err) require.NoError(t, err)
appendable := &mockAppendable{latestHistogram: 100} appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
@ -545,46 +626,6 @@ func TestOutOfOrderHistogram_V1Message(t *testing.T) {
} }
} }
func TestOutOfOrderHistogram_V2Message(t *testing.T) {
for _, tc := range []struct {
Name string
Timestamp int64
}{
{
Name: "historic",
Timestamp: 0,
},
{
Name: "future",
Timestamp: math.MaxInt64,
},
} {
t.Run(tc.Name, func(t *testing.T) {
payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
LabelsRefs: []uint32{0, 1},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(0, &testHistogram), writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
}}, []string{"__name__", "metric1"}, nil, nil, nil, "snappy")
require.NoError(t, err)
req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
appendable := &mockAppendable{latestHistogram: 100}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
resp := recorder.Result()
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
})
}
}
func BenchmarkRemoteWriteHandler(b *testing.B) {
	const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
	var reqs []*http.Request
@@ -719,15 +760,20 @@ func genSeriesWithSample(numSeries int, ts int64) []prompb.TimeSeries {
}
type mockAppendable struct {
-	latestSample    int64
+	latestSample    map[uint64]int64
	samples         []mockSample
-	latestExemplar  int64
+	latestExemplar  map[uint64]int64
	exemplars       []mockExemplar
-	latestHistogram int64
+	latestHistogram map[uint64]int64
	histograms      []mockHistogram
	metadata        []mockMetadata
-	commitErr error
+	// optional errors to inject.
+	commitErr          error
+	appendSampleErr    error
+	appendHistogramErr error
+	appendExemplarErr  error
+	updateMetadataErr  error
}
type mockSample struct {
@@ -765,48 +811,92 @@ func requireEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...inte
}
func (m *mockAppendable) Appender(_ context.Context) storage.Appender {
+	if m.latestSample == nil {
+		m.latestSample = map[uint64]int64{}
+	}
+	if m.latestHistogram == nil {
+		m.latestHistogram = map[uint64]int64{}
+	}
+	if m.latestExemplar == nil {
+		m.latestExemplar = map[uint64]int64{}
+	}
	return m
}
func (m *mockAppendable) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
-	if t < m.latestSample {
-		return 0, storage.ErrOutOfOrderSample
+	if m.appendSampleErr != nil {
+		return 0, m.appendSampleErr
	}
-	m.latestSample = t
+	latestTs := m.latestSample[l.Hash()]
+	if t < latestTs {
+		return 0, storage.ErrOutOfOrderSample
+	}
+	if t == latestTs {
+		return 0, storage.ErrDuplicateSampleForTimestamp
+	}
+	m.latestSample[l.Hash()] = t
	m.samples = append(m.samples, mockSample{l, t, v})
	return 0, nil
}
func (m *mockAppendable) Commit() error {
+	if m.commitErr != nil {
+		_ = m.Rollback() // As per Commit method contract.
+	}
	return m.commitErr
}
-func (*mockAppendable) Rollback() error {
-	return fmt.Errorf("not implemented")
+func (m *mockAppendable) Rollback() error {
+	m.samples = m.samples[:0]
+	m.exemplars = m.exemplars[:0]
+	m.histograms = m.histograms[:0]
+	m.metadata = m.metadata[:0]
+	return nil
}
func (m *mockAppendable) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
-	if e.Ts < m.latestExemplar {
-		return 0, storage.ErrOutOfOrderExemplar
+	if m.appendExemplarErr != nil {
+		return 0, m.appendExemplarErr
	}
-	m.latestExemplar = e.Ts
+	latestTs := m.latestExemplar[l.Hash()]
+	if e.Ts < latestTs {
+		return 0, storage.ErrOutOfOrderExemplar
+	}
+	if e.Ts == latestTs {
+		return 0, storage.ErrDuplicateExemplar
+	}
+	m.latestExemplar[l.Hash()] = e.Ts
	m.exemplars = append(m.exemplars, mockExemplar{l, e.Labels, e.Ts, e.Value})
	return 0, nil
}
func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
-	if t < m.latestHistogram {
-		return 0, storage.ErrOutOfOrderSample
+	if m.appendHistogramErr != nil {
+		return 0, m.appendHistogramErr
	}
-	m.latestHistogram = t
+	latestTs := m.latestHistogram[l.Hash()]
+	if t < latestTs {
+		return 0, storage.ErrOutOfOrderSample
+	}
+	if t == latestTs {
+		return 0, storage.ErrDuplicateSampleForTimestamp
+	}
+	m.latestHistogram[l.Hash()] = t
	m.histograms = append(m.histograms, mockHistogram{l, t, h, fh})
	return 0, nil
}
func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) {
+	if m.updateMetadataErr != nil {
+		return 0, m.updateMetadataErr
+	}
	m.metadata = append(m.metadata, mockMetadata{l: l, m: mp})
	return 0, nil
}
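The rewritten mock above keys its freshness checks on l.Hash(), so each series is validated independently instead of against one global timestamp. A minimal standalone sketch of the same idea (hypothetical names, not the Prometheus types used above):

package main

import (
	"errors"
	"fmt"
)

var (
	errOutOfOrder = errors.New("out of order sample")
	errDuplicate  = errors.New("duplicate sample for timestamp")
)

// latestBySeries tracks the newest timestamp seen per series hash,
// mirroring how the mock appender validates incoming samples.
type latestBySeries map[uint64]int64

func (m latestBySeries) accept(seriesHash uint64, t int64) error {
	latest, seen := m[seriesHash]
	switch {
	case seen && t < latest:
		return errOutOfOrder
	case seen && t == latest:
		return errDuplicate
	}
	m[seriesHash] = t
	return nil
}

func main() {
	m := latestBySeries{}
	fmt.Println(m.accept(1, 100)) // <nil>
	fmt.Println(m.accept(1, 100)) // duplicate sample for timestamp
	fmt.Println(m.accept(1, 50))  // out of order sample
	fmt.Println(m.accept(2, 50))  // <nil>: a different series is unaffected
}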

View file

@@ -369,7 +369,7 @@ func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) {
}
func TestOTLPWriteHandler(t *testing.T) {
-	exportRequest := generateOTLPWriteRequest(t)
+	exportRequest := generateOTLPWriteRequest()
	buf, err := exportRequest.MarshalProto()
	require.NoError(t, err)
@@ -379,7 +379,11 @@ func TestOTLPWriteHandler(t *testing.T) {
	req.Header.Set("Content-Type", "application/x-protobuf")
	appendable := &mockAppendable{}
-	handler := NewOTLPWriteHandler(nil, appendable)
+	handler := NewOTLPWriteHandler(nil, appendable, func() config.Config {
+		return config.Config{
+			OTLPConfig: config.DefaultOTLPConfig,
+		}
+	})
	recorder := httptest.NewRecorder()
	handler.ServeHTTP(recorder, req)
@@ -392,7 +396,7 @@ func TestOTLPWriteHandler(t *testing.T) {
	require.Len(t, appendable.exemplars, 1) // 1 (exemplar)
}
-func generateOTLPWriteRequest(t *testing.T) pmetricotlp.ExportRequest {
+func generateOTLPWriteRequest() pmetricotlp.ExportRequest {
	d := pmetric.NewMetrics()
	// Generate One Counter, One Gauge, One Histogram, One Exponential-Histogram
@@ -422,6 +426,7 @@ func generateOTLPWriteRequest(t *testing.T) pmetricotlp.ExportRequest {
	counterDataPoint.Attributes().PutStr("foo.bar", "baz")
	counterExemplar := counterDataPoint.Exemplars().AppendEmpty()
	counterExemplar.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
	counterExemplar.SetDoubleValue(10.0)
	counterExemplar.SetSpanID(pcommon.SpanID{0, 1, 2, 3, 4, 5, 6, 7})

View file

@@ -49,16 +49,16 @@ func newSecondaryQuerierFromChunk(cq ChunkQuerier) genericQuerier {
	return &secondaryQuerier{genericQuerier: newGenericQuerierFromChunk(cq)}
}
-func (s *secondaryQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
-	vals, w, err := s.genericQuerier.LabelValues(ctx, name, matchers...)
+func (s *secondaryQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	vals, w, err := s.genericQuerier.LabelValues(ctx, name, hints, matchers...)
	if err != nil {
		return nil, w.Add(err), nil
	}
	return vals, w, nil
}
-func (s *secondaryQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
-	names, w, err := s.genericQuerier.LabelNames(ctx, matchers...)
+func (s *secondaryQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+	names, w, err := s.genericQuerier.LabelNames(ctx, hints, matchers...)
	if err != nil {
		return nil, w.Add(err), nil
	}
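Callers that don't care about hints pass nil, as the updated tests later in this diff do. A self-contained sketch of the optional-hints pattern, assuming a Limit-style hint as in storage.LabelHints (hypothetical helper, not the real querier):

package main

import "fmt"

// LabelHints mirrors the new optional parameter: a nil value means "no hints".
type LabelHints struct {
	Limit int // maximum number of results the caller needs; 0 means unlimited.
}

func labelValues(name string, hints *LabelHints) []string {
	vals := []string{"a", "b", "c"} // stand-in for a real index lookup
	if hints != nil && hints.Limit > 0 && hints.Limit < len(vals) {
		vals = vals[:hints.Limit]
	}
	return vals
}

func main() {
	fmt.Println(labelValues("job", nil))                   // [a b c]
	fmt.Println(labelValues("job", &LabelHints{Limit: 2})) // [a b]
}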

View file

@@ -30,7 +30,7 @@ import (
	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/sdk/resource"
	tracesdk "go.opentelemetry.io/otel/sdk/trace"
-	semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/noop"
	"google.golang.org/grpc/credentials"

View file

@@ -63,7 +63,10 @@ func TestMain(m *testing.M) {
	flag.Parse()
	defaultIsolationDisabled = !isolationEnabled
-	goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func1"), goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func2"))
+	goleak.VerifyTestMain(m,
+		goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func1"),
+		goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func2"),
+		goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"))
}
func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB) {
@@ -1001,7 +1004,7 @@ func TestWALFlushedOnDBClose(t *testing.T) {
	q, err := db.Querier(0, 1)
	require.NoError(t, err)
-	values, ws, err := q.LabelValues(ctx, "labelname")
+	values, ws, err := q.LabelValues(ctx, "labelname", nil)
	require.NoError(t, err)
	require.Empty(t, ws)
	require.Equal(t, []string{"labelvalue"}, values)
@@ -1976,7 +1979,7 @@ func TestQuerierWithBoundaryChunks(t *testing.T) {
	defer q.Close()
	// The requested interval covers 2 blocks, so the querier's label values for blockID should give us 2 values, one from each block.
-	b, ws, err := q.LabelValues(ctx, "blockID")
+	b, ws, err := q.LabelValues(ctx, "blockID", nil)
	require.NoError(t, err)
	var nilAnnotations annotations.Annotations
	require.Equal(t, nilAnnotations, ws)
@@ -2288,7 +2291,7 @@ func TestDB_LabelNames(t *testing.T) {
	q, err := db.Querier(math.MinInt64, math.MaxInt64)
	require.NoError(t, err)
	var ws annotations.Annotations
-	labelNames, ws, err = q.LabelNames(ctx)
+	labelNames, ws, err = q.LabelNames(ctx, nil)
	require.NoError(t, err)
	require.Empty(t, ws)
	require.NoError(t, q.Close())

View file

@@ -178,6 +178,7 @@ type HeadOptions struct {
	WALReplayConcurrency int
	// EnableSharding enables ShardedPostings() support in the Head.
+	// EnableSharding is temporarily disabled during Init().
	EnableSharding bool
}
@@ -609,7 +610,7 @@ const cardinalityCacheExpirationTime = time.Duration(30) * time.Second
// Init loads data from the write ahead log and prepares the head for writes.
// It should be called before using an appender so that it
// limits the ingested samples to the head min valid time.
-func (h *Head) Init(minValidTime int64) error {
+func (h *Head) Init(minValidTime int64) (err error) {
	h.minValidTime.Store(minValidTime)
	defer func() {
		h.postings.EnsureOrder(h.opts.WALReplayConcurrency)
@@ -623,6 +624,24 @@
			}
		}
	}()
+	// If sharding is enabled, disable it while initializing, and calculate the shards later.
+	// We're going to use that field for other purposes during WAL replay,
+	// so we don't want to waste time on calculating the shard that we're going to lose anyway.
+	if h.opts.EnableSharding {
+		h.opts.EnableSharding = false
+		defer func() {
+			h.opts.EnableSharding = true
+			if err == nil {
+				// No locking is needed here as nobody should be writing while we're in Init.
+				for _, stripe := range h.series.series {
+					for _, s := range stripe {
+						s.shardHashOrMemoryMappedMaxTime = labels.StableHash(s.lset)
+					}
+				}
+			}
+		}()
+	}
	level.Info(h.logger).Log("msg", "Replaying on-disk memory mappable chunks if any")
	start := time.Now()
@@ -683,7 +702,6 @@ func (h *Head) Init(minValidTime int64) error {
		mmappedChunks    map[chunks.HeadSeriesRef][]*mmappedChunk
		oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
		lastMmapRef      chunks.ChunkDiskMapperRef
-		err              error
		mmapChunkReplayDuration time.Duration
	)
@@ -2068,9 +2086,11 @@ type memSeries struct {
	ref  chunks.HeadSeriesRef
	meta *metadata.Metadata
-	// Series labels hash to use for sharding purposes. The value is always 0 when sharding has not
-	// been explicitly enabled in TSDB.
-	shardHash uint64
+	// Series labels hash to use for sharding purposes.
+	// The value is always 0 when sharding has not been explicitly enabled in TSDB.
+	// During WAL replay the value stored here is the max time of any mmapped chunk,
+	// and the shard hash is re-calculated after WAL replay is complete.
+	shardHashOrMemoryMappedMaxTime uint64
	// Everything after here should only be accessed with the lock held.
	sync.Mutex
@@ -2095,10 +2115,9 @@ type memSeries struct {
	ooo *memSeriesOOOFields
-	mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay.
	nextAt                           int64 // Timestamp at which to cut the next chunk.
	histogramChunkHasComputedEndTime bool  // True if nextAt has been predicted for the current histograms chunk; false otherwise.
+	pendingCommit                    bool  // Whether there are samples waiting to be committed to this series.
	// We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates.
	lastValue float64
@@ -2114,8 +2133,6 @@ type memSeries struct {
	// txs is nil if isolation is disabled.
	txs *txRing
-	pendingCommit bool // Whether there are samples waiting to be committed to this series.
}
// memSeriesOOOFields contains the fields required by memSeries
@@ -2128,10 +2145,10 @@ type memSeriesOOOFields struct {
func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64, isolationDisabled bool) *memSeries {
	s := &memSeries{
-		lset:      lset,
-		ref:       id,
-		nextAt:    math.MinInt64,
-		shardHash: shardHash,
+		lset:                           lset,
+		ref:                            id,
+		nextAt:                         math.MinInt64,
+		shardHashOrMemoryMappedMaxTime: shardHash,
	}
	if !isolationDisabled {
		s.txs = newTxRing(0)
@@ -2219,6 +2236,12 @@ func (s *memSeries) truncateChunksBefore(mint int64, minOOOMmapRef chunks.ChunkD
	return removedInOrder + removedOOO
}
+// shardHash returns the shard hash of the series, only available after WAL replay.
+func (s *memSeries) shardHash() uint64 { return s.shardHashOrMemoryMappedMaxTime }
+
+// mmMaxTime returns the max time of any mmapped chunk in the series, only available during WAL replay.
+func (s *memSeries) mmMaxTime() int64 { return int64(s.shardHashOrMemoryMappedMaxTime) }
+
// cleanupAppendIDsBelow cleans up older appendIDs. Has to be called after
// acquiring lock.
func (s *memSeries) cleanupAppendIDsBelow(bound uint64) {
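A minimal sketch of the field-reuse trick introduced here (hypothetical type, not the real memSeries): a single uint64 carries the mmapped-chunk max time while the WAL is replaying and the shard hash afterwards, saving a field per series:

package main

import "fmt"

// series reuses one uint64 for two disjoint phases, as the memSeries
// accessors above do.
type series struct {
	shardHashOrMemoryMappedMaxTime uint64
}

func (s *series) mmMaxTime() int64  { return int64(s.shardHashOrMemoryMappedMaxTime) }
func (s *series) shardHash() uint64 { return s.shardHashOrMemoryMappedMaxTime }

func main() {
	s := &series{}
	// During replay: store a max time (may be negative, hence the round-trip casts).
	replayMaxTime := int64(-123)
	s.shardHashOrMemoryMappedMaxTime = uint64(replayMaxTime)
	fmt.Println(s.mmMaxTime()) // -123
	// After replay: overwrite with the shard hash; mmMaxTime is meaningless from here on.
	s.shardHashOrMemoryMappedMaxTime = 0xdeadbeef
	fmt.Println(s.shardHash()) // 3735928559
}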

View file

@@ -846,16 +846,17 @@ func (a *headAppender) Commit() (err error) {
		// number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled)
		floatOOBRejected int
		inOrderMint int64 = math.MaxInt64
		inOrderMaxt int64 = math.MinInt64
-		ooomint     int64 = math.MaxInt64
-		ooomaxt     int64 = math.MinInt64
+		oooMinT     int64 = math.MaxInt64
+		oooMaxT     int64 = math.MinInt64
		wblSamples []record.RefSample
-		oooMmapMarkers map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef
-		oooRecords     [][]byte
-		oooCapMax      = a.head.opts.OutOfOrderCapMax.Load()
-		series         *memSeries
-		appendChunkOpts = chunkOpts{
+		oooMmapMarkers      map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef
+		oooMmapMarkersCount int
+		oooRecords          [][]byte
+		oooCapMax           = a.head.opts.OutOfOrderCapMax.Load()
+		series              *memSeries
+		appendChunkOpts     = chunkOpts{
			chunkDiskMapper: a.head.chunkDiskMapper,
			chunkRange:      a.head.chunkRange.Load(),
			samplesPerChunk: a.head.opts.SamplesPerChunk,
@@ -872,6 +873,7 @@ func (a *headAppender) Commit() (err error) {
			// WBL is not enabled. So no need to collect.
			wblSamples = nil
			oooMmapMarkers = nil
+			oooMmapMarkersCount = 0
			return
		}
		// The m-map happens before adding a new sample. So we collect
@@ -880,12 +882,14 @@ func (a *headAppender) Commit() (err error) {
		// WBL Before this Commit(): [old samples before this commit for chunk 1]
		// WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3]
		if oooMmapMarkers != nil {
-			markers := make([]record.RefMmapMarker, 0, len(oooMmapMarkers))
-			for ref, mmapRef := range oooMmapMarkers {
-				markers = append(markers, record.RefMmapMarker{
-					Ref:     ref,
-					MmapRef: mmapRef,
-				})
+			markers := make([]record.RefMmapMarker, 0, oooMmapMarkersCount)
+			for ref, mmapRefs := range oooMmapMarkers {
+				for _, mmapRef := range mmapRefs {
+					markers = append(markers, record.RefMmapMarker{
+						Ref:     ref,
+						MmapRef: mmapRef,
+					})
+				}
			}
			r := enc.MmapMarkers(markers, a.head.getBytesBuffer())
			oooRecords = append(oooRecords, r)
@@ -928,32 +932,39 @@ func (a *headAppender) Commit() (err error) {
		case oooSample:
			// Sample is OOO and OOO handling is enabled
			// and the delta is within the OOO tolerance.
-			var mmapRef chunks.ChunkDiskMapperRef
-			ok, chunkCreated, mmapRef = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax)
+			var mmapRefs []chunks.ChunkDiskMapperRef
+			ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax)
			if chunkCreated {
				r, ok := oooMmapMarkers[series.ref]
-				if !ok || r != 0 {
+				if !ok || r != nil {
					// !ok means there are no markers collected for these samples yet. So we first flush the samples
					// before setting this m-map marker.
-					// r != 0 means we have already m-mapped a chunk for this series in the same Commit().
+					// r != nil means we have already m-mapped a chunk for this series in the same Commit().
					// Hence, before we m-map again, we should add the samples and m-map markers
					// seen till now to the WBL records.
					collectOOORecords()
				}
				if oooMmapMarkers == nil {
-					oooMmapMarkers = make(map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef)
+					oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef)
+				}
+				if len(mmapRefs) > 0 {
+					oooMmapMarkers[series.ref] = mmapRefs
+					oooMmapMarkersCount += len(mmapRefs)
+				} else {
+					// No chunk was written to disk, so we need to set an initial marker for this series.
+					oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0}
+					oooMmapMarkersCount++
				}
-				oooMmapMarkers[series.ref] = mmapRef
			}
			if ok {
				wblSamples = append(wblSamples, s)
-				if s.T < ooomint {
-					ooomint = s.T
+				if s.T < oooMinT {
+					oooMinT = s.T
				}
-				if s.T > ooomaxt {
-					ooomaxt = s.T
+				if s.T > oooMaxT {
+					oooMaxT = s.T
				}
				floatOOOAccepted++
			} else {
@@ -1053,7 +1064,7 @@ func (a *headAppender) Commit() (err error) {
	a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsAppended))
	a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOOAccepted))
	a.head.updateMinMaxTime(inOrderMint, inOrderMaxt)
-	a.head.updateMinOOOMaxOOOTime(ooomint, ooomaxt)
+	a.head.updateMinOOOMaxOOOTime(oooMinT, oooMaxT)
	collectOOORecords()
	if a.head.wbl != nil {
@@ -1069,14 +1080,14 @@ func (a *headAppender) Commit() (err error) {
}
// insert is like append, except it inserts. Used for OOO samples.
-func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRef chunks.ChunkDiskMapperRef) {
+func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) {
	if s.ooo == nil {
		s.ooo = &memSeriesOOOFields{}
	}
	c := s.ooo.oooHeadChunk
	if c == nil || c.chunk.NumSamples() == int(oooCapMax) {
		// Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks.
-		c, mmapRef = s.cutNewOOOHeadChunk(t, chunkDiskMapper)
+		c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper)
		chunkCreated = true
	}
@@ -1089,7 +1100,7 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDisk
			c.maxTime = t
		}
	}
-	return ok, chunkCreated, mmapRef
+	return ok, chunkCreated, mmapRefs
}
// chunkOpts are chunk-level options that are passed when appending to a memSeries.
@@ -1431,7 +1442,7 @@ func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange
// cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk.
// The caller must ensure that s.ooo is not nil.
-func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, chunks.ChunkDiskMapperRef) {
+func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) {
	ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper)
	s.ooo.oooHeadChunk = &oooHeadChunk{
@@ -1443,21 +1454,29 @@ func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.Chunk
	return s.ooo.oooHeadChunk, ref
}
-func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) chunks.ChunkDiskMapperRef {
+func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) []chunks.ChunkDiskMapperRef {
	if s.ooo == nil || s.ooo.oooHeadChunk == nil {
-		// There is no head chunk, so nothing to m-map here.
-		return 0
+		// OOO is not enabled or there is no head chunk, so nothing to m-map here.
+		return nil
	}
-	xor, _ := s.ooo.oooHeadChunk.chunk.ToXOR() // Encode to XorChunk which is more compact and implements all of the needed functionality.
-	chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, xor, true, handleChunkWriteError)
-	s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
-		ref:        chunkRef,
-		numSamples: uint16(xor.NumSamples()),
-		minTime:    s.ooo.oooHeadChunk.minTime,
-		maxTime:    s.ooo.oooHeadChunk.maxTime,
-	})
+	chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
+	if err != nil {
+		handleChunkWriteError(err)
+		return nil
+	}
+	chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, 1)
+	for _, memchunk := range chks {
+		chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, memchunk.chunk, true, handleChunkWriteError)
+		chunkRefs = append(chunkRefs, chunkRef)
+		s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
+			ref:        chunkRef,
+			numSamples: uint16(memchunk.chunk.NumSamples()),
+			minTime:    memchunk.minTime,
+			maxTime:    memchunk.maxTime,
+		})
+	}
	s.ooo.oooHeadChunk = nil
-	return chunkRef
+	return chunkRefs
}
// mmapChunks will m-map all but first chunk on s.headChunks list.
View file

@@ -170,7 +170,7 @@ func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou
		}
		// Check if the series belong to the shard.
-		if s.shardHash%shardCount != shardIndex {
+		if s.shardHash()%shardCount != shardIndex {
			continue
		}
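The shard test itself is a plain modulo over the stable label hash. A small sketch of the membership check, using labels.StableHash as the Init() defer earlier in this diff does:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// inShard reports whether a series belongs to shard shardIndex of shardCount:
// its stable label hash modulo the shard count must equal the shard index.
func inShard(lset labels.Labels, shardIndex, shardCount uint64) bool {
	return labels.StableHash(lset)%shardCount == shardIndex
}

func main() {
	lset := labels.FromStrings("__name__", "http_requests_total", "job", "api")
	for i := uint64(0); i < 4; i++ {
		fmt.Printf("shard %d: %v\n", i, inShard(lset, i, 4))
	}
}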

View file

@@ -23,6 +23,7 @@ import (
	"path"
	"path/filepath"
	"reflect"
+	"runtime/pprof"
	"sort"
	"strconv"
	"strings"
@@ -89,6 +90,43 @@ func newTestHeadWithOptions(t testing.TB, compressWAL wlog.CompressionType, opts
	return h, wal
}
+// BenchmarkLoadRealWLs will be skipped unless the BENCHMARK_LOAD_REAL_WLS_DIR environment variable is set.
+// BENCHMARK_LOAD_REAL_WLS_DIR should be the folder where `wal` and `chunks_head` are located.
+// Optionally, BENCHMARK_LOAD_REAL_WLS_PROFILE can be set to a file path to write a CPU profile.
+func BenchmarkLoadRealWLs(b *testing.B) {
+	dir := os.Getenv("BENCHMARK_LOAD_REAL_WLS_DIR")
+	if dir == "" {
+		b.Skip()
+	}
+	profileFile := os.Getenv("BENCHMARK_LOAD_REAL_WLS_PROFILE")
+	if profileFile != "" {
+		b.Logf("Will profile in %s", profileFile)
+		f, err := os.Create(profileFile)
+		require.NoError(b, err)
+		b.Cleanup(func() { f.Close() })
+		require.NoError(b, pprof.StartCPUProfile(f))
+		b.Cleanup(pprof.StopCPUProfile)
+	}
+	wal, err := wlog.New(nil, nil, filepath.Join(dir, "wal"), wlog.CompressionNone)
+	require.NoError(b, err)
+	b.Cleanup(func() { wal.Close() })
+	wbl, err := wlog.New(nil, nil, filepath.Join(dir, "wbl"), wlog.CompressionNone)
+	require.NoError(b, err)
+	b.Cleanup(func() { wbl.Close() })
+	// Load the WAL.
+	for i := 0; i < b.N; i++ {
+		opts := DefaultHeadOptions()
+		opts.ChunkDirRoot = dir
+		h, err := NewHead(nil, nil, wal, wbl, opts, nil)
+		require.NoError(b, err)
+		h.Init(0)
+	}
+}
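A hedged usage note: with the environment variables above set, the benchmark can be invoked with something like `BENCHMARK_LOAD_REAL_WLS_DIR=/path/to/data BENCHMARK_LOAD_REAL_WLS_PROFILE=/tmp/replay.pprof go test -run '^$' -bench BenchmarkLoadRealWLs ./tsdb/` (standard go test flags; the paths are placeholders).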
func BenchmarkCreateSeries(b *testing.B) {
	series := genSeries(b.N, 10, 0, 0)
	h, _ := newTestHead(b, 10000, wlog.CompressionNone, false)
@@ -4730,6 +4768,14 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) {
// TestWBLReplay checks the replay at a low level.
func TestWBLReplay(t *testing.T) {
+	for name, scenario := range sampleTypeScenarios {
+		t.Run(name, func(t *testing.T) {
+			testWBLReplay(t, scenario)
+		})
+	}
+}
+
+func testWBLReplay(t *testing.T, scenario sampleTypeScenario) {
	dir := t.TempDir()
	wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
	require.NoError(t, err)
@@ -4745,11 +4791,11 @@
	require.NoError(t, err)
	require.NoError(t, h.Init(0))
-	var expOOOSamples []sample
+	var expOOOSamples []chunks.Sample
	l := labels.FromStrings("foo", "bar")
-	appendSample := func(mins int64, isOOO bool) {
+	appendSample := func(mins int64, val float64, isOOO bool) {
		app := h.Appender(context.Background())
-		ts, v := mins*time.Minute.Milliseconds(), float64(mins)
+		ts, v := mins*time.Minute.Milliseconds(), val
		_, err := app.Append(0, l, ts, v)
		require.NoError(t, err)
		require.NoError(t, app.Commit())
@@ -4760,15 +4806,15 @@
	}
	// In-order sample.
-	appendSample(60, false)
+	appendSample(60, 60, false)
	// Out of order samples.
-	appendSample(40, true)
-	appendSample(35, true)
-	appendSample(50, true)
-	appendSample(55, true)
-	appendSample(59, true)
-	appendSample(31, true)
+	appendSample(40, 40, true)
+	appendSample(35, 35, true)
+	appendSample(50, 50, true)
+	appendSample(55, 55, true)
+	appendSample(59, 59, true)
+	appendSample(31, 31, true)
	// Check that Head's time ranges are set properly.
	require.Equal(t, 60*time.Minute.Milliseconds(), h.MinTime())
@@ -4792,22 +4838,23 @@
	require.False(t, ok)
	require.NotNil(t, ms)
-	xor, err := ms.ooo.oooHeadChunk.chunk.ToXOR()
+	chks, err := ms.ooo.oooHeadChunk.chunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
	require.NoError(t, err)
+	require.Len(t, chks, 1)
-	it := xor.Iterator(nil)
-	actOOOSamples := make([]sample, 0, len(expOOOSamples))
-	for it.Next() == chunkenc.ValFloat {
-		ts, v := it.At()
-		actOOOSamples = append(actOOOSamples, sample{t: ts, f: v})
-	}
+	it := chks[0].chunk.Iterator(nil)
+	actOOOSamples, err := storage.ExpandSamples(it, nil)
+	require.NoError(t, err)
	// OOO chunk will be sorted. Hence sort the expected samples.
	sort.Slice(expOOOSamples, func(i, j int) bool {
-		return expOOOSamples[i].t < expOOOSamples[j].t
+		return expOOOSamples[i].T() < expOOOSamples[j].T()
	})
-	require.Equal(t, expOOOSamples, actOOOSamples)
+	// Passing in true for the 'ignoreCounterResets' parameter prevents differences in counter reset headers
+	// from being factored into the sample comparison.
+	// TODO(fionaliao): understand counter reset behaviour, might want to modify this later
+	requireEqualSamples(t, l.String(), expOOOSamples, actOOOSamples, true)
	require.NoError(t, h.Close())
}
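The test now relies on storage.ExpandSamples to drain a chunk iterator into chunks.Sample values, with nil selecting the default sample constructor. A small sketch of that usage against a hand-built XOR chunk:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	// Build a tiny XOR chunk with two float samples.
	c := chunkenc.NewXORChunk()
	app, err := c.Appender()
	if err != nil {
		panic(err)
	}
	app.Append(1000, 1.5)
	app.Append(2000, 2.5)

	// Expand the iterator the same way the updated test does.
	samples, err := storage.ExpandSamples(c.Iterator(nil), nil)
	if err != nil {
		panic(err)
	}
	for _, s := range samples {
		fmt.Println(s.T(), s.F()) // 1000 1.5, then 2000 2.5
	}
}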

View file

@@ -435,6 +435,8 @@ Outer:
	return nil
}
+func minInt64() int64 { return math.MinInt64 }
+
// resetSeriesWithMMappedChunks is only used during the WAL replay.
func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*mmappedChunk, walSeriesRef chunks.HeadSeriesRef) (overlapped bool) {
	if mSeries.ref != walSeriesRef {
@@ -481,10 +483,11 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m
	}
	// Cache the last mmapped chunk time, so we can skip calling append() for samples it will reject.
	if len(mmc) == 0 {
-		mSeries.mmMaxTime = math.MinInt64
+		mSeries.shardHashOrMemoryMappedMaxTime = uint64(minInt64())
	} else {
-		mSeries.mmMaxTime = mmc[len(mmc)-1].maxTime
-		h.updateMinMaxTime(mmc[0].minTime, mSeries.mmMaxTime)
+		mmMaxTime := mmc[len(mmc)-1].maxTime
+		mSeries.shardHashOrMemoryMappedMaxTime = uint64(mmMaxTime)
+		h.updateMinMaxTime(mmc[0].minTime, mmMaxTime)
	}
	if len(oooMmc) != 0 {
		// Mint and maxt can be in any chunk, they are not sorted.
@@ -585,7 +588,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
			unknownRefs++
			continue
		}
-		if s.T <= ms.mmMaxTime {
+		if s.T <= ms.mmMaxTime() {
			continue
		}
		if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated {
@@ -614,7 +617,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
			unknownHistogramRefs++
			continue
		}
-		if s.t <= ms.mmMaxTime {
+		if s.t <= ms.mmMaxTime() {
			continue
		}
		var chunkCreated bool

View file

@@ -17,9 +17,10 @@ import (
	"fmt"
	"sort"
-	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/oklog/ulid"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/tombstones"
)
@@ -74,24 +75,22 @@ func (o *OOOChunk) NumSamples() int {
	return len(o.samples)
}
-func (o *OOOChunk) ToXOR() (*chunkenc.XORChunk, error) {
-	x := chunkenc.NewXORChunk()
-	app, err := x.Appender()
-	if err != nil {
-		return nil, err
-	}
-	for _, s := range o.samples {
-		app.Append(s.t, s.f)
-	}
-	return x, nil
-}
-
-func (o *OOOChunk) ToXORBetweenTimestamps(mint, maxt int64) (*chunkenc.XORChunk, error) {
-	x := chunkenc.NewXORChunk()
-	app, err := x.Appender()
-	if err != nil {
-		return nil, err
+// ToEncodedChunks returns chunks with the samples in the OOOChunk.
+//
+//nolint:revive // unexported-return.
+func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error) {
+	if len(o.samples) == 0 {
+		return nil, nil
	}
+	// The most common case is that there will be a single chunk, with the same type of samples in it - this is always true for float samples.
+	chks = make([]memChunk, 0, 1)
+	var (
+		cmint int64
+		cmaxt int64
+		chunk chunkenc.Chunk
+		app   chunkenc.Appender
+	)
+	prevEncoding := chunkenc.EncNone // Yes we could call the chunk for this, but this is more efficient.
	for _, s := range o.samples {
		if s.t < mint {
			continue
@@ -99,9 +98,77 @@ func (o *OOOChunk) ToXORBetweenTimestamps(mint, maxt int64) (*chunkenc.XORChunk,
		if s.t > maxt {
			break
		}
-		app.Append(s.t, s.f)
+		encoding := chunkenc.EncXOR
+		if s.h != nil {
+			encoding = chunkenc.EncHistogram
+		} else if s.fh != nil {
+			encoding = chunkenc.EncFloatHistogram
+		}
+		// prevApp is the appender for the previous sample.
+		prevApp := app
+		if encoding != prevEncoding { // For the first sample, this will always be true as EncNone != EncXOR | EncHistogram | EncFloatHistogram
+			if prevEncoding != chunkenc.EncNone {
+				chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
+			}
+			cmint = s.t
+			switch encoding {
+			case chunkenc.EncXOR:
+				chunk = chunkenc.NewXORChunk()
+			case chunkenc.EncHistogram:
+				chunk = chunkenc.NewHistogramChunk()
+			case chunkenc.EncFloatHistogram:
+				chunk = chunkenc.NewFloatHistogramChunk()
+			default:
+				chunk = chunkenc.NewXORChunk()
+			}
+			app, err = chunk.Appender()
+			if err != nil {
+				return
+			}
+		}
+		switch encoding {
+		case chunkenc.EncXOR:
+			app.Append(s.t, s.f)
+		case chunkenc.EncHistogram:
+			// Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
+			prevHApp, _ := prevApp.(*chunkenc.HistogramAppender)
+			var (
+				newChunk chunkenc.Chunk
+				recoded  bool
+			)
+			newChunk, recoded, app, _ = app.AppendHistogram(prevHApp, s.t, s.h, false)
+			if newChunk != nil { // A new chunk was allocated.
+				if !recoded {
+					chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
+				}
+				chunk = newChunk
+				cmint = s.t
+			}
+		case chunkenc.EncFloatHistogram:
+			// Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
+			prevHApp, _ := prevApp.(*chunkenc.FloatHistogramAppender)
+			var (
+				newChunk chunkenc.Chunk
+				recoded  bool
+			)
+			newChunk, recoded, app, _ = app.AppendFloatHistogram(prevHApp, s.t, s.fh, false)
+			if newChunk != nil { // A new chunk was allocated.
+				if !recoded {
+					chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
+				}
+				chunk = newChunk
+				cmint = s.t
+			}
+		}
+		cmaxt = s.t
+		prevEncoding = encoding
	}
-	return x, nil
+	if prevEncoding != chunkenc.EncNone {
+		chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
+	}
+	return chks, nil
}
var _ BlockReader = &OOORangeHead{}
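The loop above cuts a new chunk whenever the sample kind changes, since XOR, histogram, and float-histogram samples cannot share an encoding. A simplified, self-contained sketch of that splitting rule (hypothetical types, ignoring the recoding special cases):

package main

import "fmt"

// sample is a stand-in for the OOOChunk sample; kind plays the role of the
// chunk encoding selected from s.f, s.h, or s.fh.
type sample struct {
	t    int64
	kind string // "float", "histogram", or "floathistogram"
}

// splitByKind groups consecutive samples of the same kind into one chunk,
// starting a new chunk whenever the kind changes.
func splitByKind(samples []sample) (chunks [][]sample) {
	prev := ""
	for _, s := range samples {
		if s.kind != prev {
			chunks = append(chunks, nil)
			prev = s.kind
		}
		chunks[len(chunks)-1] = append(chunks[len(chunks)-1], s)
	}
	return chunks
}

func main() {
	stream := []sample{{1, "float"}, {2, "float"}, {3, "histogram"}, {4, "float"}}
	fmt.Println(len(splitByKind(stream))) // 3: float run, histogram run, float run
}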

View file

@@ -108,11 +108,19 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
			c := s.ooo.oooHeadChunk
			if c.OverlapsClosedInterval(oh.mint, oh.maxt) && maxMmapRef == 0 {
				ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
-				var xor chunkenc.Chunk
				if len(c.chunk.samples) > 0 { // Empty samples happens in tests, at least.
-					xor, _ = c.chunk.ToXOR() // Ignoring error because it can't fail.
+					chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(c.minTime, c.maxTime)
+					if err != nil {
+						handleChunkWriteError(err)
+						return nil
+					}
+					for _, chk := range chks {
+						addChunk(c.minTime, c.maxTime, ref, chk.chunk)
+					}
+				} else {
+					var emptyChunk chunkenc.Chunk
+					addChunk(c.minTime, c.maxTime, ref, emptyChunk)
				}
-				addChunk(c.minTime, c.maxTime, ref, xor)
			}
		}
		for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- {
@@ -341,14 +349,20 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead,
			continue
		}
-		mmapRef := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper)
-		if mmapRef == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
+		var lastMmapRef chunks.ChunkDiskMapperRef
+		mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper)
+		if len(mmapRefs) == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
			// Nothing was m-mapped. So take the mmapRef from the existing slice if it exists.
-			mmapRef = ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref
+			mmapRefs = []chunks.ChunkDiskMapperRef{ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref}
		}
-		seq, off := mmapRef.Unpack()
+		if len(mmapRefs) == 0 {
+			lastMmapRef = 0
+		} else {
+			lastMmapRef = mmapRefs[len(mmapRefs)-1]
+		}
+		seq, off := lastMmapRef.Unpack()
		if seq > lastSeq || (seq == lastSeq && off > lastOff) {
-			ch.lastMmapRef, lastSeq, lastOff = mmapRef, seq, off
+			ch.lastMmapRef, lastSeq, lastOff = lastMmapRef, seq, off
		}
		if len(ms.ooo.oooMmappedChunks) > 0 {
			ch.postings = append(ch.postings, seriesRef)

View file

@@ -77,12 +77,12 @@ func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, er
	}, nil
}
-func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	res, err := q.index.SortedLabelValues(ctx, name, matchers...)
	return res, nil, err
}
-func (q *blockBaseQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (q *blockBaseQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	res, err := q.index.LabelNames(ctx, matchers...)
	return res, nil, err
}

View file

@@ -3022,7 +3022,7 @@ func TestQuerierIndexQueriesRace(t *testing.T) {
			q, err := db.Querier(math.MinInt64, math.MaxInt64)
			require.NoError(t, err)
-			values, _, err := q.LabelValues(ctx, "seq", c.matchers...)
+			values, _, err := q.LabelValues(ctx, "seq", nil, c.matchers...)
			require.NoError(t, err)
			require.Emptyf(t, values, `label values for label "seq" should be empty`)

View file

@@ -30,12 +30,10 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
	return r
}
-func GenerateTestHistogramsWithUnknownResetHint(n int) []*histogram.Histogram {
-	hs := GenerateTestHistograms(n)
-	for i := range hs {
-		hs[i].CounterResetHint = histogram.UnknownCounterReset
-	}
-	return hs
+func GenerateTestHistogramWithHint(n int, hint histogram.CounterResetHint) *histogram.Histogram {
+	h := GenerateTestHistogram(n)
+	h.CounterResetHint = hint
+	return h
}
// GenerateTestHistogram but it is up to the user to set any known counter reset hint.
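A hedged usage sketch of the replacement helper, assuming it lives in tsdb/tsdbutil alongside GenerateTestHistogram as the surrounding code suggests:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
)

func main() {
	// Instead of bulk-generating histograms and patching the hint afterwards,
	// generate one test histogram with an explicit counter-reset hint.
	h := tsdbutil.GenerateTestHistogramWithHint(1, histogram.NotCounterReset)
	fmt.Println(h.CounterResetHint == histogram.NotCounterReset) // true
}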

View file

@@ -265,6 +265,11 @@ func (w *Watcher) loop() {
// Run the watcher, which will tail the WAL until the quit channel is closed
// or an error case is hit.
func (w *Watcher) Run() error {
+	_, lastSegment, err := w.firstAndLast()
+	if err != nil {
+		return fmt.Errorf("wal.Segments: %w", err)
+	}
	// We want to ensure this is false across iterations since
	// Run will be called again if there was a failure to read the WAL.
	w.sendSamples = false
@@ -289,20 +294,14 @@ func (w *Watcher) Run() error {
		return err
	}
-	level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment)
+	level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment)
	for !isClosed(w.quit) {
		w.currentSegmentMetric.Set(float64(currentSegment))
-		// Re-check on each iteration in case a new segment was added,
-		// because watch() will wait for notifications on the last segment.
-		_, lastSegment, err := w.firstAndLast()
-		if err != nil {
-			return fmt.Errorf("wal.Segments: %w", err)
-		}
-		tail := currentSegment >= lastSegment
-		level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment, "lastSegment", lastSegment)
-		if err := w.watch(currentSegment, tail); err != nil && !errors.Is(err, ErrIgnorable) {
+		// On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment.
+		// On subsequent calls to this function, currentSegment will have been incremented and we should open that segment.
+		level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment)
+		if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil && !errors.Is(err, ErrIgnorable) {
			return err
		}
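With lastSegment now computed once before the loop, the per-segment decision reduces to a comparison: segments strictly before the last one known at startup are replayed straight through, and only the newest is tailed for new writes. A toy illustration:

package main

import "fmt"

// tail reports whether a segment should be watched for incoming writes
// rather than read once and closed, mirroring the Run() change above.
func tail(currentSegment, lastSegment int) bool {
	return currentSegment >= lastSegment
}

func main() {
	for seg := 3; seg <= 5; seg++ {
		fmt.Printf("segment %d: tail=%v\n", seg, tail(seg, 5))
	}
	// segment 3: tail=false
	// segment 4: tail=false
	// segment 5: tail=true
}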

View file

@@ -17,6 +17,7 @@ import (
	"math/rand"
	"os"
	"path"
+	"runtime"
	"sync"
	"testing"
	"time"
@@ -700,11 +701,46 @@ func TestRun_StartupTime(t *testing.T) {
	}
}
+func generateWALRecords(w *WL, segment, seriesCount, samplesCount int) error {
+	enc := record.Encoder{}
+	for j := 0; j < seriesCount; j++ {
+		ref := j + (segment * 100)
+		series := enc.Series([]record.RefSeries{
+			{
+				Ref:    chunks.HeadSeriesRef(ref),
+				Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", segment)),
+			},
+		}, nil)
+		if err := w.Log(series); err != nil {
+			return err
+		}
+		for k := 0; k < samplesCount; k++ {
+			inner := rand.Intn(ref + 1)
+			sample := enc.Samples([]record.RefSample{
+				{
+					Ref: chunks.HeadSeriesRef(inner),
+					T:   int64(segment),
+					V:   float64(segment),
+				},
+			}, nil)
+			if err := w.Log(sample); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
func TestRun_AvoidNotifyWhenBehind(t *testing.T) {
-	const pageSize = 32 * 1024
-	const segments = 10
-	const seriesCount = 20
-	const samplesCount = 300
+	if runtime.GOOS == "windows" { // Takes a really long time, perhaps because min sleep time is 15ms.
+		t.SkipNow()
+	}
+	const segmentSize = pageSize // Smallest allowed segment size.
+	const segmentsToWrite = 5
+	const segmentsToRead = segmentsToWrite - 1
+	const seriesCount = 10
+	const samplesCount = 50
	// This test can take longer than intended to finish in cloud CI.
	readTimeout := 10 * time.Second
@@ -717,73 +753,37 @@ func TestRun_AvoidNotifyWhenBehind(t *testing.T) {
			err := os.Mkdir(wdir, 0o777)
			require.NoError(t, err)
-			enc := record.Encoder{}
-			w, err := NewSize(nil, nil, wdir, pageSize, compress)
+			w, err := NewSize(nil, nil, wdir, segmentSize, compress)
			require.NoError(t, err)
			var wg sync.WaitGroup
-			// add one segment initially to ensure there's a value > 0 for the last segment id
-			for i := 0; i < 1; i++ {
-				for j := 0; j < seriesCount; j++ {
-					ref := j + (i * 100)
-					series := enc.Series([]record.RefSeries{
-						{
-							Ref:    chunks.HeadSeriesRef(ref),
-							Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)),
-						},
-					}, nil)
-					require.NoError(t, w.Log(series))
-					for k := 0; k < samplesCount; k++ {
-						inner := rand.Intn(ref + 1)
-						sample := enc.Samples([]record.RefSample{
-							{
-								Ref: chunks.HeadSeriesRef(inner),
-								T:   int64(i),
-								V:   float64(i),
-							},
-						}, nil)
-						require.NoError(t, w.Log(sample))
-					}
-				}
-			}
+			// Generate one segment initially to ensure that watcher.Run() finds at least one segment on disk.
+			require.NoError(t, generateWALRecords(w, 0, seriesCount, samplesCount))
+			w.NextSegment() // Force creation of the next segment
			wg.Add(1)
			go func() {
				defer wg.Done()
-				for i := 1; i < segments; i++ {
-					for j := 0; j < seriesCount; j++ {
-						ref := j + (i * 100)
-						series := enc.Series([]record.RefSeries{
-							{
-								Ref:    chunks.HeadSeriesRef(ref),
-								Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)),
-							},
-						}, nil)
-						require.NoError(t, w.Log(series))
-						for k := 0; k < samplesCount; k++ {
-							inner := rand.Intn(ref + 1)
-							sample := enc.Samples([]record.RefSample{
-								{
-									Ref: chunks.HeadSeriesRef(inner),
-									T:   int64(i),
-									V:   float64(i),
-								},
-							}, nil)
-							require.NoError(t, w.Log(sample))
-						}
-					}
+				for i := 1; i < segmentsToWrite; i++ {
+					require.NoError(t, generateWALRecords(w, i, seriesCount, samplesCount))
+					w.NextSegment()
				}
			}()
			wt := newWriteToMock(time.Millisecond)
			watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
-			watcher.MaxSegment = segments
+			watcher.MaxSegment = segmentsToRead
			watcher.setMetrics()
			startTime := time.Now()
			err = watcher.Run()
			wg.Wait()
			require.Less(t, time.Since(startTime), readTimeout)
+			// But samples records shouldn't get dropped
+			retry(t, defaultRetryInterval, defaultRetries, func() bool {
+				return wt.checkNumSeries() > 0
+			})
+			require.Equal(t, segmentsToRead*seriesCount*samplesCount, wt.samplesAppended)
			require.NoError(t, err)
			require.NoError(t, w.Close())
		})

Some files were not shown because too many files have changed in this diff.